Master custom nodes, batch automation, and 10-second renders - the exact workflows used by agencies managing dozens of AI influencers
Custom nodes automate repetitive workflows and create functionality that doesn't exist in standard ComfyUI. Agencies managing 50+ AI influencers save 30+ hours weekly with custom automation nodes.
Automatically applies your character's LoRA, embeddings, and facial consistency settings to every generation. Eliminates manual configuration errors that break character consistency.
# ComfyUI Custom Node: CharacterConsistencyEnforcer.py
import folder_paths
import comfy.sd
import comfy.utils


class CharacterConsistencyEnforcer:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "model": ("MODEL",),
                "character_name": (["Ava", "Marcus", "Luna", "Custom"],),
                "lora_strength": ("FLOAT", {"default": 0.85, "min": 0.0, "max": 1.0, "step": 0.05}),
                "face_lock_strength": ("FLOAT", {"default": 0.95, "min": 0.0, "max": 1.0, "step": 0.05}),
            },
            "optional": {
                "custom_lora_path": ("STRING", {"default": ""}),
            },
        }

    RETURN_TYPES = ("MODEL", "STRING",)
    RETURN_NAMES = ("model", "character_prompt",)
    FUNCTION = "enforce_consistency"
    CATEGORY = "AI_Influencer/Character"

    def enforce_consistency(self, model, character_name, lora_strength, face_lock_strength, custom_lora_path=""):
        # Load the character-specific LoRA and apply it to the model
        lora_path = self.get_lora_path(character_name, custom_lora_path)
        model_lora = self.load_lora(model, lora_path, lora_strength)
        # Build the facial-consistency prompt fragment for downstream CLIPTextEncode nodes
        character_prompt = self.build_consistency_prompt(character_name, face_lock_strength)
        return (model_lora, character_prompt)

    def load_lora(self, model, lora_path, strength):
        # No LoRA registered for this character: return the model unchanged
        if not lora_path:
            return model
        # Resolve the file inside ComfyUI's loras folder and apply it to the model only
        # (same pattern as the built-in LoraLoaderModelOnly node)
        full_path = folder_paths.get_full_path("loras", lora_path)
        if full_path is None:
            return model
        lora = comfy.utils.load_torch_file(full_path, safe_load=True)
        model_lora, _ = comfy.sd.load_lora_for_models(model, None, lora, strength, 0)
        return model_lora

    def get_lora_path(self, character_name, custom_path):
        if custom_path:
            return custom_path
        # File names are relative to ComfyUI's models/loras folder
        character_loras = {
            "Ava": "ava_consistent_v3.safetensors",
            "Marcus": "marcus_fitness_v2.safetensors",
            "Luna": "luna_fashion_v4.safetensors",
        }
        return character_loras.get(character_name, "")

    def build_consistency_prompt(self, character_name, strength):
        # Map face_lock_strength (0.0-1.0) onto a prompt emphasis weight of 1.0-1.5
        weight = round(1.0 + strength * 0.5, 2)
        base_prompts = {
            "Ava": f"(ava_character:{weight}), consistent facial features, same person, identical appearance",
            "Marcus": f"(marcus_face:{weight}), athletic build, consistent identity",
            "Luna": f"(luna_identity:{weight}), unique facial structure, same individual",
        }
        return base_prompts.get(character_name, "consistent character")


NODE_CLASS_MAPPINGS = {
    "CharacterConsistencyEnforcer": CharacterConsistencyEnforcer
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "CharacterConsistencyEnforcer": "🎭 Character Consistency"
}

# BatchScenarioGenerator.py
import random
from datetime import datetime


class BatchScenarioGenerator:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "character_type": (["Fashion", "Fitness", "Lifestyle", "Tech"],),
                "num_scenarios": ("INT", {"default": 30, "min": 1, "max": 100}),
                "variety_level": (["Low", "Medium", "High"],),
                "include_seasonal": ("BOOLEAN", {"default": True}),
            }
        }

    RETURN_TYPES = ("STRING", "INT",)
    RETURN_NAMES = ("scenario_prompts", "total_count",)
    FUNCTION = "generate_scenarios"
    CATEGORY = "AI_Influencer/Automation"

    def __init__(self):
        self.scenario_templates = {
            "Fashion": {
                "locations": ["boutique", "rooftop", "city street", "cafe", "art gallery", "penthouse", "beach club"],
                "outfits": ["elegant dress", "casual chic", "streetwear", "business attire", "evening gown", "athleisure"],
                "activities": ["shopping", "posing", "walking", "sitting at cafe", "attending event"],
                "times": ["golden hour", "midday", "blue hour", "evening", "morning light"],
                "moods": ["confident", "playful", "sophisticated", "relaxed", "energetic"],
            },
            "Fitness": {
                "locations": ["gym", "outdoor track", "beach", "home workout", "yoga studio", "boxing gym", "park"],
                "outfits": ["sports bra and leggings", "tank top and shorts", "athletic set", "yoga attire"],
                "activities": ["lifting weights", "running", "yoga pose", "boxing", "stretching", "protein shake"],
                "times": ["early morning", "midday workout", "evening session", "post-workout"],
                "moods": ["motivated", "intense", "zen", "accomplished", "energetic"],
            },
            "Lifestyle": {
                "locations": ["home office", "cozy living room", "balcony", "kitchen", "bedroom", "garden"],
                "outfits": ["casual wear", "loungewear", "smart casual", "cozy sweater"],
                "activities": ["working on laptop", "reading book", "coffee moment", "relaxing", "creating content"],
                "times": ["morning", "afternoon", "evening", "sunset"],
                "moods": ["productive", "relaxed", "inspired", "contemplative", "cozy"],
            },
            "Tech": {
                "locations": ["modern office", "co-working space", "tech lab", "home setup", "conference"],
                "outfits": ["smart casual", "tech startup style", "business casual", "hoodie and jeans"],
                "activities": ["coding", "presenting", "reviewing gadgets", "testing device", "teaching"],
                "times": ["office hours", "late night coding", "daytime", "evening"],
                "moods": ["focused", "enthusiastic", "analytical", "innovative", "professional"],
            },
        }

    def generate_scenarios(self, character_type, num_scenarios, variety_level, include_seasonal):
        templates = self.scenario_templates.get(character_type, self.scenario_templates["Lifestyle"])
        # Lower variety levels draw from a smaller slice of each template list
        variety_multiplier = {"Low": 0.5, "Medium": 0.7, "High": 1.0}[variety_level]

        def pick(items):
            return random.choice(items[:max(1, int(len(items) * variety_multiplier))])

        scenarios = []
        for i in range(num_scenarios):
            # Create a unique combination for this scenario
            location = pick(templates["locations"])
            outfit = pick(templates["outfits"])
            activity = pick(templates["activities"])
            time_of_day = pick(templates["times"])
            mood = pick(templates["moods"])
            # Build the prompt
            scenario = f"{mood} {character_type.lower()} influencer, {activity} at {location}, "
            scenario += f"wearing {outfit}, {time_of_day} lighting, professional photography, "
            scenario += "high quality, detailed, 8k resolution"
            # Add seasonal elements if enabled
            if include_seasonal:
                current_month = datetime.now().month
                season_props = self.get_seasonal_props(current_month)
                scenario += f", {season_props}"
            scenarios.append(f"Scenario {i+1}: {scenario}")

        # Join scenarios into a single newline-separated string for batch processing
        output = "\n\n".join(scenarios)
        return (output, len(scenarios))

    def get_seasonal_props(self, month):
        seasons = {
            (12, 1, 2): "winter atmosphere, cozy vibes, warm tones",
            (3, 4, 5): "spring freshness, bright colors, natural light",
            (6, 7, 8): "summer vibes, vibrant energy, golden hour",
            (9, 10, 11): "autumn mood, warm colors, soft lighting",
        }
        for months, props in seasons.items():
            if month in months:
                return props
        return ""


NODE_CLASS_MAPPINGS = {
    "BatchScenarioGenerator": BatchScenarioGenerator
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "BatchScenarioGenerator": "📋 Batch Scenario Generator"
}

Learn custom nodes, batch automation, and agency-level workflows in our complete AI Influencer course
✓ Lifetime access ✓ 50+ hours of content ✓ Custom node library included
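One packaging note: ComfyUI loads custom nodes from its custom_nodes folder at startup and registers whatever it finds in NODE_CLASS_MAPPINGS. A minimal sketch of one way to package the two nodes above (the folder name is an example; restart ComfyUI after adding it):

# ComfyUI/custom_nodes/ai_influencer_nodes/__init__.py
# Place CharacterConsistencyEnforcer.py and BatchScenarioGenerator.py in the same folder.
from .CharacterConsistencyEnforcer import (
    NODE_CLASS_MAPPINGS as CHARACTER_MAPPINGS,
    NODE_DISPLAY_NAME_MAPPINGS as CHARACTER_DISPLAY,
)
from .BatchScenarioGenerator import (
    NODE_CLASS_MAPPINGS as SCENARIO_MAPPINGS,
    NODE_DISPLAY_NAME_MAPPINGS as SCENARIO_DISPLAY,
)

# ComfyUI reads these two dicts from the package to register the nodes
NODE_CLASS_MAPPINGS = {**CHARACTER_MAPPINGS, **SCENARIO_MAPPINGS}
NODE_DISPLAY_NAME_MAPPINGS = {**CHARACTER_DISPLAY, **SCENARIO_DISPLAY}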
OpenPose: Controls overall body position, limb placement, and gesture. Use weight: 0.7-0.9
Depth Map: Maintains 3D spatial relationships and depth hierarchy. Use weight: 0.4-0.6
Canny Edge: Preserves sharp edges and fine details. Use weight: 0.3-0.5
// Professional Multi-ControlNet Workflow
LoadCheckpoint → SDXL 1.0 Base
↓
LoadLoRA → Character LoRA (weight: 0.85)
↓
[PARALLEL CONTROLNET PROCESSING]
├─ LoadImage (Reference Pose) → OpenPosePreprocessor
│ ↓
│ ControlNetApply (OpenPose Model, strength: 0.8)
│
├─ LoadImage (Same Reference) → DepthPreprocessor (MiDaS)
│ ↓
│ ControlNetApply (Depth Model, strength: 0.5)
│
└─ LoadImage (Same Reference) → CannyEdgePreprocessor
↓
ControlNetApply (Canny Model, strength: 0.4)
↓
ControlNetStack Combiner → Merge all three controls
↓
CLIPTextEncode (Positive) → "[character name], professional photo,
high fashion, detailed face, {activity description}, {lighting},
studio quality, 8k, sharp focus"
↓
CLIPTextEncode (Negative) → "deformed, bad anatomy, blurry,
low quality, inconsistent face, mutations, distorted limbs"
↓
KSampler →
• Steps: 35-45
• CFG: 7.5
• Sampler: DPM++ 2M SDE Karras
• Denoise: 0.75 (lower for more control adherence)
↓
FaceDetailer → Enhance facial features
↓
UltimateSDUpscale → 4x resolution (512x768 → 2048x3072)
↓
SaveImage → Auto-naming: {character}_{date}_{scenario}.png

| Control Type | Weight Range | Use Case | Pro Tip |
|---|---|---|---|
| OpenPose | 0.7 - 0.9 | Dynamic poses, full body | Higher = exact pose, lower = interpretation |
| Depth Map | 0.4 - 0.6 | Spatial depth, composition | Keep low to avoid flatness |
| Canny Edge | 0.3 - 0.5 | Detail preservation | Too high = traced look |
| Scribble | 0.5 - 0.7 | Loose composition guide | Great for creative freedom |
| IP-Adapter | 0.6 - 0.8 | Style transfer, face consistency | Combine with FaceID for best results |
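If you drive workflows from scripts, it can help to keep these recommended ranges as data and sanity-check each ControlNet strength before queueing a batch. A small sketch that simply mirrors the table above; the helper itself is hypothetical:

# controlnet_ranges.py - hypothetical helper mirroring the table above
RECOMMENDED_STRENGTH = {
    "openpose": (0.7, 0.9),
    "depth": (0.4, 0.6),
    "canny": (0.3, 0.5),
    "scribble": (0.5, 0.7),
    "ip_adapter": (0.6, 0.8),
}

def check_strength(control_type: str, strength: float) -> bool:
    """Return True if the strength falls inside the recommended range."""
    low, high = RECOMMENDED_STRENGTH[control_type]
    return low <= strength <= high

# Example: flag a Canny strength that would produce a "traced" look
assert check_strength("canny", 0.4)
assert not check_strength("canny", 0.8)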
Character identity: lock in character identity with maximum weight
(ava_character:1.4), (consistent_face:1.3), same person, identical facial features

Quality: professional photography standards
professional photography, high quality, 8k resolution, sharp focus, detailed, photorealistic

Activity: what the character is doing
sitting at outdoor cafe, holding coffee cup, relaxed posture, engaging with camera

Style: aesthetic and appearance details
wearing elegant summer dress, natural makeup, flowing hair, gold jewelry, sophisticated style

Environment: setting and lighting conditions
european cafe terrace, afternoon golden hour, natural sunlight, bokeh background, depth of field

Complete assembled prompt:
(ava_character:1.4), (consistent_face:1.3), same person, identical facial features, professional photography, high quality, 8k resolution, sharp focus, detailed, photorealistic, sitting at outdoor cafe, holding coffee cup, relaxed posture, engaging with camera, wearing elegant summer dress, natural makeup, flowing hair, gold jewelry, sophisticated style, european cafe terrace, afternoon golden hour, natural sunlight, bokeh background, depth of field

Use Emphasis Weights Strategically
1.0-1.2 = standard, 1.3-1.5 = strong, above 1.5 = often too much
Order Matters
The most important terms go first; they carry more influence
Negative Prompts Are Critical
List everything you DON'T want - saves regenerations
Save Prompt Templates
Create reusable templates for each character and scenario type
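One lightweight way to keep such templates is plain Python (or JSON) keyed by character and scenario. The sketch below assembles the five layers from the anatomy above, most important first; the character and scenario names are just examples:

# prompt_templates.py - reusable prompt layers, ordered most-important-first
CHARACTER_LAYER = {
    "ava": "(ava_character:1.4), (consistent_face:1.3), same person, identical facial features",
}
QUALITY_LAYER = "professional photography, high quality, 8k resolution, sharp focus, detailed, photorealistic"
SCENARIO_LAYERS = {
    "cafe_afternoon": {
        "activity": "sitting at outdoor cafe, holding coffee cup, relaxed posture, engaging with camera",
        "style": "wearing elegant summer dress, natural makeup, flowing hair, gold jewelry, sophisticated style",
        "environment": "european cafe terrace, afternoon golden hour, natural sunlight, bokeh background, depth of field",
    },
}
NEGATIVE = "deformed, bad anatomy, blurry, low quality, inconsistent face, mutations, distorted limbs"

def build_prompt(character: str, scenario: str) -> tuple[str, str]:
    """Assemble (positive, negative) prompts from the reusable layers."""
    layers = SCENARIO_LAYERS[scenario]
    positive = ", ".join([
        CHARACTER_LAYER[character],  # identity first: highest influence
        QUALITY_LAYER,
        layers["activity"],
        layers["style"],
        layers["environment"],
    ])
    return positive, NEGATIVE

positive, negative = build_prompt("ava", "cafe_afternoon")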
// Optimized Speed Workflow (8-12 seconds per image)
LoadCheckpoint → SDXL Turbo 1.0
↓
LoadLoRA → Character LoRA (weight: 0.9)
↓
LoadLoRA → LCM LoRA (weight: 0.8) ← Speed booster
↓
ControlNetApply → OpenPose only (remove others for speed)
• Strength: 0.7
• Preprocessor: optimized OpenPose
↓
CLIPTextEncode → Use shorter, optimized prompts
↓
KSampler (OPTIMIZED):
• Steps: 20-25 (vs 35-45 standard)
• CFG: 6.0 (lower = faster)
• Sampler: LCM or Euler a (fastest)
• Scheduler: Simple (not Karras)
• Denoise: 0.8
↓
FastVAEDecode → Optimized VAE
↓
[SKIP INITIAL UPSCALING - Do later in batch]
↓
SaveImage → Queue next generation

// Post-Processing Batch Upscale
LoadImageBatch → All generated images
↓
UltimateSDUpscale → Process all at once
• 4x upscale
• Tile size: 512
• Batch size: 10-20 images
↓
FaceDetailer Batch → Enhance all faces
↓
SaveImageBatch → Final output
Boutique AI influencer agency • Los Angeles
3-person team managing 50+ AI influencers across fashion, fitness, lifestyle niches. Built entire operation on advanced ComfyUI workflows.
Active Influencers
Daily Time Investment
Automation Level
Monthly Revenue
Character Consistency: loads all character assets, LoRAs, and settings from a single config file
Smart Batch Queue: queues 1000+ image generations with intelligent GPU management
Consistency Validator: an AI-powered check that flags inconsistent faces before posting
Seasonal Scenario: automatically adjusts scenarios based on the current season, holidays, and trends
Auto Upscale Post: batch upscales, applies FaceDetailer, adds a watermark, and exports platform-specific sizes
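The upscaling and FaceDetailer stages of that last step stay inside ComfyUI; the watermark and platform-export half can be a short Pillow script run over the output folder. A minimal sketch, where the paths, sizes, and margins are assumptions to adapt:

# batch_export.py - sketch of the watermark + platform-export step (paths and sizes are examples)
from pathlib import Path
from PIL import Image, ImageOps

# Common feed/story sizes; adjust to the platforms you actually publish to
PLATFORM_SIZES = {
    "instagram_feed": (1080, 1350),
    "instagram_story": (1080, 1920),
    "square": (1080, 1080),
}

def export_batch(src_dir: str, out_dir: str, watermark_path: str) -> None:
    watermark = Image.open(watermark_path).convert("RGBA")
    for image_path in Path(src_dir).glob("*.png"):
        img = Image.open(image_path).convert("RGB")
        for platform, size in PLATFORM_SIZES.items():
            # Center-crop to the target aspect ratio, then resize
            framed = ImageOps.fit(img, size, Image.LANCZOS)
            # Paste the watermark in the bottom-right corner, using its alpha channel as mask
            x = size[0] - watermark.width - 24
            y = size[1] - watermark.height - 24
            framed.paste(watermark, (x, y), watermark)
            out_path = Path(out_dir) / platform / f"{image_path.stem}_{platform}.jpg"
            out_path.parent.mkdir(parents=True, exist_ok=True)
            framed.save(out_path, quality=95)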
| Custom Node | Build Time | Learning Curve | Time Savings | ROI |
|---|---|---|---|---|
| Character Consistency | 4-6 hours | Medium | 95% | Excellent |
| Batch Scenario Generator | 6-8 hours | Hard | 99% | Outstanding |
| Smart Batch Queue | 8-12 hours | Very Hard | 98% | Excellent |
| Consistency Validator | 10-15 hours | Very Hard | 85% | Good |
| Seasonal Scenario | 3-5 hours | Easy | 60% | Excellent |
| Auto Upscale Post | 4-6 hours | Medium | 90% | Outstanding |
Character Consistency (Week 1)
Foundation of everything - build this first
Seasonal Scenario (Week 2)
Easy win, huge time savings
Auto Upscale Post (Week 3)
Eliminates tedious final steps
Batch Scenario Generator (Month 2)
Worth the effort when scaling to 5+ influencers
Smart Batch Queue (Month 3)
Essential for agency-level operations
Get the complete system for creating and scaling multiple AI influencers with professional workflows
✓ Custom node templates ✓ Agency workflow blueprints ✓ 24/7 support
Do I need to know Python to build custom nodes?
Basic Python knowledge helps but isn't mandatory. Start by modifying existing nodes from GitHub - most follow similar patterns you can copy. The ComfyUI documentation provides templates. Many users learn by doing: copy a simple node, change one thing, test. Alternatively, hire a Python developer on Fiverr ($50-150) to build your first few nodes from your specifications.
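As a reference point for "copy a simple node, change one thing, test": a working node is only a dozen lines. A minimal sketch following the same conventions as the nodes earlier in this article; the class name and category are arbitrary:

# my_first_node.py - drop into ComfyUI/custom_nodes/ and restart ComfyUI
class PromptSuffixNode:
    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"text": ("STRING", {"default": ""}),
                             "suffix": ("STRING", {"default": ", 8k, sharp focus"})}}

    RETURN_TYPES = ("STRING",)
    FUNCTION = "append_suffix"
    CATEGORY = "AI_Influencer/Examples"

    def append_suffix(self, text, suffix):
        # The one line of "business logic": change this, reload ComfyUI, observe the result
        return (text + suffix,)


NODE_CLASS_MAPPINGS = {"PromptSuffixNode": PromptSuffixNode}
NODE_DISPLAY_NAME_MAPPINGS = {"PromptSuffixNode": "Prompt Suffix (example)"}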
What GPU hardware do I need?
Minimum: RTX 3060 12GB for basic operation (20-30 sec/image). Recommended: RTX 4080 or 3090 (10-15 sec/image). Optimal: RTX 4090 (8-10 sec/image). Can't afford one? Use cloud GPUs: RunPod at $0.50/hr or Vast.ai at $0.25/hr. Generate your weekly content in 1-2 hours of rental ($0.50-1.00 cost). Many professionals do this successfully.
How long does it take to learn these advanced workflows?
Week 1-2: Basic workflows, understand node connections. Week 3-4: LoRA training, ControlNet basics. Month 2: Multi-ControlNet, prompt engineering mastery. Month 3: Custom nodes, batch automation. Month 4-6: Full professional setup. This assumes 10-15 hours/week of practice. Fast track: Take a course or work with a mentor - it cuts the timeline by about 60%.
Can I do this with Midjourney instead of ComfyUI?
No, these advanced workflows are ComfyUI-specific. Midjourney doesn't support custom nodes, ControlNet, or local automation. That's precisely why serious AI influencer creators use ComfyUI - it offers control Midjourney can't match. You can use Midjourney for ideation/concept art, then recreate consistent versions in ComfyUI. They're complementary, not competing tools.
How do I keep my character's face consistent across scenarios?
Three-layer approach: (1) Train a strong character LoRA (20-30 diverse images), (2) Use IP-Adapter with a reference face image, (3) Implement multi-ControlNet with OpenPose for the body plus a face lock for features. Weight your character embeddings at 1.3-1.5. Test thoroughly across 50+ scenarios before committing to a character. Budget 2-3 days for initial character development and testing.
How do I manage content for multiple AI influencers at once?
Build a master batch workflow with a character selection dropdown. Create separate folders: /characters/ava/, /characters/marcus/, etc. Each contains the character's LoRA, embeddings, and config file. Your custom node reads the config and loads the appropriate assets. Queue all characters' content at once (50-100 images) and let it run overnight. Process time: 2-8 hours unattended, depending on GPU.
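A minimal sketch of that overnight queue, assuming ComfyUI is running locally on its default port (8188) and your master workflow is exported in API format; the config keys, node IDs, and paths below are placeholders to replace with your own:

# queue_characters.py - sketch of an overnight batch queue against ComfyUI's HTTP API
import json
import urllib.request
from pathlib import Path

COMFY_URL = "http://127.0.0.1:8188/prompt"  # default ComfyUI address

def load_character_config(name: str) -> dict:
    # e.g. characters/ava/config.json with keys like "lora" and "trigger_prompt"
    return json.loads(Path(f"characters/{name}/config.json").read_text())

def queue_workflow(workflow: dict) -> None:
    # ComfyUI accepts workflows exported in API format via POST /prompt
    data = json.dumps({"prompt": workflow}).encode("utf-8")
    req = urllib.request.Request(COMFY_URL, data=data, headers={"Content-Type": "application/json"})
    urllib.request.urlopen(req)

def queue_character_batch(name: str, base_workflow: dict, scenarios: list[str]) -> None:
    config = load_character_config(name)
    for scenario in scenarios:
        wf = json.loads(json.dumps(base_workflow))  # fresh copy per job
        # Node IDs below are placeholders: look them up in your exported API-format JSON
        wf["10"]["inputs"]["lora_name"] = config["lora"]
        wf["6"]["inputs"]["text"] = f'{config["trigger_prompt"]}, {scenario}'
        queue_workflow(wf)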
How often do I need to retrain my character's LoRA?
Train the initial LoRA once with 20-30 carefully curated images. Only retrain if: (1) Consistency drops below 90%, (2) You add significantly different scenarios (such as swimwear content when you trained only on clothed images), (3) Model updates (a new SDXL version). Most successful influencers never retrain - they nail it once. If you're retraining often, your initial training data wasn't diverse enough.
Can I sell the custom nodes I build?
Yes! Many developers sell custom nodes on Gumroad, Payhip, or directly. Price range: $20-200 depending on complexity. Alternatively, publish free on GitHub to build reputation, then offer paid consulting ($100-300/hr) for custom development. Some create premium node packs for agencies ($500-2000). Just ensure your code doesn't violate any licenses of the nodes you've based yours on.