# DeepResearch Environment Configuration
# Copy this file to .env and fill in your actual values
# Run: cp .env.example .env
# =============================================================================
# TORCH/NCCL Configuration (for multi-GPU setups)
# =============================================================================
TORCHDYNAMO_VERBOSE=1
TORCHDYNAMO_DISABLE=1
NCCL_IB_TC=16
NCCL_IB_SL=5
NCCL_IB_GID_INDEX=3
# NOTE: NCCL treats this as a prefix match ("eth" covers eth0, eth1, ...);
# set "eth0" explicitly to pin a single interface (GLOO below does exact match)
NCCL_SOCKET_IFNAME=eth
NCCL_DEBUG=INFO
NCCL_IB_HCA=mlx5
NCCL_IB_TIMEOUT=22
NCCL_IB_QPS_PER_CONNECTION=8
NCCL_MIN_NCHANNELS=4
NCCL_NET_PLUGIN=none
GLOO_SOCKET_IFNAME=eth0
# =============================================================================
# DeepResearch Configuration
# =============================================================================
QWEN_DOC_PARSER_USE_IDP=false
QWEN_IDP_ENABLE_CSI=false
NLP_WEB_SEARCH_ONLY_CACHE=false
NLP_WEB_SEARCH_ENABLE_READPAGE=false
NLP_WEB_SEARCH_ENABLE_SFILTER=false
QWEN_SEARCH_ENABLE_CSI=false
SPECIAL_CODE_MODE=false
PYTHONDONTWRITEBYTECODE=1
# =============================================================================
# Model and Inference Hyperparameters
# =============================================================================
MODEL_PATH=/your/model/path
DATASET=your_dataset_name
OUTPUT_PATH=/your/output/path
ROLLOUT_COUNT=3
TEMPERATURE=0.85
PRESENCE_PENALTY=1.1
MAX_WORKERS=30
# =============================================================================
# API Keys and External Services
# =============================================================================
# Serper API for web search and Google Scholar
# Get your key from: https://serper.dev/
SERPER_KEY_ID=your_key
# Jina API for web page reading
# Get your key from: https://jina.ai/
JINA_API_KEYS=your_key
# Summary model API (OpenAI-compatible) for page summarization
# Get your key from: https://platform.openai.com/
API_KEY=your_key
API_BASE=your_api_base
SUMMARY_MODEL_NAME=your_summary_model_name
# Dashscope API for file parsing (PDF, Office, etc.)
# Get your key from: https://dashscope.aliyun.com/
# Supports: qwen-omni-turbo, qwen-plus-latest
DASHSCOPE_API_KEY=your_key
DASHSCOPE_API_BASE=your_api_base
VIDEO_MODEL_NAME=your_video_model_name
VIDEO_ANALYSIS_MODEL_NAME=your_analysis_model_name
# =============================================================================
# Python Code Execution Sandbox
# =============================================================================
# SandboxFusion endpoints for Python interpreter
# Example: "http://22.16.67.220:8080,http://22.16.78.153:8080,http://22.17.10.216:8080"
# See: https://github.com/bytedance/SandboxFusion
SANDBOX_FUSION_ENDPOINT=your_sandbox_endpoint
TORCH_COMPILE_CACHE_DIR=./cache
# =============================================================================
# IDP Service (Advanced File Parsing - Optional)
# =============================================================================
# IDP service for enhanced file parsing
# Set USE_IDP=True and provide credentials for more powerful parsing
# Documentation: https://help.aliyun.com/zh/document-mind/developer-reference/use-idp-llm-to-complete-document-summary
USE_IDP=False
IDP_KEY_ID=your_idp_key_id
IDP_KEY_SECRET=your_idp_key_secret
# =============================================================================
# Multi-Worker Configuration (Optional)
# =============================================================================
# These are typically set by distributed training frameworks
# WORLD_SIZE=1
# RANK=0