# sample.env
DJANGO_ADMIN_URL="79b780ab0b79492ebe031eafe9de0af4/"
# The wildcard for subdomains is ".domain.tld" (keep the leading dot, without the quotes, and do not add a '*'); see the example below.
DJANGO_ALLOWED_HOSTS="127.0.0.1,localhost,0.0.0.0,.domain.tld"
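# A minimal illustration (example.com is a placeholder, not part of this project) of allowing local hosts
# plus every subdomain of one domain:
# DJANGO_ALLOWED_HOSTS="127.0.0.1,localhost,.example.com"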
DJANGO_DEBUG="False"
DJANGO_SECRET_KEY="f4894ca8-07c8-4c57-ad50-5c9f186b873f"
DJANGO_SERVER_EMAIL="application@chatgpt.local"
DJANGO_SETTINGS_MODULE="llm_portal.settings.production"
OPENAI_API_BASE_URL="https://api.openai.com"
OPENAI_API_KEY="*********************************************"
AZURE_OPENAI_ENDPOINT="https://RESOURCE.openai.azure.com"
AZURE_OPENAI_API_KEY="***************************************************"
AZURE_OPENAI_API_VERSION="2023-12-01-preview"
# USED BY DOCKER / NGINX !
HOST="localhost"
PORT="8000"
NGINX_PORT=443
WEB_CONCURRENCY="2"
# doc conversion
LIBRE_OFFICE_PATH="/Applications/LibreOffice.app/Contents/MacOS/soffice"
## APP stuff
APP_NAME="test"
# Leave commented out if you have the passwords set in the fixtures
PASSWORD_INIT_USERS="1234"
# Set here the list of vector stores you want to use; see the example below.
# This allows the indexing phase to share the received embeddings and store them in multiple stores at once.
# Options: default, simple (= default), qdrant, pgvector, chroma, elastic
RAG_VECTOR_STORES="pgvector,qdrant,elastic"
RAG_VECTOR_STORE="pgvector"
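# A minimal single-store sketch (assuming only Qdrant is available); the list and the active store
# then point to the same backend:
# RAG_VECTOR_STORES="qdrant"
# RAG_VECTOR_STORE="qdrant"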
# 'mongodb' or 'postgres'
RAG_DOCUMENT_STORE="postgres"
# 'mongodb' or 'postgres'
RAG_INDEX_STORE="postgres"
PGVECTOR_DB="pgvectordb"
PGVECTOR_HOST="127.0.0.1"
PGVECTOR_PORT="5432"
PGVECTOR_USER="pgvectoruser"
PGVECTOR_PASSWORD="GOODPASSWORD_PGVECTOR"
PGVECTOR_TEXT_SEARCH_CONFIG="english"
# FOR NOW, the docker-compose setup uses the same server for both the document store and the index store:
# make sure host, user, and password are identical and that only the DB name changes (see the example below).
DOCSTORE_DB="mongodocstoredb"
DOCSTORE_HOST="127.0.0.1"
DOCSTORE_PORT="27017"
DOCSTORE_USER="mongouser"
DOCSTORE_PASSWORD="GOODPASSWORD_DOC"
INDEXSTORE_DB="mongoindexstoredb"
INDEXSTORE_HOST="127.0.0.1"
INDEXSTORE_PORT="27017"
INDEXSTORE_USER="mongouser"
INDEXSTORE_PASSWORD="GOODPASSWORD_INDEX"
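# A hypothetical matching pair (SHARED_MONGO_PASSWORD is a placeholder) where host, port, user, and
# password are shared and only the database name differs:
# DOCSTORE_DB="mongodocstoredb"
# DOCSTORE_PASSWORD="SHARED_MONGO_PASSWORD"
# INDEXSTORE_DB="mongoindexstoredb"
# INDEXSTORE_PASSWORD="SHARED_MONGO_PASSWORD"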
QDRANT_HOST="localhost"
ELASTIC_SEARCH_PORT=9200
ELASTIC_SEARCH_PASSWORD=YOUR_PASSWORD
# Used in production when switching from sqlite3 to a proper DB (only applied if specified)
DB_ENGINE="django.db.backends.postgresql"
DB_HOST="localhost"
DB_NAME="pgsqldb"
DB_PORT=5333
DB_USER="pgsql"
DB_PASSWORD="GOODPASSWORD_PGSQL"
PGBOUNCER_PORT=6432
PGBOUNCER_POOL_PREFIX="pgbouncer_"
# NOTE:
# if you get Error code: 404 - {'error': {'code': '404', 'message': 'Resource not found'}}
# check https://learn.microsoft.com/en-us/azure/ai-services/openai/reference
# it is often the API version that is wrong: https://stackoverflow.com/questions/75172116/why-im-getting-404-resource-not-found-to-my-newly-azure-openai-deployment
#
# FORMAT:
# provider:model@deployment or provider:model
#
# LLM_MODEL examples:
# "azure_openai:gpt-4@DEPLOYMENT"
# "openai:gpt-3.5-turbo-1106"
# "openai:gpt-4-1106-preview"
# "ollama:mistral"
# "vllm:mistralai/Mistral-7B-Instruct-v0.1"
# ...
# LLM_EMBEDDINGS examples:
# "local:local"
# "azure_openai:text-embedding-ada-002@km-ada002"
# "openai:text-embedding-ada-002"
# "hugging_face:..."
# ...
LLM_MODEL="azure_openai:gpt-4@km-gpt4-1106"
# DEPRECATED
# LLM_EMBEDDING="azure_openai:text-embedding-ada-002@km-ada002"
LLM_SYSTEM_MESSAGE="You are a helpful assistant. You will respond in a cheerful, friendly tone with a succinct style. Your responses should aim to be three paragraphs or less."
LLM_TEMPERATURE=0.15
LLM_TOKEN_COUNTER="True"
PORTAL_REGISTER_DISABLED=1
PORTAL_IMAGE="portal_image.svg"
PORTAL_COLOR_HEADER=
LOCUST_USER_ADMIN="admin@company.com"
LOCUST_USER_NORMAL="team1@company.com"
LOCUST_PWD_ADMIN="1234"
LOCUST_PWD_NORMAL="1234"
LOCUST_AUTH="HTTP_AUTH_USER:HTTP_AUTH_PASSWORD"
# When "True", the app uses scripts/libraries that have been pre-downloaded
OFFLINE_MODE="False"
# Tells the system whether conversations should be preloaded or fetched when clicked
CONVERSATIONS_LOAD_ALL="False"
# Uncomment and set a value if you want to switch to a static file server
#STATIC_URL=https://assets.mydomain.com/
#STATIC_ROOT="/home/www-data/staticfiles/"
# Continue to serve static files even in production (non-debug)
STATIC_SERVE_PRODUCTION="False"
# MS Graph stuff
# MS_GRAPH_TENANT_ID="****"
# MS_GRAPH_APP_ID="****"
# MS_GRAPH_SECRET="******"
# LEGACY STUFF to be able to roll back
# -------------------------------------------------------------------------------------------
# LLM_MODEL="openai:gpt-4-1106-preview"
# LLM_EMBEDDINGS="openai:text-embedding-ada-002"
# OPENAI_SYSTEM_MESSAGE="You are a helpful assistant. You will respond in a cheerful, friendly tone with a succinct style. Your responses should aim to be three paragraphs or less."
# OPENAI_MODEL="gpt-4-1106-preview"
# OPENAI_TEMPERATURE=0.3
# STORE="simple"
# LLAMA_INDEX_VECTOR_STORES="pgvector,qdrant"
# LLAMA_INDEX_VECTOR_STORE="pgvector"
# LLAMA_INDEX_DOCUMENT_STORE="mongodb"
# LLAMA_INDEX_INDEX_STORE="mongodb"
# LLAMA_INDEX_INDEX="vector"
# PORTAL_NAME="Knowledge Portal"
# APP_AUTO_LANGUAGES="en,fr"
# default loaders
# RAG_LOADING_STRATEGIES="default_v1"
# # Format: static | semantic | dynamic{ ...parameters_json_like... }
# # !! inside 'parameters_json_like', replace " with ` (backtick) and do not use ' !!
# RAG_CHUNKING_STRATEGIES="static|static{`chunk_size`:800,`chunk_overlap`:100}"
# # Indexing filters to exclude some combinations of parameters
# # RAG_INDEXING_CONFIGURATIONS_FILTER="___.*default.*static.*|___.*advanced.*semantic.*|en_.*default.*semantic.*"
# # 'vector', 'tree', 'summary'
# RAG_INDEXES="vector"
# RAG_EMBEDDING_MODELS="azure_openai:text-embedding-ada-002@km-ada002|hugging_face:Salesforce/SFR-Embedding-Mistral"
# RAG_VECTOR_STORE_HYBRID="True"