Skip to content

Commit d7dc866

Browse files
authored
Merge pull request #49 from openworm/development
Development->main
2 parents 44f39ed + 6211a21 commit d7dc866

5 files changed

Lines changed: 234 additions & 47 deletions

File tree

.github/workflows/ci-test.yml

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,10 +47,12 @@ jobs:
4747
4848
4949
- name: Test interactions requiring API (requires OPENAI key)
50-
if: ${{ matrix.runs-on == 'ubuntu-latest' && matrix.python-version == '3.12' }}
50+
if: ${{ matrix.runs-on == 'ubuntu-latest' && matrix.python-version == '3.12' && env.GITHUB_REPOSITORY_OWNER == 'openworm' }}
5151
env:
5252
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
5353
run: |
54+
echo "Running (paid) API commands. Env:"
55+
env
5456
./regenerateAndTest.sh # Test the parts requiring API keys...
5557
5658

openworm_ai/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
# Version of the Python module.
2-
__version__ = "0.3.2"
2+
__version__ = "0.4.0"
33

44

55
def print_(msg, print_it=True):

openworm_ai/graphrag/GraphRAG_test.py

Lines changed: 19 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -24,16 +24,23 @@
2424
import sys
2525
import json
2626

27-
2827
STORE_DIR = "store"
2928
SOURCE_DOCUMENT = "source document"
3029

3130
Settings.chunk_size = 3000
3231
Settings.chunk_overlap = 50
3332

3433

34+
def normalize_ollama_model_name(model: str) -> str:
35+
s = (model or "").strip()
36+
for prefix in ("Ollama:", "ollama:", "llama:"):
37+
if s.startswith(prefix):
38+
s = s[len(prefix) :].strip()
39+
return s
40+
41+
3542
def create_store(model):
36-
OLLAMA_MODEL = model.replace("Ollama:", "") if model is not LLM_GPT4o else None
43+
OLLAMA_MODEL = None if model == LLM_GPT4o else normalize_ollama_model_name(model)
3744

3845
json_inputs = glob.glob("processed/json/*/*.json")
3946

@@ -48,7 +55,7 @@ def create_store(model):
4855
print_(" Processing document: %s" % title)
4956
doc_contents = doc_model[title]
5057
src_page = doc_contents["source"]
51-
for section in doc_contents["sections"]:
58+
for section in doc_contents.get("sections", []):
5259
all_text = ""
5360
if "paragraphs" in doc_contents["sections"][section]:
5461
print_(
@@ -93,7 +100,7 @@ def create_store(model):
93100

94101

95102
def load_index(model):
96-
OLLAMA_MODEL = model.replace("Ollama:", "") if model is not LLM_GPT4o else None
103+
OLLAMA_MODEL = None if model == LLM_GPT4o else normalize_ollama_model_name(model)
97104

98105
print_("Creating a storage context for %s" % model)
99106

@@ -115,13 +122,19 @@ def load_index(model):
115122
)
116123
print_("Reloading index for %s" % model)
117124

125+
if OLLAMA_MODEL is not None:
126+
Settings.embed_model = OllamaEmbedding(model_name=OLLAMA_MODEL)
127+
118128
index_reloaded = load_index_from_storage(storage_context)
119129

120130
return index_reloaded
121131

122132

123133
def get_query_engine(index_reloaded, model, similarity_top_k=4):
124-
OLLAMA_MODEL = model.replace("Ollama:", "") if model is not LLM_GPT4o else None
134+
OLLAMA_MODEL = None if model == LLM_GPT4o else normalize_ollama_model_name(model)
135+
136+
if OLLAMA_MODEL is not None:
137+
Settings.embed_model = OllamaEmbedding(model_name=OLLAMA_MODEL)
125138

126139
print_("Creating query engine for %s" % model)
127140

@@ -147,7 +160,7 @@ def get_query_engine(index_reloaded, model, similarity_top_k=4):
147160

148161
# create a query engine for the index
149162
if OLLAMA_MODEL is not None:
150-
llm = Ollama(model=OLLAMA_MODEL, request_timeout=60.0)
163+
llm = Ollama(model=OLLAMA_MODEL, request_timeout=600.0)
151164

152165
ollama_embedding = OllamaEmbedding(
153166
model_name=OLLAMA_MODEL,

0 commit comments

Comments (0)