
Commit 862a4dd

Updated generate random string function
1 parent 47cd96e commit 862a4dd

File tree

2 files changed: +27 −21 lines


shuffle-ai/1.0.0/src/app.py

Lines changed: 26 additions & 20 deletions
@@ -51,21 +51,21 @@ def load_llm_model(model):
     }

     # Check for GPU layers
-    llm = None
+    innerllm = None
     gpu_layers = os.getenv("GPU_LAYERS")
     if gpu_layers:
         gpu_layers = int(gpu_layers)
         if gpu_layers > 0:
             print("GPU Layers: %s" % gpu_layers)
-            llm = llama_cpp.Llama(model_path=model, n_gpu_layers=gpu_layers)
+            innerllm = llama_cpp.Llama(model_path=model, n_gpu_layers=gpu_layers)
         else:
-            llm = llama_cpp.Llama(model_path=model)
+            innerllm = llama_cpp.Llama(model_path=model)
     else:
         # Check if GPU available
         #print("No GPU layers set.")
-        llm = llama_cpp.Llama(model_path=model)
+        innerllm = llama_cpp.Llama(model_path=model)

-    return llm
+    return innerllm

 llm = load_llm_model(model)

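Aside: the GPU_LAYERS handling above follows the usual llama-cpp-python pattern, where n_gpu_layers sets how many model layers are offloaded to the GPU. A minimal standalone sketch of the same logic (the env var name and the llama_cpp.Llama parameters come from the diff; the "0" fallback default is an assumption):

    import os
    import llama_cpp

    def load_llm_model(model_path):
        # Offload layers to the GPU only when GPU_LAYERS is a positive integer;
        # otherwise build a CPU-only model.
        gpu_layers = int(os.getenv("GPU_LAYERS", "0"))  # default of "0" assumed here
        if gpu_layers > 0:
            return llama_cpp.Llama(model_path=model_path, n_gpu_layers=gpu_layers)
        return llama_cpp.Llama(model_path=model_path)
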
@@ -76,8 +76,6 @@ class Tools(AppBase):
     def __init__(self, redis, logger, console_logger=None):
         super().__init__(redis, logger, console_logger)

-    #def run_llm(self, question, model="llama3.2"):
-    #def run_llm(self, question, model="deepseek-v3"):
     def run_llm(self, input, system_message=""):
         global llm
         global model
@@ -88,19 +86,27 @@ def run_llm(self, input, system_message=""):
         self.logger.info("[DEBUG] Running LLM with model '%s'. To overwrite path, use environment variable MODEL_PATH=<path>" % model)

         # https://github.com/abetlen/llama-cpp-python
-        output = llm.create_chat_completion(
-            max_tokens=100,
-            messages = [
-                {
-                    "role": "system",
-                    "content": system_message,
-                },
-                {
-                    "role": "user",
-                    "content": input,
-                }
-            ]
-        )
+        try:
+            self.logger.info("[DEBUG] LLM: %s" % llm)
+            output = llm.create_chat_completion(
+                max_tokens=100,
+                messages = [
+                    {
+                        "role": "system",
+                        "content": system_message,
+                    },
+                    {
+                        "role": "user",
+                        "content": input,
+                    }
+                ]
+            )
+        except Exception as e:
+            return {
+                "success": False,
+                "reason": f"Failed to run local LLM. Check logs in this execution for more info: {self.current_execution_id}",
+                "details": f"{e}"
+            }

         self.logger.info("[DEBUG] LLM output: %s" % output)

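Net effect of the hunk above: run_llm can now hand back a structured error dict instead of raising out of the app. The diff does not show what run_llm ultimately returns on success, so the helper below is a hypothetical sketch that assumes the raw llama-cpp-python completion dict (OpenAI-style chat completion) is passed through; the success/reason/details keys come from the diff:

    def handle_llm_result(result):
        # Structured failure produced by the new except branch in run_llm
        if isinstance(result, dict) and result.get("success") is False:
            return "LLM failed: %s (details: %s)" % (result["reason"], result["details"])
        # Otherwise assume llama-cpp-python's OpenAI-style completion shape
        return result["choices"][0]["message"]["content"]
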
shuffle-tools/1.2.0/src/app.py

Lines changed: 1 addition & 1 deletion
@@ -2546,7 +2546,7 @@ def get_standardized_data(self, json_input, input_type):
             "changed_fields": important_fields,
         }

-    def generate_random_string(length=16, special_characters=True):
+    def generate_random_string(self, length=16, special_characters=True):
         try:
             length = int(length)
         except:
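The hunk is truncated here. The visible fix adds self to the signature: generate_random_string is an instance method on Tools, so without self the instance itself would have been bound to length. For orientation, a hypothetical standalone version of such a helper (the int() coercion comes from the diff; the body is otherwise assumed):

    import random
    import string

    def generate_random_string(length=16, special_characters=True):
        # Coerce length defensively; fall back to the default on bad input
        try:
            length = int(length)
        except (TypeError, ValueError):
            length = 16

        alphabet = string.ascii_letters + string.digits
        if special_characters:
            alphabet += string.punctuation

        # SystemRandom draws from os.urandom, useful if the string must be unguessable
        return "".join(random.SystemRandom().choice(alphabet) for _ in range(length))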
