Skip to content

Commit 53c726d

Browse files
committed
fix minor bug in demo - Adithya S K
1 parent 745ab2b commit 53c726d

File tree

2 files changed

+3
-60
lines changed

2 files changed

+3
-60
lines changed

demo.py

Lines changed: 3 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -281,7 +281,9 @@ def gradio_interface():
281281
50, 5000, value=200, step=10, label="Chunk Size (for SimpleRAG)"
282282
)
283283
ingest_button = gr.Button("Ingest PDFs")
284-
ingest_output = gr.Markdown(label="Ingestion Status", lines=10)
284+
ingest_output = gr.Markdown(
285+
label="Ingestion Status :",
286+
)
285287
progress_table = gr.DataFrame(
286288
label="Ingestion Progress", headers=["Technique", "Time Taken (s)"]
287289
)

varag/rag/_simpleRAG.py

Lines changed: 0 additions & 59 deletions
Original file line number | Diff line number | Diff line change
@@ -271,62 +271,3 @@ def change_table(self, new_table_name: str):
271271
self.table_name, schema=self.schema, exist_ok=True
272272
)
273273
print(f"Switched to new table: {self.table_name}")
274-
275-
276-
# Usage example:
277-
if __name__ == "__main__":
278-
from sentence_transformers import SentenceTransformer
279-
280-
# Initialize embedding model
281-
embedding_model = SentenceTransformer("all-MiniLM-L6-v2", trust_remote_code=True)
282-
283-
# Initialize TextRAG
284-
text_rag = SimpleRAG(
285-
text_embedding_model=embedding_model,
286-
db_path="~/visionrag_db",
287-
table_name="default_table",
288-
)
289-
290-
# Initialize OpenAI client
291-
llm = OpenAI()
292-
293-
# Index PDFs
294-
pdf_paths = ["path/to/pdf1.pdf", "path/to/pdf2.pdf"]
295-
result = text_rag.index(
296-
pdf_paths,
297-
recursive=True,
298-
chunking_strategy=FixedTokenChunker(chunk_size=500),
299-
metadata={"source": "example_data"},
300-
overwrite=True,
301-
verbose=True,
302-
ocr=True,
303-
)
304-
print(result)
305-
306-
# Search for relevant chunks
307-
query = "What is the main topic of the documents?"
308-
search_results = text_rag.search(query, k=5)
309-
print("Search results:", search_results)
310-
311-
# Generate response using OpenAI
312-
context = "\n".join([r["text"] for r in search_results])
313-
response = llm.chat.completions.create(
314-
model="gpt-4",
315-
messages=[
316-
{"role": "system", "content": "You are a helpful assistant."},
317-
{"role": "user", "content": f"Context: {context}\n\nQuestion: {query}"},
318-
],
319-
)
320-
print("Generated response:", response.choices[0].message.content)
321-
322-
# Add new documents to the index
323-
new_pdf_paths = ["path/to/new_pdf.pdf"]
324-
result = text_rag.add_to_index(
325-
new_pdf_paths,
326-
recursive=False,
327-
chunking_strategy=FixedTokenChunker(chunk_size=500),
328-
metadata={"project": "new_project"},
329-
verbose=True,
330-
ocr=False,
331-
)
332-
print(result)

0 commit comments

Comments (0)