import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

repo_id = "jonruida/model-IC"
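# `model` and `tokenizer` are referenced below but never defined in the original
# snippet; a minimal sketch, assuming both load from `repo_id` above:
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)
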
query_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    torch_dtype=torch.float16,
    device_map="auto",
    max_new_tokens=200,
)
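
# `chroma_db` is used inside test_rag but never constructed in the original code.
# A minimal sketch, assuming a persisted LangChain Chroma store; the persist
# directory and the embedding model are assumptions, not from the original:
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma

embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
chroma_db = Chroma(persist_directory="chroma_db", embedding_function=embeddings)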
def test_rag(pipe, query):
    # Retrieve candidate documents together with their distance scores.
    docs = chroma_db.similarity_search_with_score(query)
    context = []
    for doc, score in docs:
        # Keep only documents whose distance score is below the threshold.
        if score < 7:
            doc_details = doc.to_json()['kwargs']
            context.append(doc_details['page_content'])
    if len(context) != 0:
        # Prompt (Spanish): "Based on the following information: <context>
        # Answer the question in Spanish: <query>"
        messages = [{
            "role": "user",
            "content": "Basándote en la siguiente información: " + "\n".join(context)
                       + "\n Responde en castellano a la pregunta: " + query,
        }]
        prompt = pipe.tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True)
        outputs = pipe(prompt, max_new_tokens=256, do_sample=True,
                       temperature=0.7, top_k=50, top_p=0.95)
        answer = outputs[0]["generated_text"]
        # Keep only the text after the final "[/INST]" marker; +8 skips the
        # 7-character marker plus the space that follows it.
        return answer[answer.rfind("[/INST]") + 8:], docs
    else:
        # Spanish: "I have no information to answer this question."
        return "No tengo información para responder a esta pregunta", docs