diff --git a/Microservices/LLM/Initialize.py b/Microservices/LLM/Initialize.py index 61f5a67..bd4acbb 100755 --- a/Microservices/LLM/Initialize.py +++ b/Microservices/LLM/Initialize.py @@ -1,5 +1,6 @@ from pdftxt import handlePDF, handleTXT, Embed from spreadsheet import handleSS +from static.Tokens import calEtokens import pandas as pd from sql import sequel import os @@ -21,18 +22,25 @@ def __init__(self): self.eToken = 0 self.db="" self.initialized = True + self.ready=False def initdb(self, file_type, name): if(str(file_type) == 'pdf'): pdf = handlePDF(f'tmp/{name}.pdf') load = pdf.extract_text_from_pdf() - db,eToken = Embed.getEmbeddings(load) - self.eToken = eToken - self.db=db + wordCount = calEtokens(load) + if(wordCount < 5000): + db,eToken = Embed.getEmbeddings(load) + self.eToken = eToken + self.db=db + self.ready=True + else: + self.eToken = 0 + self.db=0 Free.doFree(f'tmp/{name}.pdf') def initret(self): - return self.eToken,self.db + return self.ready,self.eToken,self.db class Initxt: @@ -48,18 +56,25 @@ def __init__(self): self.eToken = 0 self.db="" self.initialized = True + self.ready=False def initdb(self, file_type, name): if(str(file_type) == 'txt'): txt = handleTXT(f'tmp/{name}.txt') load = txt.extract_text_from_txt() - db,eToken = Embed.getEmbeddings(load) - self.eToken = eToken - self.db=db + wordCount = calEtokens(load) + if(wordCount < 5000): + db,eToken = Embed.getEmbeddings(load) + self.eToken = eToken + self.db=db + self.ready=True + else: + self.eToken = 0 + self.db=0 Free.doFree(f'tmp/{name}.txt') def initret(self): - return self.eToken,self.db + return self.ready,self.eToken,self.db class Initcsv: @@ -75,18 +90,24 @@ def __init__(self): self.eToken = 0 self.db="" self.initialized = True + self.ready=False def initdb(self, file_type,name): if(str(file_type) == 'csv'): file = handleSS(f'tmp/{name}.csv') csvFile = file.loadData() - db,eToken = file.EmbedSS.getEmbeddings(csvFile) - self.eToken = eToken - self.db=db + if(calEtokens(csvFile)<5000): + db,eToken = file.EmbedSS.getEmbeddings(csvFile) + self.eToken = eToken + self.db=db + self.ready=True + else: + self.eToken=0 + self.db="" Free.doFree(f'tmp/{name}.csv') def initret(self): - return self.eToken,self.db + return self.ready,self.eToken,self.db @@ -103,19 +124,25 @@ def __init__(self): self.eToken = 0 self.db="" self.initialized = True + self.ready=False def initdb(self, file_type, name): if(str(file_type) == 'xlsx'): file = handleSS(f'tmp/{name}.xlsx') fle = pd.read_excel(f'tmp/{name}.xlsx') xlFile = file.handleExcel(fle) - db,eToken = file.EmbedSS.getEmbeddings(xlFile) - self.eToken = eToken - self.db=db + if(calEtokens(xlFile)<5000): + db,eToken = file.EmbedSS.getEmbeddings(xlFile) + self.eToken = eToken + self.db=db + self.ready=True + else: + self.eToken=0 + self.db="" Free.doFree(f'tmp/{name}.xlsx') def initret(self): - return self.eToken, self.db + return self.ready,self.eToken, self.db class Initsql: _instance = None @@ -131,20 +158,25 @@ def __init__(self): self.name="" self.initialized = True self.file=False + self.ready=False def initdb(self, file_type,name): if(str(file_type) == 'sql'): getsql= sequel(f'tmp/{name}.sql') sqliteCon = getsql.convert_mysql_to_sqlite() cToken = len(sqliteCon.split()) - sqliteFile = getsql.splite_script_to_db(f'{name}.db',sqliteCon) - if sqliteFile: - self.file = 1 + if(cToken < 5000): + sqliteFile = getsql.splite_script_to_db(f'{name}.db',sqliteCon) + if sqliteFile: + self.file = 1 + else: + self.file = 0 + self.cToken = cToken + self.name = name + self.ready=True 
else: - self.file = 0 - self.cToken = cToken - self.name = name + self.ready=False Free.doFree(f'tmp/{name}.sql') def initret(self): - return self.cToken, self.file, self.name + return self.ready,self.cToken, self.file, self.name diff --git a/Microservices/LLM/Tokens.py b/Microservices/LLM/Tokens.py deleted file mode 100755 index d381f8a..0000000 --- a/Microservices/LLM/Tokens.py +++ /dev/null @@ -1,6 +0,0 @@ -import math -def calTokens(file): - chunkLen=0 - for x in file: - chunkLen = chunkLen + len(x.page_content.split()) - return math.floor(chunkLen*1.334) diff --git a/Microservices/LLM/app.py b/Microservices/LLM/app.py index d732508..ec196d6 100755 --- a/Microservices/LLM/app.py +++ b/Microservices/LLM/app.py @@ -14,13 +14,13 @@ from langchain_core.runnables import RunnablePassthrough from langchain.chains import ConversationChain from langchain.memory import ConversationSummaryBufferMemory +from langchain_community.callbacks.manager import get_openai_callback from langchain_community.vectorstores import FAISS -from Tokens import calTokens from static.Filename import filename +from static.Chain import chain import os import traceback import redis -import math app = Flask(__name__) @@ -28,12 +28,12 @@ app.config['CORS_HEADERS'] = 'Content-Type' status = "active" os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY') -llm = ChatOpenAI() +llm = ChatOpenAI(max_tokens=500) redis_host = os.getenv('REDIS_HOST') redis_port = os.getenv('REDIS_PORT') -redis_client = redis.StrictRedis(host=redis_host, port=redis_port, db=0) +redis_client = redis.Redis(host=redis_host, port=redis_port, db=0) try: path = os.path.dirname(os.path.abspath(__file__)) @@ -60,7 +60,7 @@ def uploadFile(name): @app.route('/') def hello_world(): - return jsonify({"status":status,"Value":'LLM Server Running Successsfully',"Version":1.3}) + return jsonify({"status":status,"Value":'LLM Server Running Successfully',"Version":1.4}) @app.route('/uploadfile//',methods=['POST','GET']) @cross_origin() @@ -75,9 +75,12 @@ def uploaded(file_type,sid): if(upload): file=Init() file.initdb(file_type,name) - eToken,db = file.initret() - redis_client.hset(sid, 'db', db) - redis_client.hset(sid, 'eToken', eToken) + ready,eToken,db = file.initret() + if(ready): + redis_client.hset(sid, 'db', db) + redis_client.hset(sid, 'eToken', eToken) + else: + return jsonify({"success":False,"msg":"File size larger than 5000 tokens"}) return success else: return fail @@ -87,9 +90,12 @@ def uploaded(file_type,sid): if(upload): file = Initxt() file.initdb(file_type,name) - eToken,db = file.initret() - redis_client.hset(sid, 'db', db) - redis_client.hset(sid, 'eToken', eToken) + ready,eToken,db = file.initret() + if(ready): + redis_client.hset(sid, 'db', db) + redis_client.hset(sid, 'eToken', eToken) + else: + return jsonify({"success":False,"msg":"File size larger than 5000 tokens"}) return success else: return fail @@ -98,9 +104,12 @@ def uploaded(file_type,sid): if(upload): file = Initcsv() file.initdb(file_type,name) - eToken,db = file.initret() - redis_client.hset(sid, 'db', db) - redis_client.hset(sid, 'eToken', eToken) + ready,eToken,db = file.initret() + if(ready): + redis_client.hset(sid, 'db', db) + redis_client.hset(sid, 'eToken', eToken) + else: + return jsonify({"success":False,"msg":"File size larger than 5000 tokens"}) return success else: return fail @@ -109,9 +118,12 @@ def uploaded(file_type,sid): if(upload): file = Initxlsx() file.initdb(file_type,name) - eToken,db = file.initret() - redis_client.hset(sid, 'db', db) -
redis_client.hset(sid, 'eToken', eToken) + ready,eToken,db = file.initret() + if(ready): + redis_client.hset(sid, 'db', db) + redis_client.hset(sid, 'eToken', eToken) + else: + return jsonify({"success":False,"msg":"File size larger than 5000 tokens"}) return success else: return fail @@ -120,10 +132,13 @@ def uploaded(file_type,sid): if(upload): file = Initsql() file.initdb(file_type,name) - cToken,go,name = file.initret() - redis_client.hset(sid, 'go', go) - redis_client.hset(sid, 'cToken', cToken) - redis_client.hset(sid, 'name',name) + ready,cToken,go,name = file.initret() + if(ready): + redis_client.hset(sid, 'go', go) + redis_client.hset(sid, 'cToken', cToken) + redis_client.hset(sid, 'name',name) + else: + return jsonify({"success":False,"msg":"File size larger than 5000 tokens"}) return success else: return fail @@ -134,7 +149,7 @@ def uploaded(file_type,sid): def chat(): req = request.json ques = req.get('query') - memory = ConversationSummaryBufferMemory(llm=llm,max_token_limit=100) + memory = ConversationSummaryBufferMemory(llm=llm,max_token_limit=500) conversation = ConversationChain(llm=llm,memory=memory) result = conversation.predict(input=ques) cToken = len(ques.split()) @@ -148,13 +163,12 @@ def chatpdf(sid): ques = req.get('query') eToken = int(redis_client.hget(sid, 'eToken')) db = FAISS.deserialize_from_bytes(embeddings=OpenAIEmbeddings(), serialized=redis_client.hget(sid, 'db'),allow_dangerous_deserialization=True) - doc = db.similarity_search(ques) - cToken = calTokens(doc) - chain = load_qa_chain(llm,chain_type='stuff') - result = chain.run(input_documents=doc,question=ques) - gToken = math.floor(len(result.split())*1.334) - total = cToken + eToken + gToken - res = jsonify({"result":result,"cToken":cToken+eToken,"gToken":gToken,"total":total}) + retriever = db.as_retriever(search_type='mmr',search_kwargs={'k':3}) + chains = chain(retriever, llm) + with get_openai_callback() as cb: + result = chains.invoke(ques) + total = cb.prompt_tokens + eToken + cb.completion_tokens + res = jsonify({"result":result,"cToken":cb.prompt_tokens+eToken,"gToken":cb.completion_tokens,"total":total}) return (res) @app.route('/chat/txt/', methods=['POST', 'GET']) @@ -163,13 +177,12 @@ def chattxt(sid): ques = req.get('query') eToken = int(redis_client.hget(sid, 'eToken')) db = FAISS.deserialize_from_bytes(embeddings=OpenAIEmbeddings(), serialized=redis_client.hget(sid, 'db'),allow_dangerous_deserialization=True) - doc = db.similarity_search(ques) - cToken = calTokens(doc) - chain = load_qa_chain(llm,chain_type='stuff') - result = chain.run(input_documents=doc,question=ques) - gToken = math.floor(len(result.split())*1.334) - total = cToken + eToken + gToken - res = jsonify({"result":result,"cToken":cToken+eToken,"gToken":gToken,"total":total}) + retriever = db.as_retriever(search_type='mmr',search_kwargs={'k':3}) + chains = chain(retriever, llm) + with get_openai_callback() as cb: + result = chains.invoke(ques) + total = cb.prompt_tokens + eToken + cb.completion_tokens + res = jsonify({"result":result,"cToken":cb.prompt_tokens+eToken,"gToken":cb.completion_tokens,"total":total}) return (res) @app.route('/chat/csv/', methods=['POST', 'GET']) @@ -178,13 +191,13 @@ def chatcsv(sid): ques = req.get('query') eToken = int(redis_client.hget(sid, 'eToken')) db = FAISS.deserialize_from_bytes(embeddings=OpenAIEmbeddings(), serialized=redis_client.hget(sid, 'db'),allow_dangerous_deserialization=True) - doc = db.similarity_search(ques) - cToken = calTokens(doc) - chain =
load_qa_chain(llm,chain_type='stuff') - result = chain.run(input_documents=doc,question=ques) - gToken = math.floor(len(result.split())*1.334) - total = cToken + eToken + gToken - res = jsonify({"result":result,"cToken":cToken+eToken,"gToken":gToken,"total":total}) + retriever = db.as_retriever(search_type='mmr',search_kwargs={'k':3}) + chains = chain(retriever, llm) + with get_openai_callback() as cb: + result = chains.invoke(ques) + total = cb.prompt_tokens + eToken + cb.completion_tokens + print(eToken, cb.prompt_tokens,cb.completion_tokens) + res = jsonify({"result":result,"cToken":cb.prompt_tokens+eToken,"gToken":cb.completion_tokens,"total":total}) return (res) @app.route('/chat/xlsx/', methods=['POST', 'GET']) @@ -193,13 +206,12 @@ def chatxlsx(sid): ques = req.get('query') eToken = int(redis_client.hget(sid, 'eToken')) db = FAISS.deserialize_from_bytes(embeddings=OpenAIEmbeddings(), serialized=redis_client.hget(sid, 'db'),allow_dangerous_deserialization=True) - doc = db.similarity_search(ques) - cToken = calTokens(doc) - chain = load_qa_chain(llm,chain_type='stuff') - result = chain.run(input_documents=doc,question=ques) - gToken = math.floor(len(result.split())*1.334) - total = cToken + eToken + gToken - res = jsonify({"result":result,"cToken":cToken+eToken,"gToken":gToken,"total":total}) + retriever = db.as_retriever(search_type='mmr',search_kwargs={'k':3}) + chains = chain(retriever, llm) + with get_openai_callback() as cb: + result = chains.invoke(ques) + total = cb.prompt_tokens + eToken + cb.completion_tokens + res = jsonify({"result":result,"cToken":cb.prompt_tokens+eToken,"gToken":cb.completion_tokens,"total":total}) return (res) @app.route('/chat/sql/', methods=['POST', 'GET']) diff --git a/Microservices/LLM/pdftxt.py b/Microservices/LLM/pdftxt.py index a2738a8..bfba867 100755 --- a/Microservices/LLM/pdftxt.py +++ b/Microservices/LLM/pdftxt.py @@ -2,11 +2,14 @@ from langchain_community.vectorstores import FAISS from langchain.text_splitter import CharacterTextSplitter from langchain_community.document_loaders import TextLoader -from Tokens import calTokens +from static.Tokens import calEtokens import fitz # PyMuPDF import pytesseract from pdf2image import convert_from_path +from PIL import Image +import io +import re @@ -23,37 +26,40 @@ class handlePDF: def __init__(self,path): self.path=path # Function to determine PDF type + def ocr_image(self,image): + return pytesseract.image_to_string(image) + + def clean_text(self,text): + text = re.sub(r'\n\s*\n', '\n', text) + text = re.sub(r'[ \t]+', ' ', text) + text = text.strip() + return text + + def extract_text_from_pdf(self): - """Returns extrcated text from PDF only""" - file_path = self.path - pdf_document = fitz.open(file_path) - page = pdf_document.load_page(0) # Load the first page - text = page.get_text() - pdf_document.close() - - if text.strip(): - # Use fitz (PyMuPDF) for digital text PDF - return self.extract_text_with_fitz(file_path) - else: - # Use OCR for scanned PDF - return self.extract_text_with_ocr(file_path) - - def extract_text_with_fitz(self,file_path): - pdf_document = fitz.open(file_path) - text = "" + pdfText="" + pdf_document = fitz.open(self.path) for page_num in range(pdf_document.page_count): page = pdf_document.load_page(page_num) - text += page.get_text() - pdf_document.close() - result = [Document(page_content=text, metadata={'source': self.path, 'page': len(text)})] - return result - def extract_text_with_ocr(self,file_path): - images = convert_from_path(file_path) - text = "" - for image in 
images: - text += pytesseract.image_to_string(image) - result = [Document(page_content=text, metadata={'source': self.path, 'page': len(text)})] + # Extract text from the page + text = page.get_text() + pdfText = pdfText+text + pdfText = pdfText+'\n' + + # Extract images from the page + images = page.get_images(full=True) + for img_index, img in enumerate(images): + xref = img[0] + base_image = pdf_document.extract_image(xref) + image_bytes = base_image["image"] + image = Image.open(io.BytesIO(image_bytes)) + ocr_text = self.ocr_image(image) + pdfText = pdfText+ocr_text + pdfText = pdfText+'\n' + pdf_document.close() + pdfText = self.clean_text(pdfText) + result = [Document(page_content=pdfText, metadata={'source': self.path, 'page': len(pdfText.split())})] return result @@ -62,7 +68,7 @@ def __init__(self,path): self.path=path def extract_text_from_txt(self): - """Returns extrcated text from TXT only""" + """Returns extracted text from TXT only""" loader = TextLoader(self.path) text = loader.load() return text @@ -75,6 +81,6 @@ def getEmbeddings(load): embedding_function = OpenAIEmbeddings() db = FAISS.from_documents(docs, embedding_function) pkl = db.serialize_to_bytes() - eTokens = calTokens(docs) + eTokens = calEtokens(load) return pkl,eTokens diff --git a/Microservices/LLM/spreadsheet.py b/Microservices/LLM/spreadsheet.py index ac58ad3..04022d4 100755 --- a/Microservices/LLM/spreadsheet.py +++ b/Microservices/LLM/spreadsheet.py @@ -4,7 +4,8 @@ from langchain_community.vectorstores import FAISS from langchain_community.document_loaders import CSVLoader, UnstructuredExcelLoader from langchain.text_splitter import CharacterTextSplitter -from Tokens import calTokens +from static.Tokens import calEtokens + class Document: @@ -84,7 +85,7 @@ def getEmbeddings(file): # load it into Chroma db = FAISS.from_documents(chunks, embedding_function) pkl = db.serialize_to_bytes() - eToken = calTokens(chunks) + eToken = calEtokens(file) return pkl,eToken diff --git a/Microservices/LLM/sql.py b/Microservices/LLM/sql.py index 612e0b5..ee72fa1 100755 --- a/Microservices/LLM/sql.py +++ b/Microservices/LLM/sql.py @@ -23,7 +23,14 @@ def splite_script_to_db(self,db_name,sqlFile): conn.close() return mesg - + def replace_primary_key(self, match): + constraint_name = match.group(1) + column_name = match.group(2) + return f"PRIMARY KEY ({column_name})" + + def remove_n_prefix(self,match): + return "'" + match.group(1) + "'" + def convert_mysql_to_sqlite(self): mysql_sql_path = self.path # Function to convert MySQL SQL dump to SQLite SQL @@ -39,7 +46,8 @@ def convert_mysql_to_sqlite(self): r'\bVARCHAR\((\d+)\)\b': 'TEXT', r'\bDATETIME\b': 'TEXT', # SQLite can store dates as TEXT r'\bTIMESTAMP\b': 'TEXT', - r'\bDOUBLE\b': 'REAL' + r'\bDOUBLE\b': 'REAL', + r"NVARCHAR\(\d+\)": "TEXT" } for mysql_type, sqlite_type in type_mappings.items(): @@ -47,12 +55,17 @@ def convert_mysql_to_sqlite(self): mysql_sql = re.sub(r'`', '', mysql_sql) # Remove backticks mysql_sql = re.sub(r'\bENGINE=\w+\b', '', mysql_sql, flags=re.IGNORECASE) + mysql_sql = re.sub(r"DROP\s+DATABASE\s+IF\s+EXISTS\s+\w+\s*;", '', mysql_sql, flags=re.IGNORECASE) + mysql_sql = re.sub(r"CREATE\s+DATABASE\s+\w+\s*;",'', mysql_sql, flags=re.IGNORECASE) + mysql_sql = re.sub(r"USE\s+\w+\s*;",'', mysql_sql, flags=re.IGNORECASE ) + mysql_sql = re.sub(r"CONSTRAINT\s+PK_(\w+)\s+PRIMARY\s+KEY\s+\(\s*(\w+(?:,\s*\w+)*)\s*\)",self.replace_primary_key,mysql_sql, flags=re.IGNORECASE) mysql_sql = re.sub(r'\bDEFAULT CHARSET=\w+\b', '', mysql_sql, flags=re.IGNORECASE) 
mysql_sql = re.sub(r'\bCOLLATE=\w+\b', '', mysql_sql, flags=re.IGNORECASE) mysql_sql = re.sub(r'\bCHARACTER SET \w+\b', '', mysql_sql, flags=re.IGNORECASE) mysql_sql = re.sub(r'\bLOCK TABLES\b.*?;', '', mysql_sql, flags=re.IGNORECASE | re.DOTALL) mysql_sql = re.sub(r'\bUNLOCK TABLES\b;', '', mysql_sql, flags=re.IGNORECASE) - + mysql_sql = re.sub(r"ALTER\s+TABLE\s+(\w+)\s+ADD\s+CONSTRAINT\s+(\w+)\s+FOREIGN\s+KEY\s+\(\s*(\w+)\s*\)\s+REFERENCES\s+(\w+)\s*\(\s*(\w+)\s*\)\s+ON\s+DELETE\s+(NO ACTION|CASCADE|SET NULL|SET DEFAULT)\s+ON\s+UPDATE\s+(NO ACTION|CASCADE|SET NULL|SET DEFAULT)\s*;",r"/* SQLite does not support ALTER TABLE ADD CONSTRAINT */",mysql_sql, flags=re.IGNORECASE) + mysql_sql = re.sub(r"N'((?:[^']|'')*)'", self.remove_n_prefix, mysql_sql, flags=re.IGNORECASE) # Replace NOW() with CURRENT_TIMESTAMP mysql_sql = re.sub(r'\bNOW\(\)\b', 'CURRENT_TIMESTAMP', mysql_sql, flags=re.IGNORECASE) @@ -60,4 +73,5 @@ def convert_mysql_to_sqlite(self): mysql_sql = "PRAGMA foreign_keys = ON;\n" + mysql_sql return mysql_sql + diff --git a/Microservices/LLM/static/Chain.py b/Microservices/LLM/static/Chain.py new file mode 100644 index 0000000..438b820 --- /dev/null +++ b/Microservices/LLM/static/Chain.py @@ -0,0 +1,28 @@ +from langchain_core.output_parsers import StrOutputParser +from langchain_core.runnables import RunnablePassthrough +from langchain.prompts.chat import ( + ChatPromptTemplate, + SystemMessagePromptTemplate, +) + + +def chain(retriever, llm): + template = """Answer the question based only on the following context in the document provided by the user: + + {context} + + Question: {question} + """ + template_S = "You are a helpful assistant that performs information retrieval from the provided document." + system_message_prompt = SystemMessagePromptTemplate.from_template(template_S) + + + prompt = ChatPromptTemplate.from_messages([system_message_prompt, template]) + chain = ( + {"context": retriever, "question": RunnablePassthrough()} + | prompt + | llm + | StrOutputParser() + ) + + return chain diff --git a/Microservices/LLM/static/Tokens.py b/Microservices/LLM/static/Tokens.py new file mode 100644 index 0000000..2f001a1 --- /dev/null +++ b/Microservices/LLM/static/Tokens.py @@ -0,0 +1,13 @@ +import tiktoken +import math + +def calEtokens(load): + encoding = tiktoken.get_encoding("cl100k_base") + chunkLen=0 + for x in load: + chunkLen = chunkLen + len(encoding.encode(x.page_content)) + return math.floor(chunkLen/2.2) + + + + \ No newline at end of file diff --git a/Microservices/LLM/static/__pycache__/Chain.cpython-310.pyc b/Microservices/LLM/static/__pycache__/Chain.cpython-310.pyc new file mode 100644 index 0000000..488b429 Binary files /dev/null and b/Microservices/LLM/static/__pycache__/Chain.cpython-310.pyc differ diff --git a/Microservices/LLM/static/__pycache__/Tokens.cpython-310.pyc b/Microservices/LLM/static/__pycache__/Tokens.cpython-310.pyc new file mode 100644 index 0000000..a9ed38b Binary files /dev/null and b/Microservices/LLM/static/__pycache__/Tokens.cpython-310.pyc differ diff --git a/Microservices/User_auth/models/User.js b/Microservices/User_auth/models/User.js index 80a59cf..4d7c835 100755 --- a/Microservices/User_auth/models/User.js +++ b/Microservices/User_auth/models/User.js @@ -17,6 +17,10 @@ const UserSchema = new Schema({ type:String, required:true }, + userType:{ + type:String, + default:'free' + }, currentToken:{ cToken:{ type:Long, diff --git a/src/Components/Faq.js b/src/Components/Faq.js index 5843950..a08ecf5 100644 --- a/src/Components/Faq.js +++ 
b/src/Components/Faq.js @@ -24,7 +24,7 @@ const Faq = () => {
Docschat Support five types of Documents
-  • PDF : PDF files with no images
+  • PDF : Any type of PDF files: text, images or mixed
   • TXT : Any type of text file
   • CSV : Any type of CSV file
   • Excel : Any type of Excel file
   •
diff --git a/src/Components/Navbar.js b/src/Components/Navbar.js index a91180f..743dc15 100755 --- a/src/Components/Navbar.js +++ b/src/Components/Navbar.js @@ -120,7 +120,7 @@ const Navbar = (props) => {
+

*More Tokens can be taken by clicking Get More Tokens from dropdown

Disclaimer : As of Now No plans are added, all services are free

diff --git a/src/Components/TaskItem.js b/src/Components/TaskItem.js index 07faff6..d349b9f 100755 --- a/src/Components/TaskItem.js +++ b/src/Components/TaskItem.js @@ -15,8 +15,7 @@ const TaskItem = (props) => { const [disable,setDisable] = useState(null) const navigate= useNavigate(); const dispatch = useDispatch() - // const type = useSelector(state => state.type) - // const actions = bindActionCreators(actionCreator, dispatch); + const [status, setStatus] = useState("") const handleDOCChange = (event) => { setPdfFile(event.target.files[0]); @@ -50,11 +49,13 @@ const TaskItem = (props) => { navigate('/login') alert("Kindly Login First","primary") }else{ + setStatus("checking file type") if(pdfFile === null){ alert("kindly select file","danger") }else if(pdfFile.type !== type){ alert("Kindly Select Appropriate File","danger") - }else{ + }else{ + setStatus("uploading") setLoader("spinner-border spinner-border-sm me-2") setDisable(true) const ready = await checkUser(100) @@ -66,7 +67,6 @@ const TaskItem = (props) => { const formData = new FormData(); formData.append('file', pdfFile); - const response = await fetch(`${llm_host}/uploadfile/${btnRef}/${Sid}`, { method: "POST", body:formData @@ -99,6 +99,7 @@ const TaskItem = (props) => { } } } + setStatus("") }; return (
@@ -120,8 +121,9 @@ const TaskItem = (props) => {
- [JSX markup stripped during extraction]
+ [JSX markup stripped during extraction]
+ {status}
diff --git a/src/Styling/pricing.css b/src/Styling/pricing.css index 808d73b..5da5c4f 100644 --- a/src/Styling/pricing.css +++ b/src/Styling/pricing.css @@ -4,7 +4,7 @@ flex-wrap: wrap; position: relative; row-gap: 10px; - column-gap: 90px; + column-gap: 70px; width: auto; justify-content: center; align-items: center; @@ -15,9 +15,6 @@ width:300px; position: relative; background: linear-gradient(10deg, rgba(11, 86, 90, 0.192) 0%, rgba(90, 98, 189, 0.164) 35%, rgb(4, 136, 114) 100%); - /* margin:0 0 30px; */ - /* background: rgb(34,193,195); */ - /* background: linear-gradient(0deg, rgba(34,193,195,1) 0%, rgba(111,106,96,1) 100%); */ padding-top: 10px; box-shadow: 0 15px 25px rgba(0, 0, 0, 0.1); padding-bottom: 10px; @@ -29,4 +26,11 @@ align-items: center; justify-content: center; /* color: white; */ +} + +#pricing button{ + width:150px; + border-radius: 20px; + height:30px; + font-size: small; } \ No newline at end of file
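The upload-time token gate added in Initialize.py repeats the same pattern for every file type, so a condensed sketch may help. This is not code from the PR: the helper name try_embed_pdf and the TOKEN_LIMIT constant are illustrative, and only calEtokens, handlePDF and Embed.getEmbeddings come from the repository.

# Sketch only (assumed helper, not part of this diff): mirrors the gate each Init* class now applies.
from static.Tokens import calEtokens    # tiktoken-based estimate (cl100k_base, scaled by /2.2)
from pdftxt import handlePDF, Embed     # loader and FAISS embedding helper from this repo

TOKEN_LIMIT = 5000                      # cap hard-coded in each Init* class in this PR

def try_embed_pdf(path):
    """Hypothetical helper: embed a PDF only if it fits under the token cap."""
    docs = handlePDF(path).extract_text_from_pdf()
    if calEtokens(docs) >= TOKEN_LIMIT:
        return None                     # caller replies "File size larger than 5000 tokens"
    db, eToken = Embed.getEmbeddings(docs)   # serialized FAISS index and embedding token count
    return db, eToken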
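The four document chat routes in app.py now share one retrieval pattern: rebuild the FAISS index from Redis, wrap it in the runnable built by static/Chain.py, and let get_openai_callback report real prompt and completion tokens instead of the old word-count estimate. Below is a hedged sketch of that pattern, assuming the same Redis hash fields (db, eToken) written at upload; answer_question and the hard-coded Redis connection are placeholders, not code from this PR, and the ChatOpenAI import path can vary by LangChain version.

# Sketch only: condenses the pattern repeated in the /chat/pdf, /chat/txt, /chat/csv and /chat/xlsx routes.
import redis
from langchain_openai import ChatOpenAI, OpenAIEmbeddings        # import path is an assumption
from langchain_community.vectorstores import FAISS
from langchain_community.callbacks.manager import get_openai_callback
from static.Chain import chain

redis_client = redis.Redis(host="localhost", port=6379, db=0)    # placeholder connection
llm = ChatOpenAI(max_tokens=500)

def answer_question(sid, ques):
    """Hypothetical helper: rebuild the session's FAISS index and run the QA chain."""
    eToken = int(redis_client.hget(sid, 'eToken'))                # embedding tokens stored at upload
    db = FAISS.deserialize_from_bytes(
        embeddings=OpenAIEmbeddings(),
        serialized=redis_client.hget(sid, 'db'),
        allow_dangerous_deserialization=True,
    )
    retriever = db.as_retriever(search_type='mmr', search_kwargs={'k': 3})
    qa = chain(retriever, llm)                                    # prompt | llm | StrOutputParser from static/Chain.py
    with get_openai_callback() as cb:                             # real prompt/completion token counts
        result = qa.invoke(ques)
    return {
        "result": result,
        "cToken": cb.prompt_tokens + eToken,
        "gToken": cb.completion_tokens,
        "total": cb.prompt_tokens + eToken + cb.completion_tokens,
    }

Factoring the routes through a helper like this would also remove the leftover debug print in the csv route, but the PR keeps each route inlined.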