diff --git a/.gitignore b/.gitignore
index b01bd0a9..a31ae91b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -75,4 +75,5 @@ build/
 .idea
 bin/
 dist/
-application-local.yaml
\ No newline at end of file
+application-local.yaml
+service/python/config.json
diff --git a/K8S.md b/K8S.md
index 910fb93e..ba4e3bc8 100644
--- a/K8S.md
+++ b/K8S.md
@@ -48,7 +48,7 @@ Change to the new folder:
 cd oci-generative-ai-jet-ui
 ```
 
-Install Node.js 16 on Cloud Shell.
+Install Node.js 18 on Cloud Shell.
 
 ```bash
 nvm install 18 && nvm use 18
@@ -130,6 +130,7 @@ Run `get deploy` a few times:
 
 ```bash
 kubectl get deploy -n backend
+kubectl get pods -n backend
 ```
 
 Wait for all deployments to be `Ready` and `Available`.
diff --git a/LOCAL.md b/LOCAL.md
index d97c2359..0c3f1134 100644
--- a/LOCAL.md
+++ b/LOCAL.md
@@ -1,4 +1,16 @@
-# Run Local
+# Running the Java Backend for Local Development
+
+This guide provides step-by-step instructions for running the Java backend application locally for development purposes.
+
+## Prerequisites
+
+Ensure you have the following installed on your system:
+
+- Java Development Kit (JDK) 11 or later
+- Gradle 6.8 or later
+- Oracle JDBC Driver (if using an Oracle database)
+
+Familiarize yourself with the project structure and configuration files.
 
 ## Run components
diff --git a/app/package.json b/app/package.json
index 53c11b5a..b98c5b0e 100644
--- a/app/package.json
+++ b/app/package.json
@@ -1,6 +1,6 @@
 {
   "name": "JETGenAI",
-  "version": "1.0.0",
+  "version": "1.0.2",
   "description": "Sample Client app showing communication with OCI Generative AI services via Websocket",
   "dependencies": {
     "@oracle/oraclejet": "~16.1.0",
diff --git a/app/src/components/content/answer.tsx b/app/src/components/content/answer.tsx
index 64276a6e..c15ad0c3 100644
--- a/app/src/components/content/answer.tsx
+++ b/app/src/components/content/answer.tsx
@@ -43,22 +43,22 @@ export const Answer = ({ item, sim }: Props) => {
       )}
       {!sim && (
[hunk body lost in extraction: the JSX element markup was stripped, leaving only the +/- markers; the surviving {/* ... */} pair indicates one element block is commented out in the new version]
       )}
diff --git a/app/src/components/content/chat.tsx b/app/src/components/content/chat.tsx
index 2ad52692..3a64c4a1 100644
--- a/app/src/components/content/chat.tsx
+++ b/app/src/components/content/chat.tsx
@@ -68,31 +68,43 @@ export const Chat = ({ testId, data, questionChanged, question }: Props) => {
     );
   };
 
+  const handleQuestionChange = (event: any) => {
+    const newValue = event.detail.value.trim();
+    if (newValue !== "") {
+      questionChanged(event);
+      question.current = ""; // Clear the input field after adding the question
+    }
+  };
+
   return (
-    <>
[remainder of hunk lost in extraction: the JSX element markup was stripped, leaving only the +/- markers]
     );
-};
+};
\ No newline at end of file
diff --git a/app/src/components/content/settings.tsx b/app/src/components/content/settings.tsx
index 269f66d3..babb9f71 100644
--- a/app/src/components/content/settings.tsx
+++ b/app/src/components/content/settings.tsx
@@ -81,9 +81,8 @@ export const Settings = (props: Props) => {
     const json = await response.json();
     const result = json.filter((model: Model) => {
       if (
-        model.capabilities.includes("TEXT_GENERATION") &&
-        (model.vendor == "cohere" || model.vendor == "") &&
-        model.version != "14.2"
+        model.capabilities.includes("CHAT") &&
+        (model.vendor == "cohere" || model.vendor == "meta")
       )
         return model;
     });
diff --git a/app/src/styles/app.css b/app/src/styles/app.css
index 71bf5677..bfb3252a 100644
--- a/app/src/styles/app.css
+++ b/app/src/styles/app.css
@@ -34,6 +34,13 @@ header.oj-web-applayout-header {
 
 oj-sample-markdown-viewer .legacyStyling pre {
   color: black;
+  white-space: pre-wrap;
+  margin: unset;
+
+}
+oj-sample-markdown-viewer .legacyStyling p {
+  margin-right: 10px;
+
 }
 
 .copy-to-clip-btn {
@@ -52,6 +59,7 @@ oj-sample-markdown-viewer .legacyStyling pre {
   user-select: text;
 }
 
+
 .demo-bg-main {
   background-color: rgb(var(--oj-palette-neutral-rgb-10));
   box-shadow: var(--oj-core-box-shadow-md);
@@ -78,10 +86,14 @@ samp {
   margin: 20px 10px 50px 10px;
   border: 2px black;
   border-radius: 10px;
-  padding: 10px 10px 10px 65px;
+  padding: 10px;
   text-align: start;
   max-width: 1440px;
 }
+
+.demo-answer-padding {
+  margin-top: 65px;
+}
 .demo-sim-answer-layout {
   min-height: 50px;
   display: inherit;
@@ -92,6 +104,7 @@ samp {
   text-align: end;
   max-width: 1440px;
 }
+
 .demo-question-layout {
   min-height: 50px;
   /* background-color: #226b95; */
@@ -114,13 +127,13 @@ samp {
 }
 
 .demo-chat-layout {
-  max-height: 100%;
-  height: 80%;
-  min-height: 400px;
+  max-height: 80%;
+  height: 20%;
+  min-height: calc(100vh - 200px);
 }
 
 .demo-no-data-layout {
-  height: 100%;
+  height: 50%;
   align-items: center;
 }
@@ -178,3 +191,4 @@ html[dir="rtl"] .demo-oracle-icon {
     transform: translate(-25%, -25%) scale(0.5);
   }
 }
+
diff --git a/architecture.drawio b/architecture.drawio
index 2adbfe68..5485bb9c 100644
[diagram hunks omitted: the drawio XML markup was stripped in extraction and left no reviewable content]
diff --git a/backend/src/main/java/dev/victormartin/oci/genai/backend/backend/controller/GenAIController.java b/backend/src/main/java/dev/victormartin/oci/genai/backend/backend/controller/GenAIController.java
index a2e2e51f..5a3f69f8 100644
--- a/backend/src/main/java/dev/victormartin/oci/genai/backend/backend/controller/GenAIController.java
+++ b/backend/src/main/java/dev/victormartin/oci/genai/backend/backend/controller/GenAIController.java
@@ -8,6 +8,7 @@
 import com.oracle.bmc.generativeai.responses.ListEndpointsResponse;
 import dev.victormartin.oci.genai.backend.backend.dao.GenAiModel;
 import dev.victormartin.oci.genai.backend.backend.dao.GenAiEndpoint;
+import dev.victormartin.oci.genai.backend.backend.service.GenAIModelsService;
 import dev.victormartin.oci.genai.backend.backend.service.GenAiClientService;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -29,19 +30,16 @@ public class GenAIController {
     @Autowired
     private GenAiClientService generativeAiClientService;
 
+    @Autowired
+    private GenAIModelsService genAIModelsService;
+
     @GetMapping("/api/genai/models")
     public List<GenAiModel> getModels() {
         logger.info("getModels()");
-        ListModelsRequest listModelsRequest = ListModelsRequest.builder().compartmentId(COMPARTMENT_ID).build();
-        GenerativeAiClient client = generativeAiClientService.getClient();
-        ListModelsResponse response = client.listModels(listModelsRequest);
-        return response.getModelCollection().getItems().stream().map(m -> {
-            List<String> capabilities = m.getCapabilities().stream().map(ModelCapability::getValue)
-                    .collect(Collectors.toList());
-            GenAiModel model = new GenAiModel(m.getId(), m.getDisplayName(), m.getVendor(), m.getVersion(),
-                    capabilities, m.getTimeCreated());
-            return model;
-        }).collect(Collectors.toList());
+        List<GenAiModel> models = genAIModelsService.getModels();
+        return models.stream()
+                .filter(m -> m.capabilities().contains("CHAT"))
+                .collect(Collectors.toList());
     }
 
     @GetMapping("/api/genai/endpoints")
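With the controller delegating to `GenAIModelsService` and filtering on the `CHAT` capability, `/api/genai/models` should now return only chat-capable models. A minimal sketch to spot-check this against a locally running backend; the host/port and the JSON field names (assumed to mirror the `GenAiModel` record components) are assumptions, not confirmed by this PR:

```python
# Spot-check the filtered /api/genai/models endpoint.
# localhost:8080 and the JSON key names are assumptions.
import json
import urllib.request

with urllib.request.urlopen("http://localhost:8080/api/genai/models") as resp:
    models = json.load(resp)

for m in models:
    # Every returned model should now advertise the CHAT capability.
    assert "CHAT" in m["capabilities"], f"non-chat model leaked through: {m}"
    print(m["vendor"], m["name"])
```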
diff --git a/backend/src/main/java/dev/victormartin/oci/genai/backend/backend/controller/PromptController.java b/backend/src/main/java/dev/victormartin/oci/genai/backend/backend/controller/PromptController.java
index c7005f74..1adb7080 100644
--- a/backend/src/main/java/dev/victormartin/oci/genai/backend/backend/controller/PromptController.java
+++ b/backend/src/main/java/dev/victormartin/oci/genai/backend/backend/controller/PromptController.java
@@ -61,7 +61,7 @@ public Answer handlePrompt(Prompt prompt) {
             throw new InvalidPromptRequest();
         }
         saved.setDatetimeResponse(new Date());
-        String responseFromGenAI = genAI.resolvePrompt(promptEscaped, activeModel, finetune);
+        String responseFromGenAI = genAI.resolvePrompt(promptEscaped, activeModel, finetune, false);
         saved.setResponse(responseFromGenAI);
         interactionRepository.save(saved);
         return new Answer(responseFromGenAI, "");
diff --git a/backend/src/main/java/dev/victormartin/oci/genai/backend/backend/service/GenAIModelsService.java b/backend/src/main/java/dev/victormartin/oci/genai/backend/backend/service/GenAIModelsService.java
new file mode 100644
index 00000000..b52c4aee
--- /dev/null
+++ b/backend/src/main/java/dev/victormartin/oci/genai/backend/backend/service/GenAIModelsService.java
@@ -0,0 +1,44 @@
+package dev.victormartin.oci.genai.backend.backend.service;
+
+import com.oracle.bmc.generativeai.GenerativeAiClient;
+import com.oracle.bmc.generativeai.model.ModelCapability;
+import com.oracle.bmc.generativeai.requests.ListModelsRequest;
+import com.oracle.bmc.generativeai.responses.ListModelsResponse;
+import dev.victormartin.oci.genai.backend.backend.dao.GenAiModel;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.stereotype.Service;
+
+import java.util.List;
+import java.util.stream.Collectors;
+
+@Service
+public class GenAIModelsService {
+    Logger log = LoggerFactory.getLogger(GenAIModelsService.class);
+
+    @Value("${genai.compartment_id}")
+    private String COMPARTMENT_ID;
+
+    @Autowired
+    private GenAiClientService generativeAiClientService;
+
+    public List<GenAiModel> getModels() {
+        log.info("getModels()");
+        ListModelsRequest listModelsRequest = ListModelsRequest.builder()
+                .compartmentId(COMPARTMENT_ID)
+                .build();
+        GenerativeAiClient client = generativeAiClientService.getClient();
+        ListModelsResponse response = client.listModels(listModelsRequest);
+        return response.getModelCollection().getItems().stream()
+                .map(m -> {
+                    List<String> capabilities = m.getCapabilities().stream()
+                            .map(ModelCapability::getValue).collect(Collectors.toList());
+                    GenAiModel model = new GenAiModel(
+                            m.getId(), m.getDisplayName(), m.getVendor(),
+                            m.getVersion(), capabilities, m.getTimeCreated());
+                    return model;
+                }).collect(Collectors.toList());
+    }
+}
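The new service wraps the control-plane `ListModels` call that the controller previously made inline. For reference, the same listing can be reproduced from this repo's Python side with the OCI SDK; a rough sketch, assuming a standard `~/.oci/config` profile and a placeholder compartment OCID:

```python
# Rough Python equivalent of GenAIModelsService.getModels().
# The profile name and compartment OCID below are placeholders.
import oci

config = oci.config.from_file("~/.oci/config", "DEFAULT")
client = oci.generative_ai.GenerativeAiClient(config)

response = client.list_models(compartment_id="<compartment-ocid>")
chat_models = [m for m in response.data.items if "CHAT" in m.capabilities]
for m in chat_models:
    print(m.vendor, m.display_name, m.version)
```

One design note: `resolvePrompt()` in `OCIGenAIService` below calls `getModels()` on every prompt, which adds a ListModels round-trip per request; caching the model list inside `GenAIModelsService` would be a natural follow-up.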
diff --git a/backend/src/main/java/dev/victormartin/oci/genai/backend/backend/service/OCIGenAIService.java b/backend/src/main/java/dev/victormartin/oci/genai/backend/backend/service/OCIGenAIService.java
index a7b1d2a8..e93e7dc1 100644
--- a/backend/src/main/java/dev/victormartin/oci/genai/backend/backend/service/OCIGenAIService.java
+++ b/backend/src/main/java/dev/victormartin/oci/genai/backend/backend/service/OCIGenAIService.java
@@ -1,48 +1,113 @@
 package dev.victormartin.oci.genai.backend.backend.service;
 
-import com.oracle.bmc.generativeaiinference.GenerativeAiInferenceClient;
-import com.oracle.bmc.generativeaiinference.model.*;
-import com.oracle.bmc.generativeaiinference.requests.ChatRequest;
-import com.oracle.bmc.generativeaiinference.requests.GenerateTextRequest;
-import com.oracle.bmc.generativeaiinference.requests.SummarizeTextRequest;
-import com.oracle.bmc.generativeaiinference.responses.ChatResponse;
-import com.oracle.bmc.generativeaiinference.responses.GenerateTextResponse;
-import com.oracle.bmc.generativeaiinference.responses.SummarizeTextResponse;
-import com.oracle.bmc.http.client.jersey.WrappedResponseInputStream;
-import org.hibernate.boot.archive.scan.internal.StandardScanner;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.beans.factory.annotation.Value;
 import org.springframework.stereotype.Service;
 
-import java.io.*;
-import java.nio.charset.StandardCharsets;
-import java.util.List;
-import java.util.stream.Collectors;
+import com.oracle.bmc.generativeaiinference.model.BaseChatResponse;
+import com.oracle.bmc.generativeaiinference.model.ChatChoice;
+import com.oracle.bmc.generativeaiinference.model.ChatContent;
+import com.oracle.bmc.generativeaiinference.model.ChatDetails;
+import com.oracle.bmc.generativeaiinference.model.ChatResult;
+import com.oracle.bmc.generativeaiinference.model.CohereChatRequest;
+import com.oracle.bmc.generativeaiinference.model.CohereChatResponse;
+import com.oracle.bmc.generativeaiinference.model.GenericChatRequest;
+import com.oracle.bmc.generativeaiinference.model.GenericChatResponse;
+import com.oracle.bmc.generativeaiinference.model.Message;
+import com.oracle.bmc.generativeaiinference.model.OnDemandServingMode;
+import com.oracle.bmc.generativeaiinference.model.TextContent;
+import com.oracle.bmc.generativeaiinference.model.UserMessage;
+import com.oracle.bmc.generativeaiinference.requests.ChatRequest;
+import com.oracle.bmc.generativeaiinference.responses.ChatResponse;
+
+import dev.victormartin.oci.genai.backend.backend.dao.GenAiModel;
 
+/**
+ * Provides an implementation of the OCI Gen AI service, allowing users to interact with various AI models
+ * from different vendors such as Cohere and Meta.
+ *
+ * This service enables features like text generation and summarization.
+ */
 @Service
 public class OCIGenAIService {
+
+    Logger log = LoggerFactory.getLogger(OCIGenAIService.class);
+
     @Value("${genai.compartment_id}")
     private String COMPARTMENT_ID;
 
     @Autowired
     private GenAiInferenceClientService generativeAiInferenceClientService;
 
-    public String resolvePrompt(String input, String modelId, boolean finetune) {
-        CohereChatRequest cohereChatRequest = CohereChatRequest.builder()
-                .message(input)
-                .maxTokens(600)
-                .temperature((double) 1)
-                .frequencyPenalty((double) 0)
-                .topP((double) 0.75)
-                .topK(0)
-                .isStream(false) // TODO websockets and streams
-                .build();
+    @Autowired
+    private GenAIModelsService genAIModelsService;
 
-        ChatDetails chatDetails = ChatDetails.builder()
-                .servingMode(OnDemandServingMode.builder().modelId(modelId).build())
-                .compartmentId(COMPARTMENT_ID)
-                .chatRequest(cohereChatRequest)
-                .build();
+    public String resolvePrompt(String input, String modelId, boolean finetune, boolean summarization) {
+        List<GenAiModel> models = genAIModelsService.getModels();
+        GenAiModel currentModel = models.stream()
+                .filter(m -> modelId.equals(m.id()))
+                .findFirst()
+                .orElseThrow();
+
+        log.info("Model {} with finetune {}", currentModel.name(), finetune ? "yes" : "no");
+
+        double temperature = summarization ? 0.0 : 0.5;
+        String inputText = summarization ? "Summarize this text:\n" + input : input;
+
+        ChatDetails chatDetails;
+        switch (currentModel.vendor()) {
+            case "cohere":
+                CohereChatRequest cohereChatRequest = CohereChatRequest.builder()
+                        .message(inputText)
+                        .maxTokens(600)
+                        .temperature(temperature)
+                        .frequencyPenalty((double) 0)
+                        .topP(0.75)
+                        .topK(0)
+                        .isStream(false)
+                        .build();
+
+                chatDetails = ChatDetails.builder()
+                        .servingMode(OnDemandServingMode.builder().modelId(currentModel.id()).build())
+                        .compartmentId(COMPARTMENT_ID)
+                        .chatRequest(cohereChatRequest)
+                        .build();
+                break;
+            case "meta":
+                ChatContent content = TextContent.builder()
+                        .text(inputText)
+                        .build();
+                List<ChatContent> contents = new ArrayList<>();
+                contents.add(content);
+                List<Message> messages = new ArrayList<>();
+                Message message = new UserMessage(contents, "user");
+                messages.add(message);
+                GenericChatRequest genericChatRequest = GenericChatRequest.builder()
+                        .messages(messages)
+                        .maxTokens(600)
+                        .temperature((double) 1)
+                        .frequencyPenalty((double) 0)
+                        .presencePenalty((double) 0)
+                        .topP(0.75)
+                        .topK(-1)
+                        .isStream(false)
+                        .build();
+                chatDetails = ChatDetails.builder()
+                        .servingMode(OnDemandServingMode.builder().modelId(currentModel.id()).build())
+                        .compartmentId(COMPARTMENT_ID)
+                        .chatRequest(genericChatRequest)
+                        .build();
+                break;
+            default:
+                throw new IllegalStateException("Unexpected value: " + currentModel.vendor());
+        }
 
         ChatRequest request = ChatRequest.builder()
                 .chatDetails(chatDetails)
@@ -65,7 +130,7 @@ public String resolvePrompt(String input, String modelId, boolean finetune) {
     }
 
     public String summaryText(String input, String modelId, boolean finetuned) {
-        String response = resolvePrompt("Summarize this:\n" + input, modelId, finetuned);
+        String response = resolvePrompt(input, modelId, finetuned, true);
         return response;
     }
 }
diff --git a/scripts/release.mjs b/scripts/release.mjs
index 83a699c3..d8c41a92 100644
--- a/scripts/release.mjs
+++ b/scripts/release.mjs
@@ -53,9 +53,9 @@ config.set("ocir_user_email", ocir_user_email);
 config.set("ocir_user_token", ocir_user_token);
 
 await containerLogin(namespace, ocir_user, ocir_user_token, ocirUrl);
-await releaseWeb();
+// await releaseWeb();
 await releaseApp();
-await releaseBackend();
+// await releaseBackend();
 
 async function releaseWeb() {
   const service = "web";
diff --git a/service/python/config.json.txt b/service/python/config.json.txt
new file mode 100644
index 00000000..178c373f
--- /dev/null
+++ b/service/python/config.json.txt
@@ -0,0 +1,8 @@
+{
+    "compartment_id": "",
+    "config_profile": "",
+    "service_endpoint": "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
+    "model_type": "",
+    "cohere_model_id": "",
+    "llama_model_id": ""
+}
\ No newline at end of file
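The template ships as `config.json.txt` while the real `service/python/config.json` is git-ignored (see the `.gitignore` change above), keeping OCIDs and endpoints out of the repo. A small fail-fast guard before starting the server could look like this; a sketch only, and the check itself is hypothetical, not part of this PR:

```python
# Hypothetical startup guard: refuse to run while config.json has unfilled fields.
import json
import sys

with open("config.json") as f:
    cfg = json.load(f)

model_type = cfg.get("model_type", "")
required = ["compartment_id", "config_profile", "service_endpoint",
            "model_type", f"{model_type}_model_id"]
missing = [key for key in required if not cfg.get(key)]
if missing:
    sys.exit(f"config.json: fill in {', '.join(missing)} before starting the server")
```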
diff --git a/service/python/requirements.txt b/service/python/requirements.txt
index 204b9200..a02ff4a2 100644
--- a/service/python/requirements.txt
+++ b/service/python/requirements.txt
@@ -4,7 +4,7 @@ cached-property==1.5.2
 certifi==2024.7.4
 cffi==1.16.0
 circuitbreaker==1.4.0
-cryptography==43.0.1
+cryptography==42.0.6
 oci==2.126.2
 pycparser==2.21
 pyOpenSSL==24.1.0
diff --git a/service/python/server.py b/service/python/server.py
index 242994a3..951b0323 100644
--- a/service/python/server.py
+++ b/service/python/server.py
@@ -1,7 +1,7 @@
+import oci
 import asyncio
 import websockets
 import json
-import oci
 from throttler import throttle
 from pypdf import PdfReader
 from io import BytesIO
@@ -9,90 +9,93 @@
 import re
 from types import SimpleNamespace
 
-# TODO: Please update config profile name and use the compartmentId that has policies grant permissions for using Generative AI Service
-compartment_id = ""
-CONFIG_PROFILE = "DEFAULT"
-config = oci.config.from_file('~/.oci/config', CONFIG_PROFILE)
-
-# Service endpoint
-endpoint = "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com"
-generative_ai_inference_client = (
-    oci.generative_ai_inference.GenerativeAiInferenceClient(
-        config=config,
-        service_endpoint=endpoint,
-        retry_strategy=oci.retry.NoneRetryStrategy(),
-        timeout=(10, 240),
-    )
-)
-
-@throttle(rate_limit=15, period=65.0)
-async def generate_ai_response(prompts):
-    prompt = ""
-    llm_inference_request = (
-        oci.generative_ai_inference.models.CohereLlmInferenceRequest()
-    )
-    llm_inference_request.prompt = prompts
-    llm_inference_request.max_tokens = 1000
-    llm_inference_request.temperature = 0.75
-    llm_inference_request.top_p = 0.7
-    llm_inference_request.frequency_penalty = 1.0
+with open('config.json') as f:
+    config = json.load(f)
 
-    generate_text_detail = oci.generative_ai_inference.models.GenerateTextDetails()
-    generate_text_detail.serving_mode = oci.generative_ai_inference.models.DedicatedServingMode(endpoint_id="ocid1.generativeaiendpoint.oc1.us-chicago-1.amaaaaaaeras5xiavrsefrftfupp42lnniddgjnxuwbv5jypl64i7ktan65a")
+# Load configuration parameters
+compartment_id = config['compartment_id']
+CONFIG_PROFILE = config['config_profile']
+endpoint = config['service_endpoint']
+model_type = config['model_type']
+model_id = config[f'{model_type}_model_id']
 
-    generate_text_detail.compartment_id = compartment_id
-    generate_text_detail.inference_request = llm_inference_request
-
-    if "" in compartment_id:
-        print("ERROR:Please update your compartment id in target python file")
-        quit()
+config = oci.config.from_file('~/.oci/config', CONFIG_PROFILE)
 
-    generate_text_response = generative_ai_inference_client.generate_text(generate_text_detail)
-    # Print result
-    print("**************************Generate Texts Result**************************")
-    print(vars(generate_text_response))
+generative_ai_inference_client = oci.generative_ai_inference.GenerativeAiInferenceClient(
+    config=config,
+    service_endpoint=endpoint,
+    retry_strategy=oci.retry.NoneRetryStrategy(),
+    timeout=(10, 240)
+)
 
-    return generate_text_response
+chat_detail = oci.generative_ai_inference.models.ChatDetails()
 
+# Define a function to generate an AI response
 @throttle(rate_limit=15, period=65.0)
-async def generate_ai_summary(summary_txt, prompt):
-    # You can also load the summary text from a file, or as a parameter in main
-    #with open('files/summarize_data.txt', 'r') as file:
-    #    text_to_summarize = file.read()
-
-    summarize_text_detail = oci.generative_ai_inference.models.SummarizeTextDetails()
-    summarize_text_detail.serving_mode = oci.generative_ai_inference.models.OnDemandServingMode(model_id="cohere.command")
-    summarize_text_detail.compartment_id = compartment_id
-    #summarize_text_detail.input = text_to_summarize
-    summarize_text_detail.input = summary_txt
-    summarize_text_detail.additional_command = prompt
-    summarize_text_detail.extractiveness = "AUTO" # HIGH, LOW
-    summarize_text_detail.format = "AUTO" # brackets, paragraph
-    summarize_text_detail.length = "LONG" # high, AUTO
-    summarize_text_detail.temperature = .25 # [0,1]
-
+async def generate_ai_response(prompts):
+    # Determine the request type based on the model type
+    if model_type == 'cohere':
+        chat_request = oci.generative_ai_inference.models.CohereChatRequest()
+        chat_request.max_tokens = 2000
+        chat_request.temperature = 0.25
+        chat_request.frequency_penalty = 0
+        chat_request.top_p = 0.75
+        chat_request.top_k = 0
+    elif model_type == 'llama':
+        chat_request = oci.generative_ai_inference.models.GenericChatRequest()
+        chat_request.api_format = oci.generative_ai_inference.models.BaseChatRequest.API_FORMAT_GENERIC
+        chat_request.max_tokens = 2000
+        chat_request.temperature = 1
+        chat_request.frequency_penalty = 0
+        chat_request.presence_penalty = 0
+        chat_request.top_p = 0.75
+        chat_request.top_k = -1
+    else:
+        raise ValueError("Unsupported model type")
+
+    # Process the prompts
+    if isinstance(prompts, str):
+        if model_type == 'cohere':
+            chat_request.message = prompts
+        else:
+            content = oci.generative_ai_inference.models.TextContent()
+            content.text = prompts
+            message = oci.generative_ai_inference.models.Message()
+            message.role = "USER"
+            message.content = [content]
+            chat_request.messages = [message]
+    elif isinstance(prompts, list):
+        chat_request.messages = prompts
+    else:
+        raise ValueError("Invalid input type for generate_ai_response")
+
+    # Set up the chat detail object
+    chat_detail.chat_request = chat_request
+    on_demand_mode = oci.generative_ai_inference.models.OnDemandServingMode(model_id=model_id)
+    chat_detail.serving_mode = on_demand_mode
+    chat_detail.compartment_id = compartment_id
+
+    # Send the request and get the response
+    chat_response = generative_ai_inference_client.chat(chat_detail)
+
+    # Validate the compartment ID
     if "" in compartment_id:
-        print("ERROR:Please update your compartment id in target python file")
+        print("ERROR: Please update your compartment id in target python file")
         quit()
 
-    summarize_text_response = generative_ai_inference_client.summarize_text(summarize_text_detail)
-
-    # Print result
-    #print("**************************Summarize Texts Result**************************")
-    #print(summarize_text_response.data)
+    # Print the chat result
+    print("**************************Chat Result**************************")
+    print(vars(chat_response))
 
-    return summarize_text_response.data
+    return chat_response
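To exercise the refactored chat path without bringing up the websocket server, the throttled coroutine can be driven directly; a minimal sketch, assuming a filled-in `config.json`, and note that the exact shape of the response object depends on the OCI SDK version, so the attribute access below is an assumption:

```python
# Minimal sketch: call generate_ai_response() directly, outside the websocket server.
import asyncio

async def main():
    # A plain string becomes CohereChatRequest.message on the cohere path,
    # or is wrapped in a USER Message on the llama/generic path (see above).
    response = await generate_ai_response("Say hello in one short sentence.")
    # chat() returns a wrapped ChatResult; adjust this access if your SDK differs.
    print(response.data.chat_response)

asyncio.run(main())
```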
 
 async def parse_pdf(file: BytesIO) -> List[str]:
     pdf = PdfReader(file)
     output = []
     for page in pdf.pages:
         text = page.extract_text()
-        # Merge hyphenated words
         text = re.sub(r"(\w+)-\n(\w+)", r"\1\2", text)
-        # Fix newlines in the middle of sentences
         text = re.sub(r"(?