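"""Portfolio backtesting assistant.

Uses an OpenAI chat model (via LangChain) in two passes: the first turns a
natural-language request into a get_backtest tool call, and the second
summarizes the backtest metrics as financial advice in Traditional Chinese.
"""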
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from tenacity import retry, stop_after_attempt, wait_random_exponential

GPT_MODEL = "gpt-4o"

# A chat-completions model is required for tool calling. ChatOpenAI reads
# the API key from the OPENAI_API_KEY environment variable; never hard-code
# secrets in source.
client = ChatOpenAI(model=GPT_MODEL)

@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
def chat_completion_request(messages, tools=None):
    """Invoke the chat model, binding tools when provided."""
    try:
        llm = client.bind_tools(tools) if tools else client
        return llm.invoke(messages)
    except Exception as e:
        print("Unable to generate ChatCompletion response")
        print(f"Exception: {e}")
        # Re-raise so tenacity can retry; the final failure propagates to
        # the caller instead of being silently swallowed.
        raise
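
# get_backtest is a stub: it returns canned metrics so the two-pass
# pipeline can run end to end without a real backtesting engine.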
def get_backtest(symbols, sliding_window="six months", frequency="semi-annually", function="sharpe ratio"):
    symbol = ", ".join(symbols)
    return (
        f"Backtest result for {symbol}: Annualized return: 10%, "
        "Annualized Sharpe ratio: 1.5, Annualized volatility: 20%, "
        "Maximum drawdown: 5%, Alpha: 0.1, Beta: 0.8, VaR10: 5%, R2: 0.9"
    )
def backtest_main(query):
    # Tool schema in the OpenAI function-calling format, which
    # ChatOpenAI.bind_tools accepts directly. Only "symbol" is required,
    # matching the system prompt; the other parameters have defaults.
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_backtest",
                "description": (
                    "Get the portfolio backtesting result by combining a "
                    "list of symbols, a sliding window, an optimization "
                    "frequency, and an optimization function"
                ),
                "parameters": {
                    "type": "object",
                    "properties": {
                        "symbol": {
                            "type": "array",
                            "items": {"type": "string"},
                            "description": (
                                "An array of portfolio symbols to backtest. "
                                "Taiwan Stock Exchange listings use their "
                                "numeric code, e.g. TSMC becomes 2330.TW."
                            ),
                        },
                        "sliding_window": {
                            "type": "string",
                            "enum": ["one month", "three months", "six months", "one year"],
                            "description": "The sliding window size for the backtest",
                        },
                        "frequency": {
                            "type": "string",
                            "enum": ["monthly", "quarterly", "semi-annually", "annually"],
                            "description": "The optimization frequency for the backtest",
                        },
                        "function": {
                            "type": "string",
                            "enum": ["sharpe ratio", "sortino ratio", "volatility", "utility function"],
                            "description": "The optimization function for the backtest",
                        },
                    },
                    "required": ["symbol"],
                },
            },
        }
    ]
    initial_prompt = PromptTemplate.from_template(
        "You are a software developer writing a function to get the portfolio "
        "backtesting result using different symbols, a sliding window, an "
        "optimization frequency, and an optimization function. Only the symbol "
        "is required; the sliding window, frequency, and function are optional. "
        "The sliding window defaults to six months, the optimization frequency "
        "to semi-annually, and the optimization function to the Sharpe ratio."
    )

    secondary_prompt = PromptTemplate.from_template(
        "You are a professional financial analyst. The user will provide you "
        "with results from backtesting an investment portfolio using the "
        "Efficient Frontier calculation. These results include annualized "
        "return, annualized Sharpe ratio, annualized volatility, maximum "
        "drawdown, Alpha, Beta, VaR10, and R2. Please provide professional "
        "advice in Traditional Chinese based on these reports."
    )
    # First pass: let the model translate the user's request into a
    # get_backtest tool call.
    messages = [
        {"role": "system", "content": initial_prompt.format()},
        {"role": "user", "content": query},
    ]
    chat_response = chat_completion_request(messages, tools=tools)

    # LangChain surfaces parsed tool calls on the AIMessage: each entry is a
    # dict with "name" and "args", the arguments already decoded from JSON.
    tool_calls = chat_response.tool_calls
    if not tool_calls:
        print("No tool calls received in the assistant message.")
        return None
    available_functions = {
        "get_backtest": get_backtest,
    }

    function_responses = []
    for tool_call in tool_calls:
        function_to_call = available_functions.get(tool_call["name"])
        if function_to_call is None:
            continue
        args = tool_call["args"]
        function_responses.append(
            function_to_call(
                symbols=args.get("symbol"),
                # Fall back to the documented defaults when the model
                # omits an optional argument.
                sliding_window=args.get("sliding_window", "six months"),
                frequency=args.get("frequency", "semi-annually"),
                function=args.get("function", "sharpe ratio"),
            )
        )

    if not function_responses:
        print("No valid function responses generated.")
        return None
    # Second pass: have the model turn the raw metrics into analyst advice.
    sec_messages = [
        {"role": "system", "content": secondary_prompt.format()},
        {"role": "user", "content": function_responses[0]},
    ]
    result_message = chat_completion_request(sec_messages)

    if not result_message or not result_message.content:
        print("No valid response received from the secondary prompt.")
        return None

    return result_message.content

if __name__ == "__main__":
    # "I want to backtest TSMC and Apple stock for the maximum Sharpe ratio."
    query = "我想要使用台積電和蘋果股票來進行最大夏普比率的回測"
    article = backtest_main(query)
    if article:
        print(article)