import telebot
from config import Config
import pdfplumber
import time
import requests
from datetime import datetime
import schedule
BOT_TOKEN = '6701395239:AAFE30dqvNihDdni9vYoAbWssO-X5yAmwho'
# BOT_TOKEN = "6746720034:AAEMaoV2FwIZ8pz_PF18-bo2a6gFC1eVtVs"
#BOT_TOKEN = '6589162555:AAHGhrTQ0wYNtIUySMohnfpxQl1d6blr24Q'
def broadcast_message(message: str, chat_id: str):
    # Send a plain-text message to the given chat via the Telegram Bot API.
    r = requests.post(
        f"https://api.telegram.org/bot{BOT_TOKEN}/sendMessage",
        json={
            "chat_id": chat_id,
            "text": message,
        },
    )
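# Hedged sketch: a checked variant of broadcast_message. The helper name and the
# 10-second timeout are illustrative additions, not part of the original bot; it
# simply reports whether Telegram accepted the message instead of discarding r.
def broadcast_message_checked(message: str, chat_id: str) -> bool:
    r = requests.post(
        f"https://api.telegram.org/bot{BOT_TOKEN}/sendMessage",
        json={"chat_id": chat_id, "text": message},
        timeout=10,
    )
    # The Bot API answers with a JSON body whose "ok" field reports success.
    return r.ok and r.json().get("ok", False)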
def start():
    global startime
    startime = time.time()
    url = "https://www.bea.gov/news/2024/personal-income-and-outlays-december-2023"
    text = find_indicate(url)
    return text
def find_indicate_bs4(url):
    from bs4 import BeautifulSoup
    import requests
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    # Use a CSS selector to locate the indicator rows on the page.
    element = soup.select("tr.item-fact-row")
    # The second matching row holds the latest figure; split its text on '+'
    # to separate the period from the value.
    return element[1].text.split('+')[0] + " " + element[1].text.split('+')[1]
def find_indicate(url):
    from selenium import webdriver
    from selenium.webdriver.common.by import By
    from selenium.webdriver.chrome.options import Options
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    import time
    options = Options()
    # options.add_argument("--headless")  # enable headless mode
    driver = webdriver.Chrome(options=options)
    driver.get(url)
    # time.sleep(3)
    # Read the date and value cells of the first row in the indicator table.
    date = driver.find_element(By.XPATH, '//*[@id="test"]/div[2]/article/div/div/div[1]/div[2]/div/div[1]/div/table/tbody/tr[1]/td[1]')
    value = driver.find_element(By.XPATH, '//*[@id="test"]/div[2]/article/div/div/div[1]/div[2]/div/div[1]/div/table/tbody/tr[1]/td[2]')
    return date.text, value.text
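# Hedged sketch: an explicit-wait variant of find_indicate. WebDriverWait and
# expected_conditions are imported above but never used; this shows how they could
# replace a fixed sleep. The 15-second timeout and the helper name are illustrative.
def find_indicate_with_wait(url):
    from selenium import webdriver
    from selenium.webdriver.common.by import By
    from selenium.webdriver.chrome.options import Options
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    driver = webdriver.Chrome(options=Options())
    try:
        driver.get(url)
        row = '//*[@id="test"]/div[2]/article/div/div/div[1]/div[2]/div/div[1]/div/table/tbody/tr[1]'
        # Block until the first table row is actually in the DOM before reading it.
        date = WebDriverWait(driver, 15).until(
            EC.presence_of_element_located((By.XPATH, row + '/td[1]')))
        value = driver.find_element(By.XPATH, row + '/td[2]')
        return date.text, value.text
    finally:
        # Unlike find_indicate above, release the browser when done.
        driver.quit()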
# A continuously running loop that keeps pulling new messages from the Telegram
# servers and dispatches them to the message handlers defined above.
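# Hedged sketch: the comment above describes pyTelegramBotAPI's long-polling loop,
# but no handler or polling call exists in this file. The function and the /start
# handler below are illustrative and are not wired into __main__.
def run_polling_bot():
    bot = telebot.TeleBot(BOT_TOKEN)

    @bot.message_handler(commands=['start'])
    def handle_start(message):
        # Acknowledge the command so the chat knows the bot is alive.
        bot.reply_to(message, "Bot is running.")

    # Long-poll the Telegram servers for updates and dispatch them to the handlers.
    bot.infinity_polling()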
# Nonfarm payroll employment
def read_pdf_nonfarm(month, year):
    # Pull the headline figure from the first page of the Employment Situation PDF.
    pdf = pdfplumber.open(f"empsit/empsit_{month}_{year}.pdf")
    page = pdf.pages[0]
    text = page.extract_text().split('\n')
    # The headline sentence sits on lines 7-8 of the extracted text.
    text = (text[7] + text[8]).split(',')
    text = text[0] + text[1] + text[2]
    return text
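# Hedged sketch: pdfplumber.open() also works as a context manager, which closes the
# file handle that read_pdf_nonfarm leaves open. The helper name is illustrative;
# the line/comma slicing is copied unchanged from the function above.
def read_pdf_nonfarm_closed(month, year):
    with pdfplumber.open(f"empsit/empsit_{month}_{year}.pdf") as pdf:
        lines = pdf.pages[0].extract_text().split('\n')
    pieces = (lines[7] + lines[8]).split(',')
    return pieces[0] + pieces[1] + pieces[2]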
def read_nonfarm():
    startimee = time.time()
    for i in range(7, 13):
        print(f"Nonfarm payrolls, 2022-{i}: ", end="")
        print(read_pdf_nonfarm(i, 23))
    endtimee = time.time()
    print("Time: ", endtimee - startimee)
    # print(text.split('\n')[7:9])
def read_PCE():
    message = find_indicate_bs4("https://www.bea.gov/data/personal-consumption-expenditures-price-index")
    return message
# def read_PCE_test():
#     from bs4 import BeautifulSoup
#     import requests
#     response = requests.get("http://127.0.0.1:5000")
#     soup = BeautifulSoup(response.text, 'html.parser')
#
#     # Use a CSS selector to find the element
#     element = soup.select("p")
#     print(element[0].text)
#     return element[0].text
def broadcast_all(target: str):
    startimee = time.time()
    message = read_PCE()
    # Keep re-scraping until the latest period matches the target month.
    while message.split(' ')[0] != target:
        message = read_PCE()
    broadcast_message(message, "-1002033782195")
    endtimee = time.time()
    broadcast_message(
        f"Now :{datetime.fromtimestamp(time.time())} , Spend time :{str(round(endtimee - startimee, 3))} s",
        "-1002033782195")
def broadcast_all_sele(target: str):
    startimee = time.time()
    date, message = find_indicate("https://www.bea.gov/data/personal-consumption-expenditures-price-index")
    # Keep re-scraping with Selenium until the latest period matches the target month.
    while date.split(' ')[0] != target:
        date, message = find_indicate("https://www.bea.gov/data/personal-consumption-expenditures-price-index")
    broadcast_message(date + message, "-1002033782195")
    endtimee = time.time()
    broadcast_message(
        f"Now :{datetime.fromtimestamp(time.time())} , Spend time :{str(round(endtimee - startimee, 3))} s",
        "-1002033782195")
if __name__ == "__main__":
    # PCE release broadcast
    print("Start Time:", datetime.fromtimestamp(time.time()))
    schedule.every().day.at("21:20").do(broadcast_all_sele, "December")
    while True:
        # Only hand control to schedule during the specific minute of the release.
        if datetime.now().strftime("%Y-%m-%d %H:%M") == "2024-02-29 21:20":
            schedule.run_pending()
        time.sleep(0.1)  # Check every 0.1 seconds
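# Hedged sketch: schedule alone can drive the daily job. The wall-clock equality
# check above means run_pending() only executes during one specific minute; a
# simpler loop (shown commented out, not invoked) would be:
#
#   schedule.every().day.at("21:20").do(broadcast_all_sele, "December")
#   while True:
#       schedule.run_pending()
#       time.sleep(1)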