download_data_whole.py 3.3 KB

from xtquant import xtdata
from datetime import datetime as dt
import pandas as pd
import math
from sqlalchemy import create_engine
import multiprocessing as mp
import os
from apscheduler.schedulers.blocking import BlockingScheduler
import traceback

pd.set_option('display.max_columns', None)  # show all columns when printing DataFrames

path = 'C:\\qmt\\userdata_mini'  # QMT mini-client data directory (kept from the original; not used below)
field = ['time', 'open', 'close', 'high', 'low', 'volume', 'amount']
cpu_count = mp.cpu_count()
eng_w = create_engine('mysql+pymysql://root:r6kEwqWU9!v3@localhost:3307/qmt_stocks_whole?charset=utf8')
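# Note (added): df.to_sql() below creates or replaces one table per stock through
# this engine, so the qmt_stocks_whole schema must already exist on the MySQL
# server at localhost:3307 before the script runs.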
def err_call_back(err):
    # Error callback for Pool.apply_async. It runs in the parent process, where no
    # exception is active, so print the exception object we were handed rather than
    # relying on traceback.print_exc().
    print(f'Worker raised an error: {err!r}')
    traceback.print_exception(type(err), err, err.__traceback__)
def to_sql(stock_list):
    print(f'{dt.now()} starting ingest loop, pid {os.getpid()}')
    m = 0
    for stock in stock_list:
        # Back-adjusted (后复权) daily bars
        data_back = xtdata.get_market_data(field, [stock], '1d', end_time='', count=-1, dividend_type='back')
        df_back = pd.concat([data_back[i].loc[stock].T for i in
                             ['time', 'open', 'high', 'low', 'close', 'volume', 'amount']], axis=1)
        df_back.columns = ['time', 'open_back', 'high_back', 'low_back', 'close_back', 'volume_back', 'amount_back']
        df_back['time'] = df_back['time'].apply(lambda x: dt.fromtimestamp(x / 1000.0))
        df_back.reset_index(drop=True, inplace=True)
        # Front-adjusted (前复权) daily bars
        data_front = xtdata.get_market_data(field, [stock], '1d', end_time='', count=-1, dividend_type='front')
        df_front = pd.concat([data_front[i].loc[stock].T for i in
                              ['time', 'open', 'high', 'low', 'close', 'volume', 'amount']], axis=1)
        df_front.columns = ['time', 'open_front', 'high_front', 'low_front', 'close_front', 'volume_front',
                            'amount_front']
        df_front['time'] = df_front['time'].apply(lambda x: dt.fromtimestamp(x / 1000.0))
        # Align the two series on the timestamp column (both come back sorted by time)
        df = pd.merge_asof(df_back, df_front, on='time')
        # print(df)  # debug
        try:
            df.to_sql(f'{stock}_1d', con=eng_w, index=True, if_exists='replace')
        except Exception:
            print(stock)
        else:
            m += 1
    print(f'Pid {os.getpid()} done: {m} of {len(stock_list)} stocks written to the database')
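# A minimal sketch (added, not part of the original script) for spot-checking one
# table after a run. The stock code is hypothetical; table names follow the
# '<code>_1d' convention used in to_sql() above, and need backtick quoting in
# MySQL because the code contains a dot.
def spot_check(stock='000001.SZ'):
    df_check = pd.read_sql(f'SELECT * FROM `{stock}_1d` ORDER BY time DESC LIMIT 5', con=eng_w)
    print(df_check)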
def download_data():
    # Full Shanghai/Shenzhen A-share list; '沪深A股' is the sector name expected by xtquant
    stock_list = xtdata.get_stock_list_in_sector('沪深A股')
    stock_list.sort()
    print(dt.now(), 'download starting!')
    xtdata.download_history_data2(stock_list=stock_list, period='1d', start_time='', end_time='')
    print(dt.now(), 'download finished, starting database ingest!')
    # Split the stock list into one roughly equal slice per CPU core
    step = math.ceil(len(stock_list) / cpu_count)
    pool = mp.Pool(processes=cpu_count)
    for i in range(0, len(stock_list), step):
        pool.apply_async(func=to_sql, args=(stock_list[i:i + step],), error_callback=err_call_back)
    pool.close()
    pool.join()
    print(f'Daily data download complete {dt.now()}')
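# Example of the slicing above (added): with, say, roughly 5,000 A-share codes and
# an 8-core machine, step = ceil(5000 / 8) = 625, so each worker process ingests a
# contiguous slice of 625 stock codes.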
if __name__ == '__main__':
    # The __main__ guard is required on Windows, where multiprocessing spawns fresh
    # interpreter processes that re-import this module.
    # download_data()  # uncomment to run once immediately
    scheduler = BlockingScheduler()
    # Run every weekday (APScheduler: day_of_week 0 = Monday) at 20:10 China time
    scheduler.add_job(func=download_data, trigger='cron', day_of_week='0-4', hour='20', minute='10',
                      timezone='Asia/Shanghai')
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass