hlfx.py

import threading

import pymysql
import pandas as pd
from sqlalchemy import create_engine

# Database engine
# engine = create_engine('mysql+pymysql://root:r6kEwqWU9!v3@localhost:3307/qbh_hlfx?charset=utf8')

# Connect to the database
db = pymysql.connect(host='localhost',
                     user='root',
                     port=3307,
                     password='r6kEwqWU9!v3',
                     database='qbh_hlfx_backup')

# Bar-frequency suffix; only tables whose names end with it are selected below
fre = '1d'
cursor = db.cursor()
# cursor.execute("select table_name from information_schema.tables where table_schema='qbh_hlfx_backup' and table_name like {}".format('\'%{}\''.format(fre)))
cursor.execute('show tables like {}'.format('\'%{}\''.format(fre)))
# table_list = [tuple[0] for tuple in cursor.fetchall()]
# print(table_list)
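# Hypothetical variant of the commented-out line above (not in the original):
# build the table list from the `show tables` result instead of the
# hard-coded list used further down.
# period_tables = [row[0] for row in cursor.fetchall()]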

stk = threading.local()


# Main routine
# Find top/bottom (hdx lfx) fractals
def hlfx(table_list, engine, tosql):
    # Label top/bottom fractals in the 'HL' column of each table.
    # The core logic is currently commented out; only the debug prints run.
    for table in table_list:
        # stk.fxdf = pd.DataFrame(columns=('date', 'open', 'close', 'high', 'low', 'volume', 'money', 'HL'))
        # stk.df_day = pd.read_sql_query('select date,open,close,high,low,volume,money,HL from %s' % table, engine)
        print(engine)
        print(tosql)
        # stk.df_day.to_sql(name='%s' % table, con=tosql, index=True, if_exists='replace')
        # with tosql.connect() as con_backup:
        #     con_backup.execute('ALTER TABLE %s ADD PRIMARY KEY (`date`);' % table)
        # for i in stk.df_day.index:
        #     m = i - 1
        #     if i <= 3:
        #         # stk.fxdf = pd.concat([stk.fxdf, stk.df_day.iloc[[i]]], ignore_index=True)
        #         stk.df_day.loc[i, 'HL'] = '-'
        #     # Bottom fractal
        #     elif ((stk.df_day.loc[i, 'high'] > stk.df_day.loc[i - 1, 'high']) and (stk.df_day.loc[i - 2, 'high'] > stk.df_day.loc[i - 1, 'high'])):
        #         # if ((stk.df_day.loc[i-2, 'date'] != stk.fxdf.iloc[-1, 0]) and (stk.df_day.loc[i-3, 'date'] != stk.fxdf.iloc[-1, 0]) and (stk.df_day.loc[i-1, 'date'] != stk.fxdf.iloc[-1, 0])):
        #         #     stk.fxdf = pd.concat([stk.fxdf, stk.df_day.iloc[[i]]], ignore_index=True)
        #         stk.df_day.loc[i, 'HL'] = 'L*'
        #         while m:
        #             if m == 1:
        #                 stk.df_day.loc[i, 'HL'] = 'l'
        #             elif stk.df_day.loc[m, 'HL'] == 'H' or stk.df_day.loc[m, 'HL'] == 'h':
        #                 if (i - m) > 3:
        #                     stk.df_day.loc[i, 'HL'] = 'L'
        #                 break
        #             elif (stk.df_day.loc[m, 'HL'] == 'L' or stk.df_day.loc[m, 'HL'] == 'l'):
        #                 if stk.df_day.loc[i - 1, 'low'] < stk.df_day.loc[m - 1, 'low']:
        #                     # The previous one is a top, and a non-containing or lower bottom lies in between
        #                     stk.df_day.loc[i, 'HL'] = 'L'
        #                     break
        #                 else:
        #                     break
        #             m = m - 1
        #
        #     # Top fractal
        #     elif ((stk.df_day.loc[i, 'high'] < stk.df_day.loc[i - 1, 'high']) and (stk.df_day.loc[i - 2, 'high'] < stk.df_day.loc[i - 1, 'high'])):
        #         # if ((stk.df_day.loc[i-2, 'date'] != stk.fxdf.iloc[-1, 0]) and (stk.df_day.loc[i-3, 'date'] != stk.fxdf.iloc[-1, 0]) and (stk.df_day.loc[i-1, 'date'] != stk.fxdf.iloc[-1, 0])):
        #         #     stk.fxdf = pd.concat([stk.fxdf, stk.df_day.iloc[[i]]], ignore_index=True)
        #         stk.df_day.loc[i, 'HL'] = 'H*'
        #         while m:
        #             if m == 1:
        #                 stk.df_day.loc[i, 'HL'] = 'h'
        #             elif stk.df_day.loc[m, 'HL'] == 'L' or stk.df_day.loc[m, 'HL'] == 'l':
        #                 if i - m > 3:
        #                     stk.df_day.loc[i, 'HL'] = 'H'
        #                     stk.df_day.loc[i, 9] = stk.df_day.loc[i, 'close'] - stk.df_day.loc[m, 'close']
        #                 break
        #             elif (stk.df_day.loc[m, 'HL'] == 'H' or stk.df_day.loc[m, 'HL'] == 'h'):
        #                 if stk.df_day.loc[i - 1, 'high'] > stk.df_day.loc[m - 1, 'high']:
        #                     # The previous one is a bottom, and a non-containing or higher top lies in between
        #                     stk.df_day.loc[i, 'HL'] = 'H'
        #                     break
        #                 break
        #             m = m - 1
        #     else:
        #         stk.df_day.loc[i, 'HL'] = '-'
        # stk.df_day.to_sql('%s' % table, con=engine, index=True, if_exists='replace', chunksize=20000)
        # print(table, '\n', stk.df_day)
        # stk.df_day.to_csv('/Users/daniel/Library/CloudStorage/OneDrive-个人/个人/python_stocks/20220212hlfx2/hlfx%s.csv' % table)
        # stk.df_day.to_sql(name='%s' % table, con=tosql, index=True, if_exists='replace')
        # with tosql.connect() as con_backup:
        #     con_backup.execute('ALTER TABLE %s ADD PRIMARY KEY (`date`);' % table)
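
# A minimal, hypothetical illustration (not part of the original script) of the
# three-bar test used in the commented-out logic above: the middle bar is a
# candidate bottom ('L*') when its high sits below the highs of both
# neighbours, and a candidate top ('H*') in the mirrored case.
#
#     demo = pd.DataFrame({'high': [10.0, 9.0, 11.0], 'low': [8.0, 7.0, 9.0]})
#     i = 2
#     is_bottom = (demo.loc[i, 'high'] > demo.loc[i - 1, 'high']) and (demo.loc[i - 2, 'high'] > demo.loc[i - 1, 'high'])
#     print(is_bottom)  # True -> the middle bar would be marked 'L*'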

table_list = ['stk002237_1d', 'stk000004_1d']
# engine = create_engine('mysql+pymysql://root:r6kEwqWU9!v3@localhost:3307/qbh_hlfx_backup?charset=utf8')
# tosql = create_engine('mysql+pymysql://root:r6kEwqWU9!v3@localhost:3307/bb22?charset=utf8')
# hlfx(table_list, engine, tosql)

# Process the tables in batches of `step`, one worker thread per batch,
# each with its own pair of SQLAlchemy engines.
step = 50
thread_list = []
engine = []
tosql = []
for i in range(0, len(table_list), step):
    engine.append(create_engine('mysql+pymysql://root:r6kEwqWU9!v3@localhost:3307/qbh_hlfx_backup?charset=utf8'))
    tosql.append(create_engine('mysql+pymysql://root:r6kEwqWU9!v3@localhost:3307/bb22?charset=utf8'))
    # i advances by `step`, so index the engine lists by their newest entry
    # (engine[i]/tosql[i] would raise IndexError from the second batch onward)
    t = threading.Thread(target=hlfx, args=(table_list[i:i + step], engine[-1], tosql[-1]))
    thread_list.append(t)
    t.start()
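
# Not in the original script: a cleanup sketch, assuming no further work is
# done on the pymysql connection once the worker threads have been started.
# for t in thread_list:
#     t.join()
# db.close()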