Sfoglia il codice sorgente

Merge branch 'master' of http://34.206.75.59:10880/ASJ_ADS/sync_amz_data

guojing_wu 1 anno fa
parent
commit
063da4a8ff
37 file modificati, con 1499 aggiunte e 207 eliminazioni
  1. 2 0
      Pipfile
  2. 13 4
      start_sync_amz.py
  3. 4 4
      start_sync_amz_RightNowRun.py
  4. 295 0
      sync_amz_data/public/adjust_budget_bid.py
  5. 35 1
      sync_amz_data/public/amz_ad_client.py
  6. 728 169
      sync_amz_data/public/sp_api_client.py
  7. 12 0
      sync_amz_data/settings/__init__.py
  8. 4 0
      sync_amz_data/settings/dev.py
  9. 5 0
      sync_amz_data/settings/online.py
  10. 1 1
      sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sb_keywordsbid_recommendations_v3.py
  11. 2 2
      sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sbads.py
  12. 1 1
      sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sbcampaign.py
  13. 2 2
      sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sbgroup.py
  14. 19 1
      sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sbkeyword_v3.py
  15. 1 1
      sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sbnegativekeyword.py
  16. 2 2
      sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sbtarget_v3.py
  17. 1 1
      sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sbtargetbid_recommendations_v3.py
  18. 87 0
      sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sbthemetargeting_v3.py
  19. 1 1
      sync_amz_data/tasks/datainsert/SD/mysql_datainsert_sdcampaign.py
  20. 1 1
      sync_amz_data/tasks/datainsert/SP/mysql_datainsert_sp_budget_recommendation.py
  21. 5 3
      sync_amz_data/tasks/datainsert/SP/mysql_datainsert_sp_targetsbid_recommendations.py
  22. 1 1
      sync_amz_data/tasks/datainsert/SP/mysql_datainsert_sp_targetsbid_recommendations_v2.py
  23. 4 4
      sync_amz_data/tasks/datainsert/SP/mysql_datainsert_spads.py
  24. 1 1
      sync_amz_data/tasks/datainsert/SP/mysql_datainsert_spcampaign.py
  25. 1 1
      sync_amz_data/tasks/datainsert/SP/mysql_datainsert_spgroup.py
  26. 1 1
      sync_amz_data/tasks/datainsert/SP/mysql_datainsert_spkeyword.py
  27. 1 1
      sync_amz_data/tasks/datainsert/SP/mysql_datainsert_spnegativekeyword.py
  28. 1 1
      sync_amz_data/tasks/datainsert/SP/mysql_datainsert_spnegativetarget.py
  29. 1 1
      sync_amz_data/tasks/datainsert/SP/mysql_datainsert_sptarget.py
  30. 22 0
      sync_amz_data/tasks/datainsert/alldata_insert.py
  31. 44 0
      sync_amz_data/tasks/datainsert/categories_updata.py
  32. 110 0
      sync_amz_data/tasks/datainsert/mysql_datainsert_assets.py
  33. 1 1
      sync_amz_data/tasks/datainsert/mysql_datainsert_portfolios.py
  34. 2 1
      sync_amz_data/tasks/datainsert/wg.py
  35. 24 0
      sync_get_open_listing_data.py
  36. 31 0
      sync_get_order_data.py
  37. 33 0
      sync_listing_order_Retry.py

+ 2 - 0
Pipfile

@@ -11,6 +11,8 @@ apscheduler = "==3.10.4"
 s3fs = "==2023.10.0"
 clickhouse-connect = "==0.6.18"
 pandas = "==2.1.2"
+PyMySQL="==1.1.0"
+python-amazon-sp-api="==1.4.0"
 
 [dev-packages]
 

+ 13 - 4
start_sync_amz.py

@@ -7,8 +7,11 @@ from apscheduler.schedulers.blocking import BlockingScheduler
 
 
 def amz_report(AWS_CREDENTIALS,para=None):
-    refresh_token = shop_infos(AWS_CREDENTIALS['profile_id'])['refresh_token']
-    AWS_CREDENTIALS['refresh_token'] = refresh_token
+    try:
+        refresh_token = shop_infos(AWS_CREDENTIALS['profile_id'])['refresh_token']
+        AWS_CREDENTIALS['refresh_token'] = refresh_token
+    except Exception as e:
+        print(e)
 
     conn = SB_ETL(**AWS_CREDENTIALS).clickhouse_connect()
     sb_report = SB_ETL(**AWS_CREDENTIALS)
@@ -86,8 +89,11 @@ if __name__ == '__main__':
         'lwa_client_secret': 'cbf0514186db4df91e04a8905f0a91b605eae4201254ced879d8bb90df4b474d',
         'profile_id': "3006125408623189"
     }
-    refresh_token = shop_infos(AWS_CREDENTIALS['profile_id'])['refresh_token']
-    AWS_CREDENTIALS['refresh_token'] = refresh_token
+    try:
+        refresh_token = shop_infos(AWS_CREDENTIALS['profile_id'])['refresh_token']
+        AWS_CREDENTIALS['refresh_token'] = refresh_token
+    except Exception as e:
+        print(e)
 
     timezone_ = Common_ETLMethod(**AWS_CREDENTIALS).timeZone()
     print(timezone_)
@@ -96,3 +102,6 @@ if __name__ == '__main__':
     sched.start()
 
 
+
+
+

+ 4 - 4
start_sync_amz_RightNowRun.py

@@ -93,11 +93,11 @@ if __name__ == '__main__':
 
     refresh_token = shop_infos(AWS_CREDENTIALS['profile_id'])['refresh_token']
     AWS_CREDENTIALS['refresh_token'] = refresh_token
-    amz_report(conn, AWS_CREDENTIALS=AWS_CREDENTIALS)
+    # amz_report(conn, AWS_CREDENTIALS=AWS_CREDENTIALS)
 
-    # list_date = ["2023-10-22",]
-    # list_date = [f'2023-11-{"0"+str(i) if len(str(i))==1 else i}' for i in range(27,30)]
-    # print(list_date)
+    # list_date = ["2024-01-12","2024-01-11"]
+    # list_date = [f'2024-02-{"0"+str(i) if len(str(i))==1 else i}' for i in range(12,18)]
+    # # print(list_date)
     # for date_ in list_date:
     #     print(date_)
     #     print(date_.replace("-",""))

+ 295 - 0
sync_amz_data/public/adjust_budget_bid.py

@@ -0,0 +1,295 @@
+import pymysql
+import pandas as pd
+import numpy as np
+from datetime import datetime, timedelta, timezone
+
+pd.set_option('display.max_columns', None)
+pd.set_option('expand_frame_repr', False)
+import warnings
+from typing import Literal
+import json
+
+warnings.filterwarnings('ignore')
+
+
class Automation_Bid_Budget:
    """Hourly budget/bid weighting for a single Sponsored Products campaign.

    Reads raw Amazon Marketing Stream rows (sp_traffic_raw /
    sp_conversion_raw / sp_budget_usage) for one campaign over a
    configurable look-back window, then derives per-hour weights that a
    scheduler can use to reallocate daily budget and to scale keyword
    bids.  Every ``*_singleDay`` / ``*_week`` entry point returns a JSON
    string.
    """

    # Look-back literals accepted by ``time_period``, mapped to days.
    _PERIOD_DAYS = {
        "1week": 7,
        "2weeks": 14,
        "4weeks": 28,
        "6weeks": 42,
        "8weeks": 56,
        "12weeks": 84,
    }

    def __init__(self, campaign_id,
                 time_period: Literal["1week", "2weeks", "4weeks", "6weeks", "8weeks", "12weeks"] = "8weeks"
                 ):
        self.campaign_id = campaign_id
        self.time_period = time_period  # history window, defaults to 8 weeks

    def database_conv_traf(self):
        """Open a connection to the marketing-stream MySQL database.

        NOTE(review): credentials are hard-coded; they should live in
        settings/environment variables and be rotated.
        """
        conn = pymysql.connect(user="admin",
                               password="pvmBNS8q3duiUvvp",
                               host="amzn-retail.cluster-cnrgrbcygoap.us-east-1.rds.amazonaws.com",
                               database="zosi_ad_marketing_stream",
                               port=3306)
        return conn

    def _query_df(self, sql):
        """Run *sql* and return the full result set as a DataFrame.

        Closes the cursor and the connection even on failure (the
        original code leaked both on every call).
        """
        conn = self.database_conv_traf()
        try:
            with conn.cursor() as cursor:
                cursor.execute(sql)
                columns_name = [i[0] for i in cursor.description]
                rows = cursor.fetchall()
        finally:
            conn.close()
        return pd.DataFrame(rows, columns=columns_name)

    def get_sp_conversion(self):  # fetch conversions
        """Fetch conversion rows for the campaign/window, deduplicated.

        Rows sharing an ``idempotency_id`` are stream re-deliveries;
        only the first occurrence of each is kept.
        """
        sql = "select * from zosi_ad_marketing_stream.sp_conversion_raw" + self.add_condition(isbudgetTable=False)
        df = self._query_df(sql)
        return df.groupby('idempotency_id').head(1)

    def get_sp_traffic(self):  # fetch traffic
        """Fetch traffic rows for the campaign/window, deduplicated on
        ``idempotency_id`` exactly like :meth:`get_sp_conversion`."""
        sql = "select * from zosi_ad_marketing_stream.sp_traffic_raw" + self.add_condition(isbudgetTable=False)
        df = self._query_df(sql)
        return df.groupby('idempotency_id').head(1)

    def get_sp_budgetug(self):  # fetch budget usage
        """Fetch budget-usage rows for the campaign within the window."""
        sql = "select * from zosi_ad_marketing_stream.sp_budget_usage" + self.add_condition(isbudgetTable=True)
        return self._query_df(sql)

    def add_condition(self, isbudgetTable=False):
        """Build the SQL WHERE clause for the configured look-back window.

        Raises:
            ValueError: for an unknown ``time_period`` (the original code
                hit an UnboundLocalError on ``time_`` instead).

        NOTE(review): values are interpolated directly into the SQL
        string; this is only safe while campaign_id comes from trusted
        internal callers -- switch to parameterized queries if that ever
        changes.
        """
        try:
            days = self._PERIOD_DAYS[self.time_period]
        except KeyError:
            raise ValueError(f"unsupported time_period: {self.time_period!r}")
        time_ = datetime.today().date() + timedelta(days=-days)
        if isbudgetTable:
            return f" where usage_updated_timestamp>='{time_}' and budget_scope_id='{self.campaign_id}'"
        return f" where time_window_start>='{time_}' and campaign_id='{self.campaign_id}'"

    def merge_common_operation(self):
        """Join traffic with conversions and aggregate to hourly rows.

        Returns an empty DataFrame when the inner join yields no rows.

        NOTE(review): the conversion aggregation omits
        'attributed_conversions_7d_same_sku' and
        'attributed_conversions_14d' -- looks unintentional, confirm
        before relying on those columns.
        """
        conversion = self.get_sp_conversion()
        conversion_ = conversion.groupby(
            ['advertiser_id', 'marketplace_id', 'time_window_start', 'campaign_id', 'ad_group_id', 'ad_id',
             'keyword_id', 'placement', 'currency']).agg({
            'attributed_sales_1d': "sum",
            'attributed_sales_1d_same_sku': "sum",
            'attributed_sales_7d': "sum",
            'attributed_sales_7d_same_sku': "sum",
            'attributed_sales_14d': "sum",
            'attributed_sales_14d_same_sku': "sum",
            'attributed_sales_30d': "sum",
            'attributed_sales_30d_same_sku': "sum",
            'attributed_conversions_1d': "sum",
            'attributed_conversions_1d_same_sku': "sum",
            'attributed_conversions_7d': "sum",
            'attributed_conversions_14d_same_sku': "sum",
            'attributed_conversions_30d': "sum",
            'attributed_conversions_30d_same_sku': "sum",
            'attributed_units_ordered_1d': "sum",
            'attributed_units_ordered_1d_same_sku': "sum",
            'attributed_units_ordered_7d': "sum",
            'attributed_units_ordered_7d_same_sku': "sum",
            'attributed_units_ordered_14d': "sum",
            'attributed_units_ordered_14d_same_sku': "sum",
            'attributed_units_ordered_30d': "sum",
            'attributed_units_ordered_30d_same_sku': "sum"
        }).reset_index()
        traffic = self.get_sp_traffic()
        traffic[['impressions', 'clicks']] = traffic[['impressions', 'clicks']].astype('int64')
        traffic['cost'] = traffic['cost'].astype('float64')

        traffic_ = traffic.groupby(
            ['advertiser_id', 'marketplace_id', 'time_window_start', 'campaign_id', 'ad_group_id', 'ad_id',
             'keyword_id', 'keyword_text', 'placement', 'match_type', 'currency'
             ]).agg({'impressions': "sum",
                     'clicks': "sum",
                     'cost': "sum"
                     }).reset_index()
        traffic_conversion = traffic_.merge(conversion_,
                                            on=['advertiser_id', 'marketplace_id', 'campaign_id', 'ad_group_id',
                                                'ad_id', 'keyword_id', 'placement', 'time_window_start', 'currency'],
                                            how='inner')
        if len(traffic_conversion) < 1:
            return pd.DataFrame()
        traffic_conversion['hour'] = traffic_conversion['time_window_start'].dt.hour
        traffic_conversion['day'] = traffic_conversion['time_window_start'].dt.dayofweek
        # NOTE(review): this groupby sums the 'day' column away, so the
        # ``day == i`` filters in the *_week methods operate on a summed
        # value -- confirm the intended weekday split.
        traffic_conversion = traffic_conversion.groupby(
            ['campaign_id', 'ad_group_id', 'keyword_id', 'hour']).sum().reset_index()
        traffic_conversion['cpc'] = traffic_conversion['cost'] / traffic_conversion['clicks']
        return traffic_conversion

    def pre_deal(self, traffic_conversion):
        """Completion pass: give every (campaign, ad group, keyword) a
        full set of 24 hourly rows and fill missing bids.

        Returns ``[]`` for empty input (callers test ``len(...) < 1``).
        """
        if len(traffic_conversion) < 1:
            return []
        pro_list = traffic_conversion.groupby(['campaign_id', 'ad_group_id', 'keyword_id']).head(1)[
            ['campaign_id', 'ad_group_id', 'keyword_id']].to_numpy().tolist()
        filler_rows = []
        for cam_, adg, kid in pro_list:  # complete the 24-hour grid
            df0 = traffic_conversion.query("campaign_id==@cam_ and ad_group_id==@adg and keyword_id==@kid")
            existing_hours = df0['hour'].tolist()
            for hour in range(24):
                if hour not in existing_hours:
                    filler_rows.append({'campaign_id': cam_, 'ad_group_id': adg, 'keyword_id': kid, 'hour': hour})
        if filler_rows:
            # DataFrame.append() was removed in pandas 2.0 (the Pipfile
            # pins pandas 2.1.2); concat is the supported equivalent.
            traffic_conversion = pd.concat([traffic_conversion, pd.DataFrame(filler_rows)], ignore_index=True)
        traffic_conversion['cpc_min'] = traffic_conversion.groupby(['campaign_id', 'ad_group_id', 'keyword_id'])[
            'cpc'].transform('min')

        traffic_conversion = traffic_conversion.sort_values(by=['campaign_id', 'ad_group_id', 'keyword_id', 'hour'])

        # Hours without bid data get 45% of the keyword's minimum bid.
        traffic_conversion['cpc'] = traffic_conversion.apply(
            lambda x: x['cpc_min'] * 0.45 if pd.isna(x['cpc']) or x['cpc'] is None else x['cpc'], axis=1)
        return traffic_conversion

    def func_rule_budget(self, traffic_conversion):
        """Turn hourly performance into a per-hour budget share
        (``pre_percent_s3``); the 24 shares sum to roughly 1."""
        if len(traffic_conversion) < 1:
            return pd.DataFrame(columns=['hour', 'pre_percent_s3'])
        traffic_conversion = self.pre_deal(traffic_conversion)
        # Aggregate bids, conversions and clicks per hour of day.
        tf_c = traffic_conversion.groupby(['hour']).agg(
            {'cpc': "sum", 'attributed_conversions_1d': "sum", 'clicks': "sum"}).reset_index()
        # Emphasise hours with many conversions and cheap clicks.
        tf_c['pre_percent'] = tf_c.apply(
            lambda x: (x['attributed_conversions_1d'] ** 3 - (x['clicks'] - x['attributed_conversions_1d']) ** 3) / x[
                'cpc'] ** 3 + 1.001, axis=1)
        tf_c['pre_percent'] = tf_c['pre_percent'].map(lambda x: np.sqrt(x))  # square-root to compress spread
        # Invalid / NaN weights get a neutral 1.0001.
        tf_c['pre_percent'] = tf_c['pre_percent'].map(lambda x: 1.0001 if pd.isna(x) or x is None else x)
        # Down-weight the 23:00-05:00 hours to 60%.
        tf_c['pre_percent_s2'] = tf_c.apply(
            lambda x: x['pre_percent'] * 0.6 if x['hour'] < 6 or x['hour'] > 22 else x['pre_percent'], axis=1)
        total_val = tf_c['pre_percent_s2'].sum()
        # First-pass normalisation to shares (corrected below).
        tf_c['pre_percent_s2'] = tf_c['pre_percent_s2'] / total_val
        # Cap any single hour at 25% of the daily budget.
        tf_c['pre_percent_s3'] = tf_c['pre_percent_s2'].map(lambda x: 0.25 if x > 0.25 else x)
        tf_c['temp'] = tf_c['pre_percent_s2'] - tf_c['pre_percent_s3']
        total_allocate = tf_c['temp'].sum()
        # NOTE(review): 'temp' holds the excess above the cap (s2 - s3),
        # so counting entries equal to 0.25 here was probably meant to
        # count capped hours in 'pre_percent_s3'; as written
        # allocate_count is ~always 0 and the excess is never
        # redistributed.  Preserved as-is pending confirmation.
        allocate_count = tf_c['temp'].tolist().count(0.25)
        allocate_val = total_allocate / allocate_count if allocate_count != 0 else 0
        # Spread the clipped excess over the non-capped hours.
        tf_c['pre_percent_s3'] = tf_c['pre_percent_s3'].map(lambda x: x + allocate_val if x != 0.25 else 0.25)
        return tf_c[['hour', 'pre_percent_s3']]

    def budget_allocate_singleDay(self):
        """One budget share per hour for a single day, as JSON."""
        traffic_conversion = self.merge_common_operation()
        traffic_conversion = self.pre_deal(traffic_conversion)
        traffic_conversion = self.func_rule_budget(traffic_conversion)
        traffic_conversion.columns = ['hour', 'SingleDay']
        return json.dumps({"budget_allocate_singleDay": traffic_conversion.to_dict(orient='records')})

    def budget_allocate_week(self):
        """Separate hourly budget shares for each weekday, as JSON."""
        traffic_conversion = self.merge_common_operation()
        df = self.pre_deal(traffic_conversion[traffic_conversion['day'] == 0])
        df = self.func_rule_budget(df)
        for i in range(1, 7):
            df1 = self.pre_deal(traffic_conversion[traffic_conversion['day'] == i])
            df1 = self.func_rule_budget(df1)
            df = pd.merge(df, df1, how='left', on='hour')

        df.columns = ["hour", 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
        return json.dumps({"budget_allocate_week": df.round(4).to_dict(orient='records')})

    def rule_set_bid(self, avg_weight, cr, avg_cr, ctr, avg_ctr, weight_value, hour):
        """Map one hour's stats to a bid multiplier.

        Tiers by ``weight_value`` (cr/cpc) relative to the campaign
        average; night hours (23:00-05:00) are held at the tier floor.

        NOTE(review): several branches below compare ``cr`` against
        ``avg_ctr``; that reads as if ``avg_cr`` was intended --
        preserved as-is pending confirmation.
        """
        if weight_value > avg_weight * 1.5:  # outstanding hour
            return 2
        elif weight_value > avg_weight * 1.25:  # strong hour
            if hour in [23, 0, 1, 2, 3, 4, 5]:
                return 1.5
            else:
                return 1.5 + np.random.randint(100, 300) / 1000
        elif weight_value > avg_weight * 1.15:  # slightly above average
            if hour in [23, 0, 1, 2, 3, 4, 5]:
                return 1.25
            else:
                return 1.5 + np.random.randint(100, 200) / 1000
        elif weight_value > avg_weight:  # standard weight
            return 1
        else:
            if ctr >= avg_ctr and cr >= 0.75 * avg_ctr:  # good clicks, weak conversion
                return 1
            elif cr > avg_ctr:  # converts well, clicks poorly
                return 1.25
            elif cr > 0.75 * avg_cr:  # weak conversion
                return 0.75
            else:  # no cr/ctr for the hour, outside 23:00-05:00
                if ((pd.isna(cr) and pd.isna(ctr)) or None in [cr, ctr]) and hour not in [23, 0, 1, 2, 3, 4, 5]:
                    return [0.5, 0.7, 0.8, 0.9, 1, 1.1][np.random.randint(0, 5)]
            return 0.5  # everything else

    def func_rule_bid(self, traffic_conversion):
        """Apply :meth:`rule_set_bid` per hour; returns hour + multiplier."""
        if len(traffic_conversion) < 1:
            return pd.DataFrame(columns=['hour', 'weight_allocate'])
        tf_c = traffic_conversion.groupby(['hour']).agg(
            {'cost': "sum", 'attributed_conversions_1d': "sum", 'clicks': "sum", 'impressions': "sum"}).reset_index()
        tf_c['cpc'] = tf_c['cost'] / tf_c['clicks']
        tf_c['cr'] = tf_c['attributed_conversions_1d'] / tf_c['clicks']
        tf_c['ctr'] = tf_c['clicks'] / tf_c['impressions']
        avg_bid = tf_c['cpc'].mean()
        avg_cr = tf_c['attributed_conversions_1d'].sum() / tf_c['clicks'].sum()
        avg_ctr = tf_c['clicks'].sum() / tf_c['impressions'].sum()
        tf_c['weight_value'] = tf_c['cr'] / tf_c['cpc']
        avg_weight = avg_cr / avg_bid

        tf_c['weight_allocate'] = tf_c.apply(
            lambda x: self.rule_set_bid(avg_weight, x['cr'], avg_cr, x['ctr'], avg_ctr, x['weight_value'], x['hour']),
            axis=1)
        return tf_c[['hour', 'weight_allocate']].round(2)

    def bid_adjust_singleDay(self):
        """One bid multiplier per hour for a single day, as JSON."""
        traffic_conversion = self.merge_common_operation()
        tf_c = self.pre_deal(traffic_conversion)
        tf_c = self.func_rule_bid(tf_c)
        tf_c.columns = ['hour', 'SingleDay']
        return json.dumps({"bid_adjust_singleDay": tf_c.to_dict(orient='records')})

    def bid_adjust_week(self):
        """Separate hourly bid multipliers for each weekday, as JSON."""
        traffic_conversion = self.merge_common_operation()
        # Filter traffic for each weekday separately, then aggregate.
        df = self.pre_deal(traffic_conversion[traffic_conversion['day'] == 0])
        df = self.func_rule_bid(df)
        for i in range(1, 7):
            df1 = self.pre_deal(traffic_conversion[traffic_conversion['day'] == i])
            df1 = self.func_rule_bid(df1)
            df = pd.merge(df, df1, how='left', on='hour')

        df.columns = ["hour", 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']

        return json.dumps({"bid_adjust_week": df.to_dict(orient='records')})
+
+
if __name__ == '__main__':
    adjuster = Automation_Bid_Budget(campaign_id='325523075677132')

    # Bid multipliers per weekday/hour.
    print(adjuster.bid_adjust_week())

    print()

    # Budget shares per weekday/hour.
    print(adjuster.budget_allocate_week())
+
+

+ 35 - 1
sync_amz_data/public/amz_ad_client.py

@@ -627,7 +627,7 @@ class SBClient(BaseClient):
 
     def get_keywords(self,**param):
         url_path = "/sb/keywords"
-        return self._request(url_path, method="GET",params=param)
+        return self._request(url_path, method="GET", params=param)
 
     def get_keyword(self,keywordid):
         url_path = f'/sb/keywords/{keywordid}'
@@ -645,10 +645,44 @@ class SBClient(BaseClient):
             param["startIndex"] += 5000
             yield info
 
+    def get_keyword_groupid(self, **param):
+        url_path = "/sb/keywords"
+        headers = {
+            "Accept": "application/vnd.sbkeyword.v3.2+json"
+        }
+        return self._request(url_path, params=param, headers=headers)
+
+    def iter_keyword_groupid(self, **param):
+        if "startIndex" not in param:
+            param["startIndex"] = 0
+            param["count"] = 5000
+        while True:
+            info: list = self.get_keywords(**param)
+            # print(info)
+            if len(info) == 0:
+                break
+            param["startIndex"] += 5000
+            yield info
+
     def get_targets(self, **body):
         url_path = "/sb/targets/list"
         return self._request(url_path, method="POST", body=body)
 
+    def get_sbthemetargeting(self, **body):
+        url_path = "/sb/themes/list"
+        return self._request(url_path, method="POST", body=body)
+
+    def iter_sbthemetargeting(self, **body):
+        if "maxResults" not in body:
+            body["maxResults"] = 100
+        while True:
+            info: dict = self.get_sbthemetargeting(**body)
+            # print(info)
+            yield from info["themes"]
+            if not info.get("nextToken"):
+                break
+            body["nextToken"] = info["nextToken"]
+
     def iter_targets(self, **body):
         if "maxResults" not in body:
             body["maxResults"] = 100

File diff suppressed because it is too large
+ 728 - 169
sync_amz_data/public/sp_api_client.py


+ 12 - 0
sync_amz_data/settings/__init__.py

@@ -23,6 +23,18 @@ else:
 #     "db": 0
 # }
 
+MYSQL_AUTH_CONF = dict(user="admin",
+                       password="NSYbBSPbkGQUbOSNOeyy",
+                       host="retail-data.cnrgrbcygoap.us-east-1.rds.amazonaws.com",
+                       database="ansjer_dvadmin",
+                       port=3306)
+
+MYSQL_DATA_CONF = dict(user=MYSQL_DATA_USER,
+                       password=MASQL_DATA_PASSWORD,
+                       host=MASQL_DATA_HOST,
+                       database="asj_ads",
+                       port=3306)
+
 AWS_LWA_CLIENT = {
     'lwa_client_id': 'amzn1.application-oa2-client.ebd701cd07854fb38c37ee49ec4ba109',
     'lwa_client_secret': 'cbf0514186db4df91e04a8905f0a91b605eae4201254ced879d8bb90df4b474d',

+ 4 - 0
sync_amz_data/settings/dev.py

@@ -5,3 +5,7 @@ LOG_LEVEL = 10
 LOG_HANDLERS = ["console"]
 
 DATA_PATH = "/tmp"
+
+MASQL_DATA_HOST = "192.168.1.225"
+MYSQL_DATA_USER = "root"
+MASQL_DATA_PASSWORD = "sandbox"

+ 5 - 0
sync_amz_data/settings/online.py

@@ -5,3 +5,8 @@ LOG_HANDLERS = ["console", "email"]  # 使用supervisor重定向日志文件
 
 DATA_PATH = "/var/run/amazon_ad_api"
 
+LOG_LEVEL = 10
+
+MASQL_DATA_HOST = "amzn-retail.cluster-cnrgrbcygoap.us-east-1.rds.amazonaws.com"
+MYSQL_DATA_USER = "admin"
+MASQL_DATA_PASSWORD = "pvmBNS8q3duiUvvp"

+ 1 - 1
sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sb_keywordsbid_recommendations_v3.py

@@ -38,7 +38,7 @@ class SbkeywordsBidRecommendations:
         self.profile_id = profile_id
         self.re_url_path = "api/ad_manage/profiles/"
         self.sbk_url_path = "api/ad_manage/sbkbrkeywords/"
-        self.upcreate_url_path = "api/ad_manage/sbkeywordsbidrecommendation/updata/"
+        self.upcreate_url_path = "api/ad_manage/sbkeywordsbidrecommendation/p/updata/"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']

+ 2 - 2
sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sbads.py

@@ -33,7 +33,7 @@ class SbAds:
         self.profile_id = profile_id
         self.portfolioId = campaignId
         self.re_url_path = "api/ad_manage/profiles/"
-        self.upcreate_url_path = "api/ad_manage/sbads/updata/"
+        self.upcreate_url_path = "api/ad_manage/sbads/p/updata/"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']
@@ -70,7 +70,7 @@ class SbAds:
         df['extendedData.lastUpdateDate'] = pd.to_datetime(df['extendedData.lastUpdateDate'],
                                                                unit='ms').dt.strftime('%Y-%m-%d %H:%M:%S')
         col = ['adId', 'campaignId', 'landingPage.asins', 'landingPage.pageType', 'landingPage.url', 'name', 'state',
-               'adGroupId', 'creative.asins', 'creative.type', 'extendedData.creationDate',
+               'adGroupId', 'creative.asins', 'creative.type', 'creative.headline', 'extendedData.creationDate',
                'extendedData.lastUpdateDate', 'extendedData.servingStatus', 'extendedData.servingStatusDetails']
         for i in col:
             if i not in df.columns:

+ 1 - 1
sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sbcampaign.py

@@ -63,7 +63,7 @@ class SbCampaign:
         self.profile_id = profile_id
         self.portfolioId = portfolioId
         self.re_url_path = "api/ad_manage/profiles/"
-        self.upcreate_url_path = "api/ad_manage/sbcampaigns/updata/"
+        self.upcreate_url_path = "api/ad_manage/sbcampaigns/p/updata/"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']

+ 2 - 2
sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sbgroup.py

@@ -45,7 +45,7 @@ class SbGroup:
         self.profile_id = profile_id
         self.portfolioId = campaignId
         self.re_url_path = "api/ad_manage/profiles/"
-        self.upcreate_url_path = "api/ad_manage/sbgroups/updata/"
+        self.upcreate_url_path = "api/ad_manage/sbgroups/p/updata/"
         self.kid_url_path = "api/ad_manage/sbkeywordsadgroupId/"
         self.tid_url_path = "api/ad_manage/sbtargetsadgroupId/"
         self.aid_url_path = "api/ad_manage/sbadsadgroupId/"
@@ -134,7 +134,7 @@ class SbGroup:
                                           }})
         df_group = pd.json_normalize(list(list_group))
         return df_group
-    #----------------------------
+    # ----------------------------
 
     def dataconvert(self):
         df = self.get_spgroup_data()

+ 19 - 1
sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sbkeyword_v3.py

@@ -33,7 +33,8 @@ class SbKeyword:
         self.profile_id = profile_id
         self.portfolioId = campaignId
         self.re_url_path = "api/ad_manage/profiles/"
-        self.upcreate_url_path = "api/ad_manage/sbkeywords/updata/"
+        self.gid_url_path = "api/ad_manage/getsbadgroupId/"
+        self.upcreate_url_path = "api/ad_manage/sbkeywords/p/updata/"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']
@@ -57,6 +58,23 @@ class SbKeyword:
             out = None
         return out
 
+    def get_adgroup_data(self):
+        heads = self.heads
+        url_path = self.gid_url_path
+        data = []
+        page = 1
+        params = {'profile_id': self.profile_id, 'limit': 999, 'page': page}
+        tem = request(url_path=url_path, head=heads, params=params)
+        data.extend(tem.get('data'))
+        while tem.get('is_next') is True:
+            page += 1
+            params = {'profile_id': self.profile_id, 'limit': 999, 'page': page}
+            tem = request(url_path=url_path, head=heads, params=params)
+            data.extend(tem.get('data'))
+        df = pd.json_normalize(data)
+        out_list_id = df.adGroupId.to_list()
+        return out_list_id
+
     def get_sbkeywords_data(self):
         tem = SBClient(**self.AWS_CREDENTIALS)
         _ = tem.iter_keywords()

+ 1 - 1
sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sbnegativekeyword.py

@@ -33,7 +33,7 @@ class SbNegtiveKeyword:
         self.profile_id = profile_id
         self.portfolioId = campaignId
         self.re_url_path = "api/ad_manage/profiles/"
-        self.upcreate_url_path = "api/ad_manage/sbnegativekeyword/updata/"
+        self.upcreate_url_path = "api/ad_manage/sbnegativekeyword/p/updata/"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']

+ 2 - 2
sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sbtarget_v3.py

@@ -35,7 +35,7 @@ class SbTargets:
         self.profile_id = profile_id
         self.re_url_path = "api/ad_manage/profiles/"
         self.gid_url_path = "api/ad_manage/getsbadgroupId/"
-        self.upcreate_url_path = "api/ad_manage/sbtargets/updata/"
+        self.upcreate_url_path = "api/ad_manage/sbtargets/p/updata/"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']
@@ -112,7 +112,7 @@ class SbTargets:
             tem = request(url_path=url_path, head=heads, params=params)
             data.extend(tem.get('data'))
         df = pd.json_normalize(data)
-        out_list_id = list(df.adGroupId.values)
+        out_list_id = df.adGroupId.to_list()
         return out_list_id
 
     def get_sbtargets_data_new(self):

+ 1 - 1
sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sbtargetbid_recommendations_v3.py

@@ -62,7 +62,7 @@ class SbtargetsBidRecommendations:
         self.profile_id = profile_id
         self.re_url_path = "api/ad_manage/profiles/"
         self.sbk_url_path = "api/ad_manage/sbkbrtargets/"
-        self.upcreate_url_path = "api/ad_manage/sbtargetsbidrecommendation/updata/"
+        self.upcreate_url_path = "api/ad_manage/sbtargetsbidrecommendation/p/updata/"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']

+ 87 - 0
sync_amz_data/tasks/datainsert/SB/mysql_datainsert_sbthemetargeting_v3.py

@@ -0,0 +1,87 @@
+from sync_amz_data.DataTransform.Data_ETL import SB_ETL
+import requests
+from urllib.parse import urljoin
+from sync_amz_data.settings import AWS_LWA_CLIENT
+import pandas as pd
+import json
+from sync_amz_data.tasks.datainsert.wg import LADS
+from sync_amz_data.public.amz_ad_client import SBClient
+from typing import Literal
+
+
class RateLimitError(Exception):
    """Raised when the local ad-manage service answers HTTP 429.

    Attributes:
        retry_after: value of the ``Retry-After`` response header, or
            None when the server did not send one.
    """

    def __init__(self, retry_after: str = None):
        # Pass a message up to Exception so str(exc) and log lines show
        # the wait hint; the original skipped super().__init__ and
        # produced an empty message.
        super().__init__(f"rate limited, retry after: {retry_after}")
        self.retry_after = retry_after
+
+
def request(url_path: str, method: str = "GET", head: dict = None, params: dict = None, body: dict = None, AD=LADS):
    """Call the local ad-manage service and return the decoded JSON body.

    Args:
        url_path: path joined onto the service base URL *AD*.
        method: HTTP verb, default "GET".
        head: optional request headers.
        params: optional query-string parameters.
        body: optional JSON request body.
        AD: service base URL; defaults to the module-level LADS.

    Raises:
        RateLimitError: on HTTP 429, carrying the Retry-After header.
        Exception: on any other >= 400 response, with the response text.
    """
    # Context-manage the session so its connection pool is released;
    # the original created a new Session per call and never closed it.
    with requests.Session() as session:
        resp = session.request(
            method=method,
            url=urljoin(AD, url_path),
            headers=head,
            params=params,
            json=body,
        )
    if resp.status_code == 429:
        raise RateLimitError(resp.headers.get("Retry-After"))
    if resp.status_code >= 400:
        raise Exception(resp.text)
    return resp.json()
+
+
class SbThemeTargeting:
    """Sync SB theme-targeting records into the local ad-manage service.

    Pulls theme targets from the Amazon Ads API for one profile and
    POSTs them to the service's upsert endpoint.
    """

    def __init__(self, profile_id):
        self.profile_id = profile_id
        self.re_url_path = "api/ad_manage/profiles/"
        self.upcreate_url_path = "api/ad_manage/sbthemetargeting/p/updata/"
        self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
        # Resolved via the profiles endpoint (requires the paths/headers above).
        self.refresh_token = self.get_refresh_token()
        self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']
        self.lwa_client_secret = AWS_LWA_CLIENT['lwa_client_secret']
        self.AWS_CREDENTIALS = {
            'lwa_client_id': self.lwa_client_id,
            'lwa_client_secret': self.lwa_client_secret,
            'refresh_token': self.refresh_token,
            'profile_id': self.profile_id
        }

    def get_refresh_token(self):
        """Look up this profile's LWA refresh token, or None if absent."""
        resp = request(url_path=self.re_url_path, head=self.heads,
                       params={'profile_id': self.profile_id})
        data = resp.get('data')
        if data is None:
            return None
        return data[0].get('refresh_token')

    def get_sbthemetargeting_data(self):
        """Fetch all theme targets for the profile as a flat DataFrame."""
        client = SBClient(**self.AWS_CREDENTIALS)
        themes = client.iter_sbthemetargeting(**{"maxResults": 5000})
        df_themes = pd.json_normalize(list(themes))

        # Rename to the column names the local service expects.
        df_themes.rename(columns={
            'adGroupId': 'adGroup',
            'campaignId': 'campaign',
        }, inplace=True)
        df_themes['profile'] = self.profile_id
        return df_themes

    def updata_create(self):
        """POST the fetched theme targets to the upsert endpoint."""
        serialized = self.get_sbthemetargeting_data().to_json(orient='records', date_format='iso')
        payload = json.loads(serialized)
        return request(url_path=self.upcreate_url_path, head=self.heads,
                       body=payload, method="POST")
+
+
if __name__ == '__main__':
    task = SbThemeTargeting(profile_id="3006125408623189")
    result = task.updata_create()
    print(result)

+ 1 - 1
sync_amz_data/tasks/datainsert/SD/mysql_datainsert_sdcampaign.py

@@ -32,7 +32,7 @@ class SpCampaign:
         self.profile_id = profile_id
         self.portfolioId = portfolioId
         self.re_url_path = "api/ad_manage/profiles/"
-        self.upcreate_url_path = "api/ad_manage/sdcampaigns/updata/"
+        self.upcreate_url_path = "api/ad_manage/sdcampaigns/p/updata/"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']

+ 1 - 1
sync_amz_data/tasks/datainsert/SP/mysql_datainsert_sp_budget_recommendation.py

@@ -34,7 +34,7 @@ class SpBudgetRecommendation:
         self.portfolioId = portfolioId
         self.re_url_path = "api/ad_manage/profiles/"
         self.camid_url_path = "api/ad_manage/spcampaigns/?updata=1&limit=9999"
-        self.upcreate_url_path = "api/ad_manage/spbudgetrecommendation/updata/"
+        self.upcreate_url_path = "api/ad_manage/spbudgetrecommendation/p/updata/"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']

+ 5 - 3
sync_amz_data/tasks/datainsert/SP/mysql_datainsert_sp_targetsbid_recommendations.py

@@ -33,7 +33,7 @@ class SpTargetsBidRecommendations:
         self.profile_id = profile_id
         self.re_url_path = "api/ad_manage/profiles/"
         self.cgk_url_path = "api/ad_manage/sptbrkeywords/"
-        self.upcreate_url_path = "api/ad_manage/sptargetsbidrecommendation/updata/"
+        self.upcreate_url_path = "api/ad_manage/sptargetsbidrecommendation/p/updata/"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']
@@ -94,6 +94,8 @@ class SpTargetsBidRecommendations:
                         data = j.get('bidRecommendationsForTargetingExpressions')
                         out_data.extend(data)
                 temtest = pd.json_normalize(out_data)
+                if len(temtest) == 0:
+                    temtest = pd.DataFrame(data=[], columns=['value', 'type'])
                 temtest.rename(columns={'targetingExpression.value': 'value'}, inplace=True)
                 temtest.rename(columns={'targetingExpression.type': 'type'}, inplace=True)
                 df_tem = pd.merge(left=temtest, right=k_id_text_df, on=['value', 'type'], how='left')
@@ -124,6 +126,6 @@ class SpTargetsBidRecommendations:
 
 if __name__ == '__main__':
     a = SpTargetsBidRecommendations(profile_id="3006125408623189")
-    out = a.get_sptargetsbidrecommendation_data()
-    # out = a.updata_create()
+    # out = a.get_sptargetsbidrecommendation_data()
+    out = a.updata_create()
     print(out)

+ 1 - 1
sync_amz_data/tasks/datainsert/SP/mysql_datainsert_sp_targetsbid_recommendations_v2.py

@@ -56,7 +56,7 @@ class SpTargetsBidRecommendationsV2:
         self.profile_id = profile_id
         self.re_url_path = "api/ad_manage/profiles/"
         self.spt_url_path = "api/ad_manage/sptargetsv2/"
-        self.upcreate_url_path = "api/ad_manage/sptargetsbidrecommendationv2/updata/"
+        self.upcreate_url_path = "api/ad_manage/sptargetsbidrecommendationv2/p/updata/"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']

+ 4 - 4
sync_amz_data/tasks/datainsert/SP/mysql_datainsert_spads.py

@@ -32,7 +32,7 @@ class SpAds:
         self.profile_id = profile_id
         self.portfolioId = campaignId
         self.re_url_path = "api/ad_manage/profiles/"
-        self.upcreate_url_path = "api/ad_manage/spads/updata/"
+        self.upcreate_url_path = "api/ad_manage/spads/p/updata/"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']
@@ -95,6 +95,6 @@ class SpAds:
 
 if __name__ == '__main__':
     a = SpAds(profile_id="3006125408623189")
-    out = a.updata_create()
-    # out = a.dataconvert()
-    print(out)
+    # out = a.updata_create()
+    out = a.get_spads_data()
+    print(out)

+ 1 - 1
sync_amz_data/tasks/datainsert/SP/mysql_datainsert_spcampaign.py

@@ -52,7 +52,7 @@ class SpCampaign:
         self.profile_id = profile_id
         self.portfolioId = portfolioId
         self.re_url_path = "api/ad_manage/profiles/"
-        self.upcreate_url_path = "api/ad_manage/spcampaigns/updata/?updata=1"
+        self.upcreate_url_path = "api/ad_manage/spcampaigns/p/updata/?updata=1"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']

+ 1 - 1
sync_amz_data/tasks/datainsert/SP/mysql_datainsert_spgroup.py

@@ -43,7 +43,7 @@ class SpGroup:
         self.profile_id = profile_id
         self.portfolioId = campaignId
         self.re_url_path = "api/ad_manage/profiles/"
-        self.upcreate_url_path = "api/ad_manage/spgroups/updata/?updata=1"
+        self.upcreate_url_path = "api/ad_manage/spgroups/p/updata/?updata=1"
         self.kid_url_path = "api/ad_manage/spkeywordsadgroupId/"
         self.tid_url_path = "api/ad_manage/sptargetsadgroupId/"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}

+ 1 - 1
sync_amz_data/tasks/datainsert/SP/mysql_datainsert_spkeyword.py

@@ -32,7 +32,7 @@ class SpKeyword:
         self.profile_id = profile_id
         self.campaignId = campaignId
         self.re_url_path = "api/ad_manage/profiles/"
-        self.upcreate_url_path = "api/ad_manage/spkeywords/updata/?updata=1"
+        self.upcreate_url_path = "api/ad_manage/spkeywords/p/updata/?updata=1"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']

+ 1 - 1
sync_amz_data/tasks/datainsert/SP/mysql_datainsert_spnegativekeyword.py

@@ -32,7 +32,7 @@ class SpNegativeKeyword:
         self.profile_id = profile_id
         self.campaignId = campaignId
         self.re_url_path = "api/ad_manage/profiles/"
-        self.upcreate_url_path = "api/ad_manage/spnegativekeyword/updata/"
+        self.upcreate_url_path = "api/ad_manage/spnegativekeyword/p/updata/"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']

+ 1 - 1
sync_amz_data/tasks/datainsert/SP/mysql_datainsert_spnegativetarget.py

@@ -33,7 +33,7 @@ class SpNegativeTarget:
     def __init__(self, profile_id):
         self.profile_id = profile_id
         self.re_url_path = "api/ad_manage/profiles/"
-        self.upcreate_url_path = "api/ad_manage/spnegativetarget/updata/"
+        self.upcreate_url_path = "api/ad_manage/spnegativetarget/p/updata/"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']

+ 1 - 1
sync_amz_data/tasks/datainsert/SP/mysql_datainsert_sptarget.py

@@ -33,7 +33,7 @@ class SpTargets:
     def __init__(self, profile_id):
         self.profile_id = profile_id
         self.re_url_path = "api/ad_manage/profiles/"
-        self.upcreate_url_path = "api/ad_manage/sptargets/updata/?updata=1"
+        self.upcreate_url_path = "api/ad_manage/sptargets/p/updata/?updata=1"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']

+ 22 - 0
sync_amz_data/tasks/datainsert/alldata_insert.py

@@ -18,6 +18,10 @@ from SB.mysql_datainsert_sb_keywordsbid_recommendations_v3 import SbkeywordsBidR
 from SB.mysql_datainsert_sbtarget_v3 import SbTargets
 from SB.mysql_datainsert_sbtargetbid_recommendations_v3 import SbtargetsBidRecommendations
 from SB.mysql_datainsert_sbnegativekeyword import SbNegtiveKeyword
+from SB.mysql_datainsert_sbthemetargeting_v3 import SbThemeTargeting
+
+from mysql_datainsert_assets import Assets
+from categories_updata import Categories
 
 import time
 
@@ -31,6 +35,12 @@ def protime(start_time):
     time.sleep(5)
 
 
+start_time = time.time()
+cg = Categories(profile_id="3006125408623189")
+cgo = cg.updata_create()
+print("Categories", cgo)
+protime(start_time)
+
 start_time = time.time()
 pf = Portfolios("3006125408623189")
 pfo = pf.updata_create()
@@ -145,3 +155,15 @@ Sbnk = SbNegtiveKeyword(profile_id="3006125408623189")
 Sbnko = Sbnk.updata_create()
 print("SbNegtiveKeyword", Sbnko)
 protime(start_time)
+
+start_time = time.time()
+Sbtt = SbThemeTargeting(profile_id="3006125408623189")
+Sbtto = Sbtt.updata_create()
+print("SbThemeTargeting", Sbtto)
+protime(start_time)
+
+start_time = time.time()
+ass = Assets(profile_id="3006125408623189")
+asso = ass.updata_create()
+print("Assets", asso)
+protime(start_time)

+ 44 - 0
sync_amz_data/tasks/datainsert/categories_updata.py

@@ -0,0 +1,44 @@
+import requests
+from urllib.parse import urljoin
+from sync_amz_data.tasks.datainsert.wg import LADS
+
+
class RateLimitError(Exception):
    """Raised when the backend answers HTTP 429 (too many requests)."""

    def __init__(self, retry_after: str = None):
        # Value of the Retry-After response header, if the server sent one.
        self.retry_after = retry_after
+
+
def request(url_path: str, method: str = "GET", head: dict = None, params: dict = None, body: dict = None, AD=LADS):
    """Issue an HTTP request against the local ads backend and return its JSON body.

    Raises RateLimitError (carrying the Retry-After header) on HTTP 429 and
    a generic Exception holding the response text on any other 4xx/5xx.
    """
    base_url = AD
    response = requests.session().request(
        method=method,
        url=urljoin(base_url, url_path),
        headers=head,
        params=params,
        json=body,
    )
    if response.status_code == 429:
        raise RateLimitError(response.headers.get("Retry-After"))
    if response.status_code >= 400:
        raise Exception(response.text)
    return response.json()
+
+
class Categories:
    """Trigger the backend to refresh targetable-category data for one profile."""

    def __init__(self, profile_id):
        # Amazon Ads profile the refresh applies to.
        self.profile_id = profile_id
        self.upcreate_url_path = "api/ad_manage/targetable/categories/updata/"
        self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}

    def updata_create(self):
        """Call the backend refresh endpoint; returns its JSON response."""
        return request(
            url_path=self.upcreate_url_path,
            head=self.heads,
            method="GET",
            params={"profile_id": self.profile_id},
        )
+
+
if __name__ == '__main__':
    # Manual one-off refresh for a fixed test profile.
    job = Categories(profile_id="3006125408623189")
    print(job.updata_create())

+ 110 - 0
sync_amz_data/tasks/datainsert/mysql_datainsert_assets.py

@@ -0,0 +1,110 @@
+import requests
+from urllib.parse import urljoin
+from sync_amz_data.public.amz_ad_client import BaseClient
+from sync_amz_data.settings import AWS_LWA_CLIENT
+import pandas as pd
+import json
+from sync_amz_data.tasks.datainsert.wg import LADS
+
+
class Client(BaseClient):
    """Amazon Ads creative-assets client built on the shared BaseClient."""

    def assets_search(self, **body):
        """POST an asset-search query to /assets/search and return the parsed response."""
        search_headers = {
            "Accept": "application/vnd.creativeassetsgetresponse.v3+json",
            "Content-Type": "application/vnd.creativeassetsgetresponse.v3+json",
        }
        return self._request("/assets/search", method="POST", headers=search_headers, body=body)
+
+
class RateLimitError(Exception):
    """Signals an HTTP 429 from the backend; retry_after holds the Retry-After header."""

    def __init__(self, retry_after: str = None):
        self.retry_after = retry_after
+
+
def request(url_path: str, method: str = "GET", head: dict = None, params: dict = None, body: dict = None, AD = LADS):
    """Send an HTTP request to the local ads backend, returning the decoded JSON.

    HTTP 429 raises RateLimitError with the server's Retry-After value;
    any other status >= 400 raises Exception with the raw response text.
    """
    target = urljoin(AD, url_path)
    resp = requests.session().request(
        method=method,
        url=target,
        headers=head,
        params=params,
        json=body,
    )
    if resp.status_code == 429:
        raise RateLimitError(resp.headers.get("Retry-After"))
    if resp.status_code >= 400:
        raise Exception(resp.text)
    return resp.json()
+
+
class Assets:
    """Sync SB creative assets from the Amazon Ads API into the local backend.

    Pulls the profile's refresh token from the backend, pages through the
    /assets/search endpoint, normalizes the rows to JSON-serializable
    records, and POSTs them to the backend's upsert endpoint.
    """

    def __init__(self, profile_id):
        self.profile_id = profile_id
        self.re_url_path = "api/ad_manage/profiles/"
        self.upcreate_url_path = "api/ad_manage/sb/assets/p/updata/"
        self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
        self.refresh_token = self.get_refresh_token()
        self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']
        self.lwa_client_secret = AWS_LWA_CLIENT['lwa_client_secret']
        # Credential bundle consumed by the Amazon Ads API client.
        self.AWS_CREDENTIALS = {
            'lwa_client_id': self.lwa_client_id,
            'lwa_client_secret': self.lwa_client_secret,
            'refresh_token': self.refresh_token,
            'profile_id': self.profile_id
        }

    def get_refresh_token(self):
        """Return the stored refresh token for this profile, or None if absent."""
        tem = request(url_path=self.re_url_path, head=self.heads,
                      params={'profile_id': self.profile_id})
        data = tem.get('data')
        if data is None:
            return None
        return data[0].get('refresh_token')

    def get_assets_data(self):
        """Page through /assets/search and return all assets as a DataFrame.

        Pagination: an empty-string token requests the first page; the API
        omits 'token' from the response on the last page, ending the loop.
        """
        client = Client(**self.AWS_CREDENTIALS)
        token = ""
        asset_rows = []
        # Fixed: original compared `tokentext != None`; use identity test for None.
        while token is not None:
            body = {
                "pageCriteria": {
                    "identifier": {
                        "token": token
                    },
                    "size": 500
                }
            }
            page = client.assets_search(**body)
            token = page.get('token', None)
            asset_rows.extend(page.get('assetList', []))
        df_assets = pd.DataFrame(asset_rows)
        df_assets['profile_id'] = self.profile_id
        return df_assets

    def dataconvert(self):
        """Normalize the asset frame to a list of JSON-serializable records.

        Converts epoch-millisecond timestamps to 'YYYY-MM-DD' strings and
        stringifies list/bool columns so MySQL can store them.
        """
        df = self.get_assets_data()
        if df.empty:
            # Fixed: no assets for this profile — the timestamp columns are
            # missing on an empty frame and would raise KeyError below.
            return []
        df['creationTime'] = pd.to_datetime(df['creationTime'], unit='ms').dt.strftime('%Y-%m-%d')
        df['lastUpdatedTime'] = pd.to_datetime(df['lastUpdatedTime'], unit='ms').dt.strftime('%Y-%m-%d')
        df['assetSubTypeList'] = df['assetSubTypeList'].astype(str)
        df['isDownloadable'] = df['isDownloadable'].astype(str)
        return json.loads(df.to_json(orient='records', force_ascii=False))

    def updata_create(self):
        """POST the converted asset records to the backend upsert endpoint."""
        body = self.dataconvert()
        return request(url_path=self.upcreate_url_path, head=self.heads,
                       body=body, method="POST")
+
+
if __name__ == '__main__':
    # Manual one-off sync for a fixed test profile.
    job = Assets(profile_id="3006125408623189")
    # result = job.dataconvert()
    result = job.updata_create()
    print(result)

+ 1 - 1
sync_amz_data/tasks/datainsert/mysql_datainsert_portfolios.py

@@ -32,7 +32,7 @@ class Portfolios:
     def __init__(self, profile_id):
         self.profile_id = profile_id
         self.re_url_path = "api/ad_manage/profiles/"
-        self.upcreate_url_path = "api/ad_manage/portfolios/updata/"
+        self.upcreate_url_path = "api/ad_manage/portfolios/p/updata/"
         self.heads = {'X-Token': "da4ab6bc5cbf1dfa"}
         self.refresh_token = self.get_refresh_token()
         self.lwa_client_id = AWS_LWA_CLIENT['lwa_client_id']

+ 2 - 1
sync_amz_data/tasks/datainsert/wg.py

@@ -1 +1,2 @@
-LADS = "http://192.168.1.39:8001/"
+LADS = "http://192.168.1.19:8001/"
+# LADS = "https://ads.vzzon.com"

+ 24 - 0
sync_get_open_listing_data.py

@@ -0,0 +1,24 @@
import warnings
warnings.filterwarnings('ignore')
from apscheduler.schedulers.blocking import BlockingScheduler
from sync_amz_data.public import sp_api_client


def _run_with_one_retry(action, *args, **kwargs):
    """Run *action* once; on failure log the error and retry a single time.

    Fixed: the original used bare `except:`, which also swallows
    KeyboardInterrupt/SystemExit; narrowed to Exception and logged.
    A second failure propagates, as before.
    """
    try:
        action(*args, **kwargs)
    except Exception as e:
        print(e)
        action(*args, **kwargs)


def func_run():
    """Daily job: pull open-listings reports for all shops, then rebuild the listing info table."""
    _run_with_one_retry(sp_api_client.SpApiRequest.get_allShops,
                        "GET_FLAT_FILE_OPEN_LISTINGS_DATA")
    print("=" * 40)
    print("=" * 40)
    print("=" * 40)
    _run_with_one_retry(sp_api_client.SpApiRequest.listing_infoTable)
# func_run()

#
#
if __name__ == '__main__':
    # Fire once a day at 04:30.
    sched = BlockingScheduler()
    sched.add_job(func_run, 'cron', hour=4, minute=30, second=0)
    sched.start()

+ 31 - 0
sync_get_order_data.py

@@ -0,0 +1,31 @@

import warnings
warnings.filterwarnings('ignore')
from apscheduler.schedulers.blocking import BlockingScheduler
from sync_amz_data.settings import MYSQL_AUTH_CONF, MYSQL_DATA_CONF
from sync_amz_data.public import sp_api_client


def func_run():
    """Nightly job: refresh order and sales/traffic reports for D-2 and D-3."""
    try:
        for offset in (-2, -3):
            sp_api_client.SpApiRequest.get_allShops(
                "GET_FLAT_FILE_ALL_ORDERS_DATA_BY_ORDER_DATE_GENERAL", days=offset, **{})
    except Exception as e:
        print(e)
    try:
        for offset in (-2, -3):  # range(-29,-2):#
            for level in ("SKU", "PARENT", "CHILD"):
                sp_api_client.SpApiRequest.get_allShops(
                    "GET_SALES_AND_TRAFFIC_REPORT", days=offset, **{"level": level})
    except Exception as e:
        print(e)


# func_run()
if __name__ == '__main__':
    # Fire once a day shortly after midnight.
    sched = BlockingScheduler()
    sched.add_job(func_run, 'cron', hour=0, minute=0, second=30)
    sched.start()

+ 33 - 0
sync_listing_order_Retry.py

@@ -0,0 +1,33 @@
import warnings
warnings.filterwarnings('ignore')
from apscheduler.schedulers.blocking import BlockingScheduler
from sync_amz_data.public import sp_api_client


def func_run():
    """Evening retry pass: re-pull D-2 orders, sales/traffic, and open listings."""
    day_offset = -2
    try:
        sp_api_client.SpApiRequest.get_allShops(
            "GET_FLAT_FILE_ALL_ORDERS_DATA_BY_ORDER_DATE_GENERAL", days=day_offset, **{})
    except Exception as e:
        print(e)
    try:
        for level in ("SKU", "PARENT", "CHILD"):
            sp_api_client.SpApiRequest.get_allShops(
                "GET_SALES_AND_TRAFFIC_REPORT", days=day_offset, **{"level": level})
    except Exception as e:
        print(e)

    print("-" * 40)
    try:
        sp_api_client.SpApiRequest.get_allShops("GET_FLAT_FILE_OPEN_LISTINGS_DATA")
    except Exception as e:
        print(e)
    print("=" * 40)
    try:
        sp_api_client.SpApiRequest.listing_infoTable()
    except Exception as e:
        print(e)

#
if __name__ == '__main__':
    # Fire once a day at 18:00.
    sched = BlockingScheduler()
    sched.add_job(func_run, 'cron', hour=18, minute=0, second=0)
    sched.start()

Some files were not shown because too many files changed in this diff