Skip to content

Commit

Permalink
updating logging pattern and trimming deprecated code from create_splitcache helper
Browse files Browse the repository at this point in the history
  • Loading branch information
lockefox committed Jun 11, 2018
1 parent 6de8eb0 commit 82562b2
Showing 1 changed file with 18 additions and 60 deletions.
78 changes: 18 additions & 60 deletions scripts/create_splitcache.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,10 @@
HERE = path.abspath(path.dirname(__file__))
ROOT = path.dirname(HERE)

LOGGER = p_logging.DEFAULT_LOGGER
CACHE_PATH = path.join(ROOT, 'publicAPI', 'cache')
CONFIG = p_config.ProsperConfig(path.join(HERE, 'app.cfg'))
makedirs(CACHE_PATH, exist_ok=True)
PROGNAME = 'splitcache_helper'

REGION_LIST = [
10000001, #'Derelik',
Expand Down Expand Up @@ -101,27 +101,20 @@ def fetch_data(
region_id,
data_range,
data_source,
logger=LOGGER
logger=logging.getLogger(PROGNAME)
):
"""fetch/crunch data for cache
Args:
type_id (int): EVE Online type_id for data
data_range (int): days of back-propogation to fetch
data_source (:enum:`DataSources`): which data source to fetch
logger (:obj:`logging.logger`, optional): logging handle for printing
logger (:obj:`logging.logger`): logging handle for printing
Returns:
(:obj:`pandas.DataFrame`): data for caching
pandas.DataFrame: data for caching
"""
# if data_source == DataSources.CREST:
# data = fetch_crest(
# type_id,
# region_id,
# data_range,
# logger
# )
if data_source == DataSources.ESI:
data = fetch_esi(
type_id,
Expand All @@ -148,56 +141,22 @@ def fetch_data(
return data

CREST_MAX = 400
# def fetch_crest(
# type_id,
# region_id,
# data_range=400,
# logger=LOGGER
# ):
# """fetch data from CREST endpoint

# Args:
# type_id (int): EVE Online type_id
# region_id (int): EVE Online region_id
# data_range (int, optional): days of back-propogation
# logger (:obj:`logging.logger`, optional) logging handle

# Returns:
# (:obj:`pandas.DataFrame`): data from endpoint

# """
# logger.info('--Fetching price history: CREST')
# if data_range > CREST_MAX:
# warning_msg = 'CREST only returns %d days' % CREST_MAX
# warnings.warn(warning_msg, UserWarning)
# logger.warning(warning_msg)

# data = crest_utils.fetch_market_history(
# region_id,
# type_id,
# mode=api_utils.SwitchCCPSource.CREST,
# config=CONFIG,
# logger=logger
# )

# return data.tail(n=data_range)

def fetch_esi(
type_id,
region_id,
data_range=400,
logger=LOGGER
logger=logging.getLogger(PROGNAME)
):
"""fetch data from ESI endpoint
Args:
type_id (int): EVE Online type_id
region_id (int): EVE Online region_id
data_range (int, optional): days of back-propogation
logger (:obj:`logging.logger`, optional) logging handle
logger (:obj:`logging.logger`) logging handle
Returns:
(:obj:`pandas.DataFrame`): data from endpoint
pandas.DataFrame: data from endpoint
"""
logger.info('--Fetching price history: ESI')
Expand All @@ -219,18 +178,18 @@ def fetch_emd(
type_id,
region_id,
data_range=400,
logger=LOGGER
logger=logging.getLogger(PROGNAME)
):
"""fetch data from eve-marketdata endpoint
Args:
type_id (int): EVE Online type_id
region_id (int): EVE Online region_id
data_range (int, optional): days of back-propogation
logger (:obj:`logging.logger`, optional) logging handle
logger (:obj:`logging.logger`) logging handle
Returns:
(:obj:`pandas.DataFrame`): data from endpoint
pandas.DataFrame: data from endpoint
"""
logger.info('--Fetching price history: EMD')
Expand All @@ -251,7 +210,7 @@ def write_to_cache_file(
cache_path,
type_id=0,
region_id=0,
logger=LOGGER
logger=logging.getLogger(PROGNAME)
):
"""save data to tinyDB
Expand All @@ -260,7 +219,7 @@ def write_to_cache_file(
cache_path (str): path to cache file
type_id (int, optional): EVE Online type_id
region_id (int, optional): EVE Online region_id
logger (:obj:`logging.logger`, optional): logging handle
logger (:obj:`logging.logger`): logging handle
Returns:
None
Expand Down Expand Up @@ -359,20 +318,19 @@ def override_region_list(self, region_str):

def main(self):
"""application runtime"""
global LOGGER
LOGGER = self.__log_builder.logger
logger = self.__log_builder.logger

LOGGER.info('hello world')
logger.info('hello world')

for region_id in cli.terminal.Progress(self.region_list):
for type_id in self.type_id:
LOGGER.info('Fetching: {0}@{1}'.format(type_id, region_id))
logger.info('Fetching: {0}@{1}'.format(type_id, region_id))
data = fetch_data(
type_id,
region_id,
self.back_range,
self.data_source,
LOGGER
logger
)
if self.force:
## WARNING: deletes old cache values ##
Expand All @@ -381,13 +339,13 @@ def main(self):
self.cache_path,
type_id=type_id,
region_id=region_id,
logger=LOGGER
logger=logger
)
else:
write_to_cache_file(
data,
self.cache_path,
logger=LOGGER
logger=logger
)

if __name__ == '__main__':
Expand Down

0 comments on commit 82562b2

Please sign in to comment.