
Commit

fix asv for imports
more cleaning
jreback committed Mar 1, 2017
1 parent d19e7a7 commit 679a3ce
Showing 13 changed files with 45 additions and 43 deletions.
14 changes: 10 additions & 4 deletions asv_bench/benchmarks/pandas_vb_common.py
@@ -8,16 +8,22 @@
import random
import numpy as np
import threading
from importlib import import_module

try:
    from pandas.compat import range
except ImportError:
    pass

np.random.seed(1234)
try:
    import pandas._tseries as lib
except:
    import pandas.libs.lib as lib

# try em until it works!
for imp in ['pandas_tseries', 'pandas.lib', 'pandas.libs.lib']:
    try:
        lib = import_module(imp)
        break
    except:
        pass

try:
    Panel = Panel
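As context for the change above: the benchmark bootstrap now locates the pandas C extension by trying a list of candidate module paths until one imports. A self-contained sketch of that fallback pattern (illustrative only, with a narrowed except clause; not part of the commit):

    from importlib import import_module

    lib = None
    # Try each candidate location; the extension module moved between pandas
    # versions, so fall back until one import succeeds.
    for name in ['pandas_tseries', 'pandas.lib', 'pandas.libs.lib']:
        try:
            lib = import_module(name)
            break
        except ImportError:
            pass

    if lib is None:
        raise ImportError('could not locate a pandas lib extension module')
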
18 changes: 9 additions & 9 deletions doc/source/whatsnew/v0.20.0.txt
@@ -5,7 +5,7 @@ v0.20.0 (????, 2017)

This is a major release from 0.19 and includes a small number of API changes, several new features,
enhancements, and performance improvements along with a large number of bug fixes. We recommend that all
users upgrade to that version.
users upgrade to this version.

Highlights include:

@@ -255,7 +255,7 @@ Possible incompat for HDF5 formats for pandas < 0.13.0
been dropped in favor of ``pd.Series``. (:issue:`15098`).

This *may* cause HDF5 files that were created in prior versions to become unreadable if ``pd.TimeSeries``
was used. This is most likely to be for pandas < 0.13.0. If you find yourself in that situation.
was used. This is most likely to be for pandas < 0.13.0. If you find yourself in this situation.
You can use a recent prior version of pandas to read in your HDF5 files,
then write them out again after applying the procedure below.
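
A minimal sketch of that round trip, assuming a hypothetical legacy file ``legacy.h5`` with key ``'ts'``, run under a recent pre-0.20 pandas that can still read it (the whatsnew's full procedure is in the collapsed portion of this file):

    import pandas as pd

    # Read the legacy data with an older pandas (< 0.20) that still understands it ...
    data = pd.read_hdf('legacy.h5', 'ts')

    # ... then write it back out as a plain Series so newer pandas can read it.
    pd.Series(data).to_hdf('converted.h5', 'ts')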

@@ -460,16 +460,16 @@ If indicated, a deprecation warning will be issued if you reference that module.

"pandas.json", "pandas.io.json.libjson", "X"
"pandas.parser", "pandas.io.libparsers", "X"
"pandas.lib", "pandas.libs.lib", "X"
"pandas.io.sas.saslib", "pandas.io.sas.libsas", ""
"pandas.msgpack", "pandas.io.msgpack", ""
"pandas._testing", "pandas.util.libtesting", ""
"pandas._sparse", "pandas.sparse.libsparse", ""
"pandas._hash", "pandas.tools.libhash", ""
"pandas.tslib", "pandas.libs.tslib", ""
"pandas.index", "pandas.libs.index", ""
"pandas.algos", "pandas.libs.algos", ""
"pandas.lib", "pandas.libs.lib", "X"
"pandas.hashtable", "pandas.libs.hashtable", ""
"pandas._testing", "pandas.util.libtesting", ""
"pandas._sparse", "pandas.sparse.libsparse", ""
"pandas._hash", "pandas.tools.libhash", ""
"pandas._window", "pandas.core.libwindow", ""
"pandas._join", "pandas.libs.join", ""
"pandas._period", "pandas.libs.period", ""
@@ -533,7 +533,7 @@ New Behavior:
Other API Changes
^^^^^^^^^^^^^^^^^

- ``numexpr`` version is now required to be >= 2.4.6 and it will not be used at all if that requisite is not fulfilled (:issue:`15213`).
- ``numexpr`` version is now required to be >= 2.4.6 and it will not be used at all if this requisite is not fulfilled (:issue:`15213`).
- ``CParserError`` has been renamed to ``ParserError`` in ``pd.read_csv`` and will be removed in the future (:issue:`12665`)
- ``SparseArray.cumsum()`` and ``SparseSeries.cumsum()`` will now always return ``SparseArray`` and ``SparseSeries`` respectively (:issue:`12855`)
- ``DataFrame.applymap()`` with an empty ``DataFrame`` will return a copy of the empty ``DataFrame`` instead of a ``Series`` (:issue:`8222`)
@@ -599,7 +599,7 @@ Performance Improvements
Bug Fixes
~~~~~~~~~

- Bug in ``Timestamp.replace`` now raises ``TypeError`` when incorrect argument names are given; previously that raised ``ValueError`` (:issue:`15240`)
- Bug in ``Timestamp.replace`` now raises ``TypeError`` when incorrect argument names are given; previously this raised ``ValueError`` (:issue:`15240`)
- Bug in ``Index`` power operations with reversed operands (:issue:`14973`)
- Bug in ``TimedeltaIndex`` addition where overflow was being allowed without error (:issue:`14816`)
- Bug in ``TimedeltaIndex`` raising a ``ValueError`` when boolean indexing with ``loc`` (:issue:`14946`)
@@ -674,7 +674,7 @@ Bug Fixes
- Bug in ``DataFrame.resample().median()`` if duplicate column names are present (:issue:`14233`)

- Bug in ``DataFrame.groupby().describe()`` when grouping on ``Index`` containing tuples (:issue:`14848`)
- Bug in creating a ``MultiIndex`` with tuples and not passing a list of names; that will now raise ``ValueError`` (:issue:`15110`)
- Bug in creating a ``MultiIndex`` with tuples and not passing a list of names; this will now raise ``ValueError`` (:issue:`15110`)
- Bug in ``groupby().nunique()`` with a datetimelike-grouper where bins counts were incorrect (:issue:`13453`)

- Bug in catching an overflow in ``Timestamp`` + ``Timedelta/Offset`` operations (:issue:`15126`)
5 changes: 2 additions & 3 deletions pandas/core/common.py
@@ -8,8 +8,8 @@
from functools import partial

import numpy as np
import pandas.libs.lib as lib
import pandas.libs.tslib as tslib
from pandas.libs import lib, tslib

from pandas import compat
from pandas.compat import long, zip, iteritems
from pandas.core.config import get_option
@@ -476,7 +476,6 @@ def _where_compat(mask, arr1, arr2):
        new_vals = np.where(mask, arr1.view('i8'), arr2.view('i8'))
        return new_vals.view(_NS_DTYPE)

    import pandas.libs.tslib as tslib
    if arr1.dtype == _NS_DTYPE:
        arr1 = tslib.ints_to_pydatetime(arr1.view('i8'))
    if arr2.dtype == _NS_DTYPE:
3 changes: 1 addition & 2 deletions pandas/core/frame.py
@@ -93,8 +93,7 @@
from pandas.formats.printing import pprint_thing
import pandas.tools.plotting as gfx

import pandas.libs.lib as lib
import pandas.libs.algos as _algos
from pandas.libs import lib, algos as _algos

from pandas.core.config import get_option

5 changes: 1 addition & 4 deletions pandas/core/generic.py
@@ -6,12 +6,9 @@
import gc

import numpy as np
import pandas.libs.lib as lib
from pandas.libs import tslib

import pandas as pd


from pandas.libs import tslib, lib
from pandas.types.common import (_coerce_to_dtype,
                                 _ensure_int64,
                                 needs_i8_conversion,
7 changes: 3 additions & 4 deletions pandas/core/groupby.py
@@ -58,9 +58,9 @@
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.config import option_context
import pandas.libs.lib as lib
from pandas.libs.lib import Timestamp
from pandas.libs import tslib, algos as _algos

from pandas.libs import lib, tslib, algos as _algos
from pandas.libs.lib import Timestamp, count_level_2d

_doc_template = """
@@ -4004,7 +4004,6 @@ def _apply_to_column_groupbys(self, func):
    def count(self):
        """ Compute count of group, excluding missing values """
        from functools import partial
        from pandas.libs.lib import count_level_2d
        from pandas.types.missing import _isnull_ndarraylike as isnull

        data, _ = self._get_data_to_aggregate()
9 changes: 4 additions & 5 deletions pandas/core/internals.py
@@ -53,18 +53,17 @@

import pandas.core.missing as missing
from pandas.sparse.array import _maybe_to_sparse, SparseArray
import pandas.libs.lib as lib
import pandas.libs.tslib as tslib
from pandas.libs import lib, tslib
from pandas.libs.tslib import Timedelta
from pandas.libs.lib import BlockPlacement

import pandas.computation.expressions as expressions
from pandas.util.decorators import cache_readonly
from pandas.util.validators import validate_bool_kwarg

from pandas.libs.tslib import Timedelta
from pandas import compat, _np_version_under1p9
from pandas.compat import range, map, zip, u

from pandas.libs.lib import BlockPlacement


class Block(PandasObject):
"""
4 changes: 2 additions & 2 deletions pandas/core/missing.py
@@ -5,8 +5,8 @@
import numpy as np
from distutils.version import LooseVersion

from pandas.libs import algos
import pandas.libs.lib as lib
from pandas.libs import algos, lib

from pandas.compat import range, string_types
from pandas.types.common import (is_numeric_v_string_like,
                                 is_float_dtype, is_datetime64_dtype,
7 changes: 5 additions & 2 deletions pandas/core/ops.py
@@ -10,11 +10,14 @@
import numpy as np
import pandas as pd
import datetime

from pandas.libs import lib, index as _index, tslib, algos as _algos
from pandas.libs.tslib import iNaT

from pandas import compat
from pandas.util.decorators import Appender
import pandas.computation.expressions as expressions
from pandas.libs import lib, index as _index, tslib, algos as _algos
from pandas.libs.tslib import iNaT

from pandas.compat import bind_method
import pandas.core.missing as missing

7 changes: 4 additions & 3 deletions pandas/core/window.py
@@ -24,9 +24,10 @@
                                 needs_i8_conversion,
                                 is_timedelta64_dtype,
                                 is_list_like,
                                 _ensure_float64)
                                 _ensure_float64,
                                 is_scalar)
import pandas as pd
from pandas.libs.lib import isscalar

from pandas.core.base import (PandasObject, SelectionMixin,
                              GroupByMixin)
import pandas.core.common as com
@@ -154,7 +155,7 @@ def _gotitem(self, key, ndim, subset=None):
        self = self._shallow_copy(subset)
        self._reset_cache()
        if subset.ndim == 2:
            if isscalar(key) and key in subset or is_list_like(key):
            if is_scalar(key) and key in subset or is_list_like(key):
                self._selection = key
        return self

3 changes: 1 addition & 2 deletions pandas/io/pytables.py
@@ -44,8 +44,7 @@
from pandas.core.config import get_option
from pandas.computation.pytables import Expr, maybe_expression

import pandas.libs.lib as lib
from pandas.libs import tslib, algos
from pandas.libs import tslib, algos, lib

from distutils.version import LooseVersion

2 changes: 1 addition & 1 deletion pandas/tests/io/data/gbq_fake_job.txt
@@ -1 +1 @@
{u'status': {u'state': u'DONE'}, u'kind': u'bigquery#job', u'statistics': {u'query': {u'cacheHit': True, u'totalBytesProcessed': u'0'}, u'endTime': u'1377668744674', u'totalBytesProcessed': u'0', u'startTime': u'1377668744466'}, u'jobReference': {u'projectId': u'57288129629', u'jobId': u'bqjob_r5f956972f0190bdf_00000140c374bf42_2'}, u'etag': u'"4PTsVxg68bQkQs1RJ1Ndewqkgg4/oO4VmgFrAku4N6FWci9s7iFIftc"', u'configuration': {u'query': {u'createDisposition': u'CREATE_IF_NEEDED', u'query': u'SELECT * FROM [publicdata:samples.shakespeare]', u'writeDisposition': u'WRITE_TRUNCATE', u'destinationTable': {u'projectId': u'57288129629', u'tableId': u'anonb5ec450da88eeeb78a27784ea482ee75a146d442', u'datasetId': u'_d0b4f5f0d50dc68a3eb0fa6cba66a9a8687d9253'}}}, u'id': u'57288129629:bqjob_r5f956972f0190bdf_00000140c374bf42_2', u'selfLink': u'https://www.googleapis.com/bigquery/v2/projects/57288129629/jobs/bqjob_r5f956972f0190bdf_00000140c374bf42_2'}
{u'status': {u'state': u'DONE'}, u'kind': u'bigquery#job', u'statistics': {u'query': {u'cacheHit': True, u'totalBytesProcessed': u'0'}, u'endTime': u'1377668744674', u'totalBytesProcessed': u'0', u'startTime': u'1377668744466'}, u'jobReference': {u'projectId': u'57288129629', u'jobId': u'bqjob_r5f956972f0190bdf_00000140c374bf42_2'}, u'etag': u'"4PTsVxg68bQkQs1RJ1Ndewqkgg4/oO4VmgFrAku4N6FWci9s7iFIftc"', u'configuration': {u'query': {u'createDisposition': u'CREATE_IF_NEEDED', u'query': u'SELECT * FROM [publicdata:samples.shakespeare]', u'writeDisposition': u'WRITE_TRUNCATE', u'destinationTable': {u'projectId': u'57288129629', u'tableId': u'anonb5ec450da88eeeb78a27784ea482ee75a146d442', u'datasetId': u'_d0b4f5f0d50dc68a3eb0fa6cba66a9a8687d9253'}}}, u'id': u'57288129629:bqjob_r5f956972f0190bdf_00000140c374bf42_2', u'selfLink': u'https://www.googleapis.com/bigquery/v2/projects/57288129629/jobs/bqjob_r5f956972f0190bdf_00000140c374bf42_2'}
4 changes: 2 additions & 2 deletions pandas/tseries/frequencies.py
@@ -17,8 +17,8 @@
from pandas.tseries.offsets import DateOffset
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.tseries.offsets as offsets
import pandas.libs.lib as lib
import pandas.libs.tslib as tslib

from pandas.libs import lib, tslib
from pandas.libs.tslib import Timedelta
from pytz import AmbiguousTimeError

