diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py
index 464d01ebb1f6fc..eb111d6657808d 100644
--- a/asv_bench/benchmarks/pandas_vb_common.py
+++ b/asv_bench/benchmarks/pandas_vb_common.py
@@ -8,16 +8,22 @@ import random
 import numpy as np
 import threading
+from importlib import import_module
+
 try:
     from pandas.compat import range
 except ImportError:
     pass

 np.random.seed(1234)
-try:
-    import pandas._tseries as lib
-except:
-    import pandas.libs.lib as lib
+
+# try em until it works!
+for imp in ['pandas._tseries', 'pandas.lib', 'pandas.libs.lib']:
+    try:
+        lib = import_module(imp)
+        break
+    except:
+        pass

 try:
     Panel = Panel
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 07e615dbf65c3a..f242f0b48095c7 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -5,7 +5,7 @@ v0.20.0 (????, 2017)

 This is a major release from 0.19 and includes a small number of API changes, several new features,
 enhancements, and performance improvements along with a large number of bug fixes. We recommend that all
-users upgrade to that version.
+users upgrade to this version.

 Highlights include:

@@ -255,7 +255,7 @@ Possible incompat for HDF5 formats for pandas < 0.13.0
 been dropped in favor of ``pd.Series``. (:issue:`15098`). This *may* cause HDF5
 files that were created in prior versions to become unreadable if
 ``pd.TimeSeries``
-was used. This is most likely to be for pandas < 0.13.0. If you find yourself in that situation.
+was used. This is most likely to be for pandas < 0.13.0. If you find yourself in this situation,
 you can use a recent prior version of pandas to read in your HDF5 files,
 then write them out again after applying the procedure below.

@@ -460,16 +460,16 @@ If indicated, a deprecation warning will be issued if you reference that module.
    "pandas.json", "pandas.io.json.libjson", "X"
    "pandas.parser", "pandas.io.libparsers", "X"
+   "pandas.lib", "pandas.libs.lib", "X"
    "pandas.io.sas.saslib", "pandas.io.sas.libsas", ""
    "pandas.msgpack", "pandas.io.msgpack", ""
-   "pandas._testing", "pandas.util.libtesting", ""
-   "pandas._sparse", "pandas.sparse.libsparse", ""
-   "pandas._hash", "pandas.tools.libhash", ""
    "pandas.tslib", "pandas.libs.tslib", ""
    "pandas.index", "pandas.libs.index", ""
    "pandas.algos", "pandas.libs.algos", ""
-   "pandas.lib", "pandas.libs.lib", "X"
    "pandas.hashtable", "pandas.libs.hashtable", ""
+   "pandas._testing", "pandas.util.libtesting", ""
+   "pandas._sparse", "pandas.sparse.libsparse", ""
+   "pandas._hash", "pandas.tools.libhash", ""
    "pandas._window", "pandas.core.libwindow", ""
    "pandas._join", "pandas.libs.join", ""
    "pandas._period", "pandas.libs.period", ""
@@ -533,7 +533,7 @@ New Behavior:
 Other API Changes
 ^^^^^^^^^^^^^^^^^

-- ``numexpr`` version is now required to be >= 2.4.6 and it will not be used at all if that requisite is not fulfilled (:issue:`15213`).
+- ``numexpr`` version is now required to be >= 2.4.6 and it will not be used at all if this requisite is not fulfilled (:issue:`15213`).
 - ``CParserError`` has been renamed to ``ParserError`` in ``pd.read_csv`` and will be removed in the future (:issue:`12665`)
 - ``SparseArray.cumsum()`` and ``SparseSeries.cumsum()`` will now always return ``SparseArray`` and ``SparseSeries`` respectively (:issue:`12855`)
 - ``DataFrame.applymap()`` with an empty ``DataFrame`` will return a copy of the empty ``DataFrame`` instead of a ``Series`` (:issue:`8222`)
@@ -599,7 +599,7 @@ Performance Improvements
 Bug Fixes
 ~~~~~~~~~

-- Bug in ``Timestamp.replace`` now raises ``TypeError`` when incorrect argument names are given; previously that raised ``ValueError`` (:issue:`15240`)
+- Bug in ``Timestamp.replace`` now raises ``TypeError`` when incorrect argument names are given; previously this raised ``ValueError`` (:issue:`15240`)
 - Bug in ``Index`` power operations with reversed operands (:issue:`14973`)
 - Bug in ``TimedeltaIndex`` addition where overflow was being allowed without error (:issue:`14816`)
 - Bug in ``TimedeltaIndex`` raising a ``ValueError`` when boolean indexing with ``loc`` (:issue:`14946`)
@@ -674,7 +674,7 @@ Bug Fixes
 - Bug in ``DataFrame.resample().median()`` if duplicate column names are present (:issue:`14233`)
 - Bug in ``DataFrame.groupby().describe()`` when grouping on ``Index`` containing tuples (:issue:`14848`)
-- Bug in creating a ``MultiIndex`` with tuples and not passing a list of names; that will now raise ``ValueError`` (:issue:`15110`)
+- Bug in creating a ``MultiIndex`` with tuples and not passing a list of names; this will now raise ``ValueError`` (:issue:`15110`)
 - Bug in ``groupby().nunique()`` with a datetimelike-grouper where bins counts were incorrect (:issue:`13453`)
 - Bug in catching an overflow in ``Timestamp`` + ``Timedelta/Offset`` operations (:issue:`15126`)
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 0c5b704f2680cc..44cb96e9989714 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -66,7 +66,8 @@ def load_reduce(self):
     # 12588, extensions moving
     ('pandas._sparse', 'BlockIndex'): ('pandas.sparse.libsparse', 'BlockIndex'),
     ('pandas.tslib', 'Timestamp'): ('pandas.libs.tslib', 'Timestamp'),
-    ('pandas.tslib', '__nat_unpickle'): ('pandas.libs.tslib', '__nat_unpickle')
+    ('pandas.tslib', '__nat_unpickle'): ('pandas.libs.tslib', '__nat_unpickle'),
+    ('pandas._period', 'Period'): ('pandas.libs.period', 'Period')
 }
diff --git a/pandas/core/common.py b/pandas/core/common.py
index a194953c88ff99..285efa1c331626 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -8,8 +8,8 @@ from functools import partial

 import numpy as np
-import pandas.libs.lib as lib
-import pandas.libs.tslib as tslib
+from pandas.libs import lib, tslib
+
 from pandas import compat
 from pandas.compat import long, zip, iteritems
 from pandas.core.config import get_option
@@ -476,7 +476,6 @@ def _where_compat(mask, arr1, arr2):
         new_vals = np.where(mask, arr1.view('i8'), arr2.view('i8'))
         return new_vals.view(_NS_DTYPE)

-    import pandas.libs.tslib as tslib
     if arr1.dtype == _NS_DTYPE:
         arr1 = tslib.ints_to_pydatetime(arr1.view('i8'))
     if arr2.dtype == _NS_DTYPE:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 08c9647dc6f183..f7e508b74cce83 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -93,8 +93,7 @@ from pandas.formats.printing import pprint_thing
 import pandas.tools.plotting as gfx

-import pandas.libs.lib as lib
-import pandas.libs.algos as _algos
+from pandas.libs import lib, algos as _algos

 from pandas.core.config import get_option
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index cbb5fcdc8c116e..56528b53ed750b 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6,12 +6,9 @@ import gc

 import numpy as np
-import pandas.libs.lib as lib
-from pandas.libs import tslib
-
 import pandas as pd
-
+from pandas.libs import tslib, lib
 from pandas.types.common import (_coerce_to_dtype,
                                  _ensure_int64,
                                  needs_i8_conversion,
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 2f545bbbe03f01..9548c40c80dc5a 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -58,9 +58,9 @@ import pandas.core.algorithms as algos
 import pandas.core.common as com
 from pandas.core.config import option_context
-import pandas.libs.lib as lib
-from pandas.libs.lib import Timestamp
-from pandas.libs import tslib, algos as _algos
+
+from pandas.libs import lib, tslib, algos as _algos
+from pandas.libs.lib import Timestamp, count_level_2d

 _doc_template = """

@@ -4004,7 +4004,6 @@ def _apply_to_column_groupbys(self, func):
     def count(self):
         """ Compute count of group, excluding missing values """
         from functools import partial
-        from pandas.libs.lib import count_level_2d
         from pandas.types.missing import _isnull_ndarraylike as isnull

         data, _ = self._get_data_to_aggregate()
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 99298be3a1787b..0bc5b1e43414e9 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -53,18 +53,17 @@ import pandas.core.missing as missing
 from pandas.sparse.array import _maybe_to_sparse, SparseArray

-import pandas.libs.lib as lib
-import pandas.libs.tslib as tslib
+from pandas.libs import lib, tslib
+from pandas.libs.tslib import Timedelta
+from pandas.libs.lib import BlockPlacement
+
 import pandas.computation.expressions as expressions
 from pandas.util.decorators import cache_readonly
 from pandas.util.validators import validate_bool_kwarg
-from pandas.libs.tslib import Timedelta
 from pandas import compat, _np_version_under1p9
 from pandas.compat import range, map, zip, u

-from pandas.libs.lib import BlockPlacement
-

 class Block(PandasObject):
     """
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index fe43039b0786d6..e0d64f1b3d0c55 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -5,8 +5,8 @@ import numpy as np
 from distutils.version import LooseVersion

-from pandas.libs import algos
-import pandas.libs.lib as lib
+from pandas.libs import algos, lib
+
 from pandas.compat import range, string_types
 from pandas.types.common import (is_numeric_v_string_like,
                                  is_float_dtype,
                                  is_datetime64_dtype,
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index d004dd45876f7a..077dcb98dfa3d9 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -10,11 +10,14 @@ import numpy as np
 import pandas as pd
 import datetime
+
+from pandas.libs import lib, index as _index, tslib, algos as _algos
+from pandas.libs.tslib import iNaT
+
 from pandas import compat
 from pandas.util.decorators import Appender
 import pandas.computation.expressions as expressions
-from pandas.libs import lib, index as _index, tslib, algos as _algos
-from pandas.libs.tslib import iNaT
+
 from pandas.compat import bind_method
 import pandas.core.missing as missing
diff --git a/pandas/core/window.py b/pandas/core/window.py
index 6130e0a6b522c1..6fda60c449f429 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -24,9 +24,10 @@
                                  needs_i8_conversion,
                                  is_timedelta64_dtype,
                                  is_list_like,
-                                 _ensure_float64)
+                                 _ensure_float64,
+                                 is_scalar)
 import pandas as pd
-from pandas.libs.lib import isscalar
+
 from pandas.core.base import (PandasObject, SelectionMixin,
                               GroupByMixin)
 import pandas.core.common as com
@@ -154,7 +155,7 @@ def _gotitem(self, key, ndim, subset=None):
         self = self._shallow_copy(subset)
         self._reset_cache()
         if subset.ndim == 2:
-            if isscalar(key) and key in subset or is_list_like(key):
+            if is_scalar(key) and key in subset or is_list_like(key):
                 self._selection = key
         return self
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index c442646ee1ac65..1cd2bc72f10f4d 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -44,8 +44,7 @@ from pandas.core.config import get_option
 from pandas.computation.pytables import Expr, maybe_expression

-import pandas.libs.lib as lib
-from pandas.libs import tslib, algos
+from pandas.libs import tslib, algos, lib

 from distutils.version import LooseVersion
diff --git a/pandas/tests/io/data/gbq_fake_job.txt b/pandas/tests/io/data/gbq_fake_job.txt
index b329ef05de13df..2a0f09bc66ef3a 100644
--- a/pandas/tests/io/data/gbq_fake_job.txt
+++ b/pandas/tests/io/data/gbq_fake_job.txt
@@ -1 +1 @@
-{u'status': {u'state': u'DONE'}, u'kind': u'bigquery#job', u'statistics': {u'query': {u'cacheHit': True, u'totalBytesProcessed': u'0'}, u'endTime': u'1377668744674', u'totalBytesProcessed': u'0', u'startTime': u'1377668744466'}, u'jobReference': {u'projectId': u'57288129629', u'jobId': u'bqjob_r5f956972f0190bdf_00000140c374bf42_2'}, u'etag': u'"4PTsVxg68bQkQs1RJ1Ndewqkgg4/oO4VmgFrAku4N6FWci9s7iFIftc"', u'configuration': {u'query': {u'createDisposition': u'CREATE_IF_NEEDED', u'query': u'SELECT * FROM [publicdata:samples.shakespeare]', u'writeDisposition': u'WRITE_TRUNCATE', u'destinationTable': {u'projectId': u'57288129629', u'tableId': u'anonb5ec450da88eeeb78a27784ea482ee75a146d442', u'datasetId': u'_d0b4f5f0d50dc68a3eb0fa6cba66a9a8687d9253'}}}, u'id': u'57288129629:bqjob_r5f956972f0190bdf_00000140c374bf42_2', u'selfLink': u'https://www.googleapis.com/bigquery/v2/projects/57288129629/jobs/bqjob_r5f956972f0190bdf_00000140c374bf42_2'}
+{u'status': {u'state': u'DONE'}, u'kind': u'bigquery#job', u'statistics': {u'query': {u'cacheHit': True, u'totalBytesProcessed': u'0'}, u'endTime': u'1377668744674', u'totalBytesProcessed': u'0', u'startTime': u'1377668744466'}, u'jobReference': {u'projectId': u'57288129629', u'jobId': u'bqjob_r5f956972f0190bdf_00000140c374bf42_2'}, u'etag': u'"4PTsVxg68bQkQs1RJ1Ndewqkgg4/oO4VmgFrAku4N6FWci9s7iFIftc"', u'configuration': {u'query': {u'createDisposition': u'CREATE_IF_NEEDED', u'query': u'SELECT * FROM [publicdata:samples.shakespeare]', u'writeDisposition': u'WRITE_TRUNCATE', u'destinationTable': {u'projectId': u'57288129629', u'tableId': u'anonb5ec450da88eeeb78a27784ea482ee75a146d442', u'datasetId': u'_d0b4f5f0d50dc68a3eb0fa6cba66a9a8687d9253'}}}, u'id': u'57288129629:bqjob_r5f956972f0190bdf_00000140c374bf42_2', u'selfLink': u'https://www.googleapis.com/bigquery/v2/projects/57288129629/jobs/bqjob_r5f956972f0190bdf_00000140c374bf42_2'}
\ No newline at end of file
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index a3932082409d37..adbee6734b71c5 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -17,8 +17,8 @@ from pandas.tseries.offsets import DateOffset
 from pandas.util.decorators import cache_readonly, deprecate_kwarg
 import pandas.tseries.offsets as offsets
-import pandas.libs.lib as lib
-import pandas.libs.tslib as tslib
+
+from pandas.libs import lib, tslib
 from pandas.libs.tslib import Timedelta

 from pytz import AmbiguousTimeError
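
The whatsnew table above maps each old extension-module path (for example ``pandas.lib``) to its new home under ``pandas.libs``, and its "X" column marks the paths that emit a deprecation warning when referenced. The following sketch is only a hypothetical illustration of that idea, not the shim pandas actually ships; the helper name ``_install_deprecated_alias`` is invented for this example, and it assumes the new module path is importable.

# Hypothetical sketch, not pandas' actual deprecation shim: keep an old module
# path importable while steering users toward the new location.
import importlib
import sys
import types
import warnings


def _install_deprecated_alias(old_name, new_name):
    """Register ``old_name`` in sys.modules as a warning proxy for ``new_name``."""
    new_mod = importlib.import_module(new_name)

    class _DeprecatedModule(types.ModuleType):
        def __getattr__(self, attr):
            warnings.warn("%s is deprecated, use %s instead" % (old_name, new_name),
                          FutureWarning, stacklevel=2)
            return getattr(new_mod, attr)

    sys.modules[old_name] = _DeprecatedModule(old_name)


# e.g. _install_deprecated_alias('pandas.lib', 'pandas.libs.lib') would keep
# attribute access through the old path working while warning about the move
# (assuming 'pandas.libs.lib' exists in the installed version).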
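
The ``pickle_compat`` change above extends a rename table so that pickles written before the extension modules moved (for example ones referencing ``pandas._period.Period``) still resolve to the relocated classes. The snippet below is a simplified, hypothetical illustration of that mechanism, not pandas' actual ``pickle_compat`` code; ``_CLASS_LOCATIONS``, ``_RenamingUnpickler`` and ``load_compat`` are invented names, and the target paths simply mirror the entries in the diff.

# Hypothetical illustration of the rename-table idea used by pickle_compat:
# rewrite stale (module, name) pairs while unpickling so old pickles load.
import importlib
import pickle

_CLASS_LOCATIONS = {
    ('pandas._period', 'Period'): ('pandas.libs.period', 'Period'),
    ('pandas.tslib', 'Timestamp'): ('pandas.libs.tslib', 'Timestamp'),
}


class _RenamingUnpickler(pickle.Unpickler):
    def find_class(self, module, name):
        # Remap a moved class to its new location before resolving it.
        module, name = _CLASS_LOCATIONS.get((module, name), (module, name))
        return getattr(importlib.import_module(module), name)


def load_compat(fh):
    """Load a pickle from the open binary file ``fh``, remapping moved classes."""
    return _RenamingUnpickler(fh).load()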