
Commit

Handle duplicate column names in select_dtypes and get_dummies (#20839)
kunalgosar authored and jreback committed May 5, 2018
1 parent e8e6e89 commit bd4332f
Showing 5 changed files with 66 additions and 22 deletions.
1 change: 1 addition & 0 deletions doc/source/whatsnew/v0.23.0.txt
@@ -1359,6 +1359,7 @@ Reshaping
 - Bug in :meth:`DataFrame.astype` where column metadata is lost when converting to categorical or a dictionary of dtypes (:issue:`19920`)
 - Bug in :func:`cut` and :func:`qcut` where timezone information was dropped (:issue:`19872`)
 - Bug in :class:`Series` constructor with a ``dtype=str``, previously raised in some cases (:issue:`19853`)
+- Bug in :func:`get_dummies` and :func:`select_dtypes` where duplicate column names caused incorrect behavior (:issue:`20848`)
 
 Other
 ^^^^^
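
For context, a minimal reproduction sketch of the behavior this entry describes (not part of the commit; the frame and column names here are illustrative):

    import numpy as np
    import pandas as pd

    # Two columns share the label 'A': one integer, one object.
    df = pd.DataFrame([[1, 'a'], [2, 'b'], [3, 'c']], columns=['A', 'A'])

    # Before this fix, select_dtypes recorded its include/exclude decisions by
    # label, so each decision for a duplicated label overwrote every column
    # sharing that label; with the fix only the integer column is returned.
    print(df.select_dtypes(include=[np.number]))

    # get_dummies now encodes each object/category column on its own, even
    # when its label is duplicated, and passes the numeric column through.
    print(pd.get_dummies(df))
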
12 changes: 6 additions & 6 deletions pandas/core/frame.py
@@ -3076,15 +3076,15 @@ def select_dtypes(self, include=None, exclude=None):
         include_these = Series(not bool(include), index=self.columns)
         exclude_these = Series(not bool(exclude), index=self.columns)
 
-        def is_dtype_instance_mapper(column, dtype):
-            return column, functools.partial(issubclass, dtype.type)
+        def is_dtype_instance_mapper(idx, dtype):
+            return idx, functools.partial(issubclass, dtype.type)
 
-        for column, f in itertools.starmap(is_dtype_instance_mapper,
-                                           self.dtypes.iteritems()):
+        for idx, f in itertools.starmap(is_dtype_instance_mapper,
+                                        enumerate(self.dtypes)):
             if include:  # checks for the case of empty include or exclude
-                include_these[column] = any(map(f, include))
+                include_these.iloc[idx] = any(map(f, include))
             if exclude:
-                exclude_these[column] = not any(map(f, exclude))
+                exclude_these.iloc[idx] = not any(map(f, exclude))
 
         dtype_indexer = include_these & exclude_these
         return self.loc[com._get_info_slice(self, dtype_indexer)]
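
The hunk above switches the include/exclude bookkeeping from label-based to positional assignment. A small illustration of why that matters once labels repeat (a sketch, not from the commit):

    import pandas as pd

    mask = pd.Series([False, False], index=['A', 'A'])
    mask['A'] = True         # label-based: writes to every position named 'A'
    print(mask.tolist())     # [True, True]

    mask = pd.Series([False, False], index=['A', 'A'])
    mask.iloc[0] = True      # positional: touches only the first column's slot
    print(mask.tolist())     # [True, False]

This is exactly why include_these and exclude_these are now updated through .iloc[idx] rather than by column name.
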
43 changes: 27 additions & 16 deletions pandas/core/reshape/reshape.py
@@ -821,50 +821,61 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
     from pandas.core.reshape.concat import concat
     from itertools import cycle
 
+    dtypes_to_encode = ['object', 'category']
+
     if isinstance(data, DataFrame):
         # determine columns being encoded
 
         if columns is None:
-            columns_to_encode = data.select_dtypes(
-                include=['object', 'category']).columns
+            data_to_encode = data.select_dtypes(
+                include=dtypes_to_encode)
         else:
-            columns_to_encode = columns
+            data_to_encode = data[columns]
 
         # validate prefixes and separator to avoid silently dropping cols
         def check_len(item, name):
             len_msg = ("Length of '{name}' ({len_item}) did not match the "
                        "length of the columns being encoded ({len_enc}).")
 
             if is_list_like(item):
-                if not len(item) == len(columns_to_encode):
-                    len_msg = len_msg.format(name=name, len_item=len(item),
-                                             len_enc=len(columns_to_encode))
+                if not len(item) == data_to_encode.shape[1]:
+                    len_msg = \
+                        len_msg.format(name=name, len_item=len(item),
+                                       len_enc=data_to_encode.shape[1])
                     raise ValueError(len_msg)
 
         check_len(prefix, 'prefix')
         check_len(prefix_sep, 'prefix_sep')
 
         if isinstance(prefix, compat.string_types):
             prefix = cycle([prefix])
         if isinstance(prefix, dict):
-            prefix = [prefix[col] for col in columns_to_encode]
+            prefix = [prefix[col] for col in data_to_encode.columns]
 
         if prefix is None:
-            prefix = columns_to_encode
+            prefix = data_to_encode.columns
 
         # validate separators
         if isinstance(prefix_sep, compat.string_types):
             prefix_sep = cycle([prefix_sep])
         elif isinstance(prefix_sep, dict):
-            prefix_sep = [prefix_sep[col] for col in columns_to_encode]
+            prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]
 
-        if set(columns_to_encode) == set(data.columns):
+        if data_to_encode.shape == data.shape:
+            # Encoding the entire df, do not prepend any dropped columns
             with_dummies = []
+        elif columns is not None:
+            # Encoding only cols specified in columns. Get all cols not in
+            # columns to prepend to result.
+            with_dummies = [data.drop(columns, axis=1)]
         else:
-            with_dummies = [data.drop(columns_to_encode, axis=1)]
-
-        for (col, pre, sep) in zip(columns_to_encode, prefix, prefix_sep):
-
-            dummy = _get_dummies_1d(data[col], prefix=pre, prefix_sep=sep,
+            # Encoding only object and category dtype columns. Get remaining
+            # columns to prepend to result.
+            with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]
+
+        for (col, pre, sep) in zip(data_to_encode.iteritems(), prefix,
+                                   prefix_sep):
+            # col is (column_name, column), use just column data here
+            dummy = _get_dummies_1d(col[1], prefix=pre, prefix_sep=sep,
                                     dummy_na=dummy_na, sparse=sparse,
                                     drop_first=drop_first, dtype=dtype)
             with_dummies.append(dummy)
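
The loop now walks data_to_encode.iteritems() instead of looking each column up by name. A short sketch (not part of the commit) of the difference that makes when a label is duplicated:

    import pandas as pd

    df = pd.DataFrame({'x': list('abc'), 'y': list('bcd')})
    df.columns = ['A', 'A']

    # Label lookup returns every matching column at once, which is what broke
    # the old data[col] call inside get_dummies.
    print(type(df['A']))               # <class 'pandas.core.frame.DataFrame'>

    # iteritems() yields each underlying column exactly once as a Series.
    # (iteritems() is the API of this pandas era; later versions rename it
    # to items().)
    for name, col in df.iteritems():
        print(name, col.tolist())
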
17 changes: 17 additions & 0 deletions pandas/tests/frame/test_dtypes.py
@@ -287,6 +287,23 @@ def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
         ei = df[['b', 'c', 'f', 'k']]
         assert_frame_equal(ri, ei)
 
+    def test_select_dtypes_duplicate_columns(self):
+        # GH20839
+        odict = compat.OrderedDict
+        df = DataFrame(odict([('a', list('abc')),
+                              ('b', list(range(1, 4))),
+                              ('c', np.arange(3, 6).astype('u1')),
+                              ('d', np.arange(4.0, 7.0, dtype='float64')),
+                              ('e', [True, False, True]),
+                              ('f', pd.date_range('now', periods=3).values)]))
+        df.columns = ['a', 'a', 'b', 'b', 'b', 'c']
+
+        expected = DataFrame({'a': list(range(1, 4)),
+                              'b': np.arange(3, 6).astype('u1')})
+
+        result = df.select_dtypes(include=[np.number], exclude=['floating'])
+        assert_frame_equal(result, expected)
+
     def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
         df = DataFrame({'a': list('abc'),
                         'b': list(range(1, 4)),
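
As a reading aid for the expected frame above (a sketch, not part of the commit): after the rename, the six columns carry dtypes object, int64, uint8, float64, bool and datetime64[ns], and only the int64 and uint8 columns survive include=[np.number] combined with exclude=['floating']:

    import numpy as np

    assert issubclass(np.int64, np.number) and issubclass(np.uint8, np.number)
    assert issubclass(np.float64, np.number)         # included, then dropped as 'floating'
    assert not issubclass(np.bool_, np.number)       # booleans are not numeric here
    assert not issubclass(np.datetime64, np.number)  # datetimes are not numeric either
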
15 changes: 15 additions & 0 deletions pandas/tests/reshape/test_reshape.py
@@ -465,6 +465,21 @@ def test_get_dummies_dont_sparsify_all_columns(self, sparse):
 
         tm.assert_frame_equal(df[['GDP']], df2)
 
+    def test_get_dummies_duplicate_columns(self, df):
+        # GH20839
+        df.columns = ["A", "A", "A"]
+        result = get_dummies(df).sort_index(axis=1)
+
+        expected = DataFrame([[1, 1, 0, 1, 0],
+                              [2, 0, 1, 1, 0],
+                              [3, 1, 0, 0, 1]],
+                             columns=['A', 'A_a', 'A_b', 'A_b', 'A_c'],
+                             dtype=np.uint8).sort_index(axis=1)
+
+        expected = expected.astype({"A": np.int64})
+
+        tm.assert_frame_equal(result, expected)
+
 
 class TestCategoricalReshape(object):
 
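
The df fixture used by this test is defined elsewhere in the module; judging from the expected values it holds two object columns and one integer column. Under that assumption, a sketch of what the fixed get_dummies produces:

    import pandas as pd

    # Fixture contents inferred from the expected frame above, not quoted
    # from the test module.
    df = pd.DataFrame({'A': ['a', 'b', 'a'],
                       'B': ['b', 'b', 'c'],
                       'C': [1, 2, 3]})
    df.columns = ['A', 'A', 'A']

    print(pd.get_dummies(df))
    #    A  A_a  A_b  A_b  A_c
    # 0  1    1    0    1    0
    # 1  2    0    1    1    0
    # 2  3    1    0    0    1

The integer column passes through unchanged (hence the astype({"A": np.int64}) on the expected frame), while each duplicated object column gets its own block of uint8 dummy columns.
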
