Group Duplicate Column Ids in Pandas Dataframe

Group duplicate column IDs in pandas dataframe

Here's one NumPy approach -

import numpy as np

def group_duplicate_cols(df):
    a = df.values
    # sort the columns lexicographically so that identical columns end up adjacent
    sidx = np.lexsort(a)
    b = a[:,sidx]

    # mark positions where a sorted column equals its left neighbour, padded with False
    m = np.concatenate(([False], (b[:,1:] == b[:,:-1]).all(0), [False]))
    # transition points give the start/stop of each run of identical columns
    idx = np.flatnonzero(m[1:] != m[:-1])
    C = df.columns[sidx].tolist()
    return [C[i:j] for i,j in zip(idx[::2], idx[1::2]+1)]

Sample runs -

In [100]: df
Out[100]:
    A  B  C  D  E  F
a1  1  2  1  2  3  1
a2  2  4  2  4  4  1
a3  3  2  3  2  2  1
a4  4  1  4  1  1  1
a5  5  9  5  9  2  1

In [101]: group_duplicate_cols(df)
Out[101]: [['A', 'C'], ['B', 'D']]

# Let's add one more duplicate into group containing 'A'
In [102]: df.F = df.A

In [103]: group_duplicate_cols(df)
Out[103]: [['A', 'C', 'F'], ['B', 'D']]

To do the same for rows (index), we just need to switch the operations along the other axis, like so -

def group_duplicate_rows(df):
    a = df.values
    # sort the rows lexicographically so that identical rows end up adjacent
    sidx = np.lexsort(a.T)
    b = a[sidx]

    # same run-detection as above, just along the row axis
    m = np.concatenate(([False], (b[1:] == b[:-1]).all(1), [False]))
    idx = np.flatnonzero(m[1:] != m[:-1])
    C = df.index[sidx].tolist()
    return [C[i:j] for i,j in zip(idx[::2], idx[1::2]+1)]

Sample run -

In [260]: df2
Out[260]:
   a1  a2  a3  a4  a5
A   3   5   3   4   5
B   1   1   1   1   1
C   3   5   3   4   5
D   2   9   2   1   9
E   2   2   2   1   2
F   1   1   1   1   1

In [261]: group_duplicate_rows(df2)
Out[261]: [['B', 'F'], ['A', 'C']]

Benchmarking

Approaches -

# @John Galt's soln-1
from itertools import combinations
def combinations_app(df):
    return [x for x in combinations(df.columns, 2) if (df[x[0]] == df[x[-1]]).all()]

# @Abdou's soln
def pandas_groupby_app(df):
    return [tuple(d.index) for _,d in df.T.groupby(list(df.T.columns)) if len(d) > 1]

# @COLDSPEED's soln
def triu_app(df):
    c = df.columns.tolist()
    i, j = np.triu_indices(len(c), 1)
    x = [(c[_i], c[_j]) for _i, _j in zip(i, j) if (df[c[_i]] == df[c[_j]]).all()]
    return x

# @cmaher's soln
def lambda_set_app(df):
    return list(filter(lambda x: len(x) > 1, list(set([tuple([x for x in df.columns if all(df[x] == df[y])]) for y in df.columns]))))

Note: @John Galt's soln-2 wasn't included because, with inputs of size (8000,500), its proposed broadcasting would blow up memory.
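
For context, a rough sketch of the memory footprint such a broadcast would have at this size (an illustration of the point above, not the actual excluded solution) -

import numpy as np

# Illustration only: an all-pairs broadcast comparison on an (8000, 500) array
# would materialize an (8000, 500, 500) boolean intermediate, i.e. about
# 8000*500*500 bytes ~ 2 GB, before any reduction is applied.
a = np.random.randint(0, 10, (8000, 500))
# eq = a[:, :, None] == a[:, None, :]    # shape (8000, 500, 500), ~2 GB
# dup_mask = eq.all(axis=0)              # (500, 500) pairwise-duplicate mask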

Timings -

In [179]: # Setup inputs with sizes as mentioned in the question
...: df = pd.DataFrame(np.random.randint(0,10,(8000,500)))
...: df.columns = ['C'+str(i) for i in range(df.shape[1])]
...: idx0 = np.random.choice(df.shape[1], df.shape[1]//2,replace=0)
...: idx1 = np.random.choice(df.shape[1], df.shape[1]//2,replace=0)
...: df.iloc[:,idx0] = df.iloc[:,idx1].values
...:

# @John Galt's soln-1
In [180]: %timeit combinations_app(df)
1 loops, best of 3: 24.6 s per loop

# @Abdou's soln
In [181]: %timeit pandas_groupby_app(df)
1 loops, best of 3: 3.81 s per loop

# @COLDSPEED's soln
In [182]: %timeit triu_app(df)
1 loops, best of 3: 25.5 s per loop

# @cmaher's soln
In [183]: %timeit lambda_set_app(df)
1 loops, best of 3: 27.1 s per loop

# Proposed in this post
In [184]: %timeit group_duplicate_cols(df)
10 loops, best of 3: 188 ms per loop

Super boost with NumPy's view functionality

Leveraging NumPy's view functionality, which lets us view each group of elements as one dtype, we can gain a further noticeable performance boost, like so -

def view1D(a): # a is a 2D array
    a = np.ascontiguousarray(a)
    # view each row as a single opaque (void) element covering all of its bytes
    void_dt = np.dtype((np.void, a.dtype.itemsize * a.shape[1]))
    return a.view(void_dt).ravel()

def group_duplicate_cols_v2(df):
    a = df.values
    # each column collapses to one void element, so a plain argsort replaces lexsort
    sidx = view1D(a.T).argsort()
    b = a[:,sidx]

    m = np.concatenate(([False], (b[:,1:] == b[:,:-1]).all(0), [False]))
    idx = np.flatnonzero(m[1:] != m[:-1])
    C = df.columns[sidx].tolist()
    return [C[i:j] for i,j in zip(idx[::2], idx[1::2]+1)]
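
To see what view1D buys us, here's a tiny check (my own illustration, reusing the view1D defined above) - each column collapses to a single opaque element, so byte-identical columns become identical elements that argsort or np.unique can group -

import numpy as np

# uses view1D defined above
x = np.array([[1, 3, 1],
              [2, 4, 2]])              # columns 0 and 2 are identical
v = view1D(x.T)                        # one opaque element per column
print(v.shape)                         # (3,)
print(np.unique(v).shape)              # (2,) -- only two distinct columns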

Timings -

In [322]: %timeit group_duplicate_cols(df)
10 loops, best of 3: 185 ms per loop

In [323]: %timeit group_duplicate_cols_v2(df)
10 loops, best of 3: 69.3 ms per loop

Just crazy speedups!

How to duplicate column values across the same id value python / pandas

print(df.fillna(method='ffill'))

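If the fill is meant to stay within each id rather than run across the whole frame, a grouped forward fill does that. A small sketch - the column names id and value are assumptions, not taken from the question -

import numpy as np
import pandas as pd

# hypothetical frame: 'value' is only present on the first row of each id
df = pd.DataFrame({'id':    ['a', 'a', 'a', 'b', 'b'],
                   'value': [1.0, np.nan, np.nan, 2.0, np.nan]})

# forward-fill within each id group only, so values never leak between ids
df['value'] = df.groupby('id')['value'].ffill()
print(df)
#   id  value
# 0  a    1.0
# 1  a    1.0
# 2  a    1.0
# 3  b    2.0
# 4  b    2.0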

find duplicated groups in dataframe

You can do it like this:

First, sort_values (just in case), set_index on id, and stack to reshape your data, turning it into a single column with to_frame:

df_ = (df.sort_values(by=["value1","value2","value3"])
         .set_index('id')[["value1","value2","value3"]]
         .stack()
         .to_frame()
      )

Second, append another index level holding a cumcount per id via set_index, drop the index level that carries the original column names (value1, ...), unstack to get one row per id, fillna with a placeholder value, and use duplicated:

s_dup = df_.set_index([df_.groupby('id').cumcount()], append=True)\
           .reset_index(level=1, drop=True)[0]\
           .unstack()\
           .fillna(0)\
           .duplicated(keep=False)
print (s_dup)
id
A     True
B    False
C     True
D    False
dtype: bool

Now you can just map to the original dataframe:

df['dup'] = df['id'].map(s_dup)
print (df)
   id  value1  value2  value3    dup
0   A       1       1       1   True
1   A       2       2       2   True
2   A       3       3       3   True
3   A       4       4       4   True
4   B       1       1       1  False
5   B       2       2       2  False
6   C       2       2       2   True
7   C       1       1       1   True
8   C       3       3       3   True
9   C       4       4       4   True
10  D       1       1       1  False
11  D       2       2       2  False
12  D       3       3       3  False

Converting rows to wide columns based on duplicated ids in another column in pandas

You can group by ID, aggregate via list, and then create a new DataFrame from the results:

col = 'Publication_type'
new_df = pd.DataFrame(df.groupby('ID')[col].agg(lambda x: x.tolist()).tolist()).replace({None: np.nan})
new_df.columns = [f'{col}{i}' for i in new_df.columns + 1]
new_df['ID'] = df['ID'].drop_duplicates().reset_index(drop=True)

Output:

>>> new_df
  Publication_type1 Publication_type2 Publication_type3  ID
0           Journal    Clinical-study         Guideline   1
1           Journal            Letter               NaN   2
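
For reference, the long-format input the snippet above presumably starts from (reconstructed from the output, so treat it as an assumption) -

import pandas as pd

df = pd.DataFrame({
    'ID':               [1, 1, 1, 2, 2],
    'Publication_type': ['Journal', 'Clinical-study', 'Guideline',
                         'Journal', 'Letter'],
})
# running the groupby/agg snippet above on this frame yields the two-row wide output shown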

Group duplicate column names in pandas DataFrame with missing values

Replace the NaN elements with some value not present in your dataframe (e.g. -1 if you only have positive numbers), using fillna():

df.fillna(-1, inplace=True)

This will return:

     A    B    C    D    E    F    G    H    I
0  1.0  2.0  1.0  2.0  3.0  1.0  1.0  1.0  1.0
1  2.0  4.0  2.0  4.0  4.0  1.0  1.0  1.0  2.0
2  3.0  2.0  3.0  2.0  2.0  1.0  1.0  1.0  3.0
3  4.0  1.0  4.0  1.0  1.0  1.0  1.0  1.0 -1.0
4  5.0  9.0  5.0  9.0  2.0  1.0 -1.0 -1.0  5.0
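
With the NaNs replaced by a sentinel, the group_duplicate_cols function from the first answer can be applied as-is. A minimal sketch, assuming that function (and the frame above) is in scope -

# assumes group_duplicate_cols from the first answer is defined, and that -1
# never occurs in the real data (otherwise pick a different sentinel)
groups = group_duplicate_cols(df.fillna(-1))
# columns A and C above hold identical values, so they land in the same group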

Grouping by multiple columns to find duplicate rows pandas

You need duplicated with the subset parameter to specify which columns to check and keep=False to mark all duplicates, then filter by boolean indexing:

df = df[df.duplicated(subset=['val1','val2'], keep=False)]
print (df)
   id  val1  val2
0   1   1.1   2.2
1   1   1.1   2.2
3   3   8.8   6.2
4   4   1.1   2.2
5   5   8.8   6.2

Detail:

print (df.duplicated(subset=['val1','val2'], keep=False))
0     True
1     True
2    False
3     True
4     True
5     True
dtype: bool

Find duplicates in dataframe and group them by assigning a key

For a simple key with an integer ID, you can first convert the Links column to categorical data, then just obtain the category code from that:

df['GroupID'] = df['Links'].astype('category').cat.codes
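
A quick sketch of what this produces, with made-up Links values (the real data isn't shown in the question) -

import pandas as pd

df = pd.DataFrame({'Links': ['x.com', 'y.com', 'x.com', 'z.com', 'y.com']})
# identical Links values get the same category code, which serves as the group key
df['GroupID'] = df['Links'].astype('category').cat.codes
print(df)
#    Links  GroupID
# 0  x.com        0
# 1  y.com        1
# 2  x.com        0
# 3  z.com        2
# 4  y.com        1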

Group duplicate columns and sum the corresponding column values using pandas

Let's try:

df['TIME'] = pd.to_datetime(df['TIME'])
ax = df.groupby('TIME')['BYTES'].sum().plot()
ax.set_xlim('13:00:00','13:03:00')

Output:

[Line plot of BYTES summed per TIME, x-axis limited to 13:00:00-13:03:00]


