Is it possible to specify your own distance function using scikit-learn K-Means Clustering?
Here's a small kmeans that uses any of the 20-odd distances in
scipy.spatial.distance, or a user function.
Comments would be welcome (this has had only one user so far, which is not enough); in particular, what are your N, dim, k, metric?
#!/usr/bin/env python
# kmeans.py using any of the 20-odd metrics in scipy.spatial.distance
# kmeanssample 2 pass, first sample sqrt(N)
from __future__ import division
import random
import numpy as np
from scipy.spatial.distance import cdist # $scipy/spatial/distance.py
# http://docs.scipy.org/doc/scipy/reference/spatial.html
from scipy.sparse import issparse # $scipy/sparse/csr.py
__date__ = "2011-11-17 Nov denis"
# X sparse, any cdist metric: real app ?
# centres get dense rapidly, metrics in high dim hit distance whiteout
# vs unsupervised / semi-supervised svm
#...............................................................................
def kmeans( X, centres, delta=.001, maxiter=10, metric="euclidean", p=2, verbose=1 ):
""" centres, Xtocentre, distances = kmeans( X, initial centres ... )
in:
X N x dim may be sparse
centres k x dim: initial centres, e.g. random.sample( X, k )
delta: relative error, iterate until the average distance to centres
is within delta of the previous average distance
maxiter
metric: any of the 20-odd in scipy.spatial.distance
"chebyshev" = max, "cityblock" = L1, "minkowski" with p=
or a function( Xvec, centrevec ), e.g. Lqmetric below
p: for minkowski metric -- local mod cdist for 0 < p < 1 too
verbose: 0 silent, 2 prints running distances
out:
centres, k x dim
Xtocentre: each X -> its nearest centre, ints N -> k
distances, N
see also: kmeanssample below, class Kmeans below.
"""
if not issparse(X):
X = np.asanyarray(X) # ?
centres = centres.todense() if issparse(centres) \
else centres.copy()
N, dim = X.shape
k, cdim = centres.shape
if dim != cdim:
raise ValueError( "kmeans: X %s and centres %s must have the same number of columns" % (
X.shape, centres.shape ))
if verbose:
print "kmeans: X %s centres %s delta=%.2g maxiter=%d metric=%s" % (
X.shape, centres.shape, delta, maxiter, metric)
allx = np.arange(N)
prevdist = 0
for jiter in range( 1, maxiter+1 ):
D = cdist_sparse( X, centres, metric=metric, p=p ) # |X| x |centres|
xtoc = D.argmin(axis=1) # X -> nearest centre
distances = D[allx,xtoc]
avdist = distances.mean() # median ?
if verbose >= 2:
print "kmeans: av |X - nearest centre| = %.4g" % avdist
if (1 - delta) * prevdist <= avdist <= prevdist \
or jiter == maxiter:
break
prevdist = avdist
for jc in range(k): # (1 pass in C)
c = np.where( xtoc == jc )[0]
if len(c) > 0:
centres[jc] = X[c].mean( axis=0 )
if verbose:
print "kmeans: %d iterations cluster sizes:" % jiter, np.bincount(xtoc)
if verbose >= 2:
r50 = np.zeros(k)
r90 = np.zeros(k)
for j in range(k):
dist = distances[ xtoc == j ]
if len(dist) > 0:
r50[j], r90[j] = np.percentile( dist, (50, 90) )
print "kmeans: cluster 50 % radius", r50.astype(int)
print "kmeans: cluster 90 % radius", r90.astype(int)
# scale L1 / dim, L2 / sqrt(dim) ?
return centres, xtoc, distances
#...............................................................................
def kmeanssample( X, k, nsample=0, **kwargs ):
""" 2-pass kmeans, fast for large N:
1) kmeans a random sample of nsample ~ sqrt(N) from X
2) full kmeans, starting from those centres
"""
# merge w kmeans ? mttiw
# v large N: sample N^1/2, N^1/2 of that
# seed like sklearn ?
N, dim = X.shape
if nsample == 0:
nsample = max( 2*np.sqrt(N), 10*k )
Xsample = randomsample( X, int(nsample) )
pass1centres = randomsample( X, int(k) )
samplecentres = kmeans( Xsample, pass1centres, **kwargs )[0]
return kmeans( X, samplecentres, **kwargs )
def cdist_sparse( X, Y, **kwargs ):
""" -> |X| x |Y| cdist array, any cdist metric
X or Y may be sparse -- best csr
"""
# todense row at a time, v slow if both v sparse
sxy = 2*issparse(X) + issparse(Y)
if sxy == 0:
return cdist( X, Y, **kwargs )
d = np.empty( (X.shape[0], Y.shape[0]), np.float64 )
if sxy == 2:
for j, x in enumerate(X):
d[j] = cdist( x.todense(), Y, **kwargs ) [0]
elif sxy == 1:
for k, y in enumerate(Y):
            d[:,k] = cdist( X, y.todense(), **kwargs )[:,0]  # n x 1 column -> 1-d
else:
for j, x in enumerate(X):
for k, y in enumerate(Y):
                d[j,k] = cdist( x.todense(), y.todense(), **kwargs )[0,0]
return d
def randomsample( X, n ):
""" random.sample of the rows of X
X may be sparse -- best csr
"""
    sampleix = random.sample( range( X.shape[0] ), int(n) )
return X[sampleix]
def nearestcentres( X, centres, metric="euclidean", p=2 ):
""" each X -> nearest centre, any metric
euclidean2 (~ withinss) is more sensitive to outliers,
cityblock (manhattan, L1) less sensitive
"""
D = cdist( X, centres, metric=metric, p=p ) # |X| x |centres|
return D.argmin(axis=1)
def Lqmetric( x, y=None, q=.5 ):
# yes a metric, may increase weight of near matches; see ...
return (np.abs(x - y) ** q) .mean() if y is not None \
else (np.abs(x) ** q) .mean()
#...............................................................................
class Kmeans:
""" km = Kmeans( X, k= or centres=, ... )
in: either initial centres= for kmeans
or k= [nsample=] for kmeanssample
out: km.centres, km.Xtocentre, km.distances
iterator:
for jcentre, J in km:
clustercentre = centres[jcentre]
J indexes e.g. X[J], classes[J]
"""
def __init__( self, X, k=0, centres=None, nsample=0, **kwargs ):
self.X = X
if centres is None:
self.centres, self.Xtocentre, self.distances = kmeanssample(
X, k=k, nsample=nsample, **kwargs )
else:
self.centres, self.Xtocentre, self.distances = kmeans(
X, centres, **kwargs )
def __iter__(self):
for jc in range(len(self.centres)):
yield jc, (self.Xtocentre == jc)
#...............................................................................
if __name__ == "__main__":
import random
import sys
from time import time
N = 10000
dim = 10
ncluster = 10
kmsample = 100 # 0: random centres, > 0: kmeanssample
kmdelta = .001
kmiter = 10
metric = "cityblock" # "chebyshev" = max, "cityblock" L1, Lqmetric
seed = 1
exec( "\n".join( sys.argv[1:] )) # run this.py N= ...
np.set_printoptions( 1, threshold=200, edgeitems=5, suppress=True )
np.random.seed(seed)
random.seed(seed)
print "N %d dim %d ncluster %d kmsample %d metric %s" % (
N, dim, ncluster, kmsample, metric)
X = np.random.exponential( size=(N,dim) )
# cf scikits-learn datasets/
t0 = time()
if kmsample > 0:
centres, xtoc, dist = kmeanssample( X, ncluster, nsample=kmsample,
delta=kmdelta, maxiter=kmiter, metric=metric, verbose=2 )
else:
randomcentres = randomsample( X, ncluster )
centres, xtoc, dist = kmeans( X, randomcentres,
delta=kmdelta, maxiter=kmiter, metric=metric, verbose=2 )
print "%.0f msec" % ((time() - t0) * 1000)
# also ~/py/np/kmeans/test-kmeans.py
Some notes added 26 March 2012:
1) For cosine distance, first normalize all the data vectors to |X| = 1; then
cosinedistance( X, Y ) = 1 - X . Y = squared Euclidean distance |X - Y|^2 / 2
is fast. For bit vectors, keep the norms separately from the vectors instead of expanding out to floats (although some programs may expand for you). For sparse vectors, say 1 % of N nonzero, X . Y should take time O( 2 % of N ), space O(N); but I don't know which programs do that.
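A quick numerical check of that identity (a small sketch with random data):
import numpy as np
from scipy.spatial.distance import cdist

X = np.random.rand(5, 4)
Y = np.random.rand(3, 4)
Xn = X / np.linalg.norm(X, axis=1, keepdims=True)  # rows normalized to |x| = 1
Yn = Y / np.linalg.norm(Y, axis=1, keepdims=True)

# cosine distance 1 - x . y equals squared Euclidean distance / 2 on unit vectors
print(np.allclose(cdist(Xn, Yn, "cosine"), cdist(Xn, Yn, "sqeuclidean") / 2))  # True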
2) Scikit-learn clustering gives an excellent overview of k-means, mini-batch k-means, etc., with code that works on scipy.sparse matrices.
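For instance, a minimal (Euclidean-only) sketch showing that MiniBatchKMeans accepts scipy.sparse input, with hypothetical random data:
from scipy.sparse import random as sparse_random
from sklearn.cluster import MiniBatchKMeans

Xs = sparse_random(1000, 50, density=0.01, format="csr", random_state=0)
mbk = MiniBatchKMeans(n_clusters=8, n_init=3, random_state=0).fit(Xs)
print(mbk.labels_[:10])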
3) Always check cluster sizes after k-means.
If you're expecting roughly equal-sized clusters, but they come out
[44 37 9 5 5] %
... (sound of head-scratching).
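For example, a small sketch that prints the sizes as percentages, assuming numpy as np and xtoc, centres from the kmeans above:
sizes = np.bincount(xtoc, minlength=len(centres))
print((100.0 * sizes / sizes.sum()).round().astype(int), "%")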
I got ValueError: operands could not be broadcast together with shapes (0) (2). Just thought I would let you know about it, as it seems to have something to do with your latest addition. – Telethon
verbose = 1 operates correctly, without any errors, in my case. – Telethon
k ~ N. The nsample = max( 2*np.sqrt(N), 10*k ) line in your code will not let me pick large k's. I have temporarily worked around this by manually setting the sample size, but would you have any other suggestions? – Telethon
k ~ N was meant to say k close to N. Sorry about the notation. – Telethon
Unfortunately no: scikit-learn's current implementation of k-means only uses Euclidean distances.
It is not trivial to extend k-means to other distances, and denis' answer above is not the correct way to implement k-means for other metrics.
Just use nltk instead, where you can do this, e.g.
from nltk.cluster.kmeans import KMeansClusterer
from nltk.cluster.util import cosine_distance

NUM_CLUSTERS = <choose a value>
data = <sparse matrix that you would normally give to scikit>.toarray()

kclusterer = KMeansClusterer(NUM_CLUSTERS, distance=cosine_distance, repeats=25)
assigned_clusters = kclusterer.cluster(data, assign_clusters=True)
With repeats=25, 1.5k points take 2 minutes, and 2k takes... too long. – Turnsole
nltk 3.7's KMeansClusterer does NOT work with the Hamming distance, because when calculating the new centroids (nltk/cluster/kmeans.py:186, in KMeansClusterer._centroid()) it uses floating-point arithmetic. It would require some modifications to adapt it to using a Hamming distance. – Molnar
Yes, you can use a different metric function; however, by definition, the k-means clustering algorithm relies on the Euclidean distance from the mean of each cluster.
You could use a different metric, so even though you are still calculating the mean, you could use something like the Mahalanobis distance.
There is pyclustering, which is Python/C++ (so it's fast!) and lets you specify a custom metric function:
from pyclustering.cluster.kmeans import kmeans
from pyclustering.utils.metric import type_metric, distance_metric

user_function = lambda point1, point2: point1[0] + point2[0] + 2
metric = distance_metric(type_metric.USER_DEFINED, func=user_function)

# some 2-d points to cluster (substitute your own data)
sample = [[4.5, 5.8], [4.9, 6.1], [5.6, 6.2], [6.0, 6.7]]

# create K-Means algorithm with specific distance metric
start_centers = [[4.7, 5.9], [5.7, 6.5]]
kmeans_instance = kmeans(sample, start_centers, metric=metric)

# run cluster analysis and obtain results
kmeans_instance.process()
clusters = kmeans_instance.get_clusters()

Actually, I haven't tested this code; I cobbled it together from a ticket and example code.
The k-means implementation in Spectral Python allows the use of the L1 (Manhattan) distance.
Sklearn's KMeans uses the Euclidean distance; it has no metric parameter. That said, if you're clustering time series, you can use the tslearn Python package, where you can specify a metric (dtw, softdtw, euclidean).
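A minimal sketch, assuming tslearn is installed; the random series here are hypothetical, and X has shape (n_series, length, dim):
import numpy as np
from tslearn.clustering import TimeSeriesKMeans

X = np.random.rand(20, 50, 1)  # 20 hypothetical univariate series of length 50
km = TimeSeriesKMeans(n_clusters=3, metric="dtw", random_state=0)
labels = km.fit_predict(X)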
The Affinity Propagation algorithm from the sklearn library allows you to pass the similarity matrix instead of the samples. So, you can use your metric to compute the similarity matrix (not the dissimilarity matrix) and pass it to the function by setting the "affinity" parameter to "precomputed":
https://scikit-learn.org/stable/modules/generated/sklearn.cluster.AffinityPropagation.html#sklearn.cluster.AffinityPropagation.fit
In terms of K-Means, I think it is also possible, but I have not tried it. However, as the other answers stated, finding the mean using a different metric will be the issue. Instead, you can use the PAM (K-Medoids) algorithm, as it calculates the change in Total Deviation (TD) and thus does not rely on the distance metric:
https://python-kmedoids.readthedocs.io/en/latest/#fasterpam
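A minimal sketch of the precomputed-affinity route, using negated Manhattan distances as the similarity (hypothetical random data):
import numpy as np
from sklearn.cluster import AffinityPropagation
from sklearn.metrics import pairwise_distances

X = np.random.rand(30, 4)                       # hypothetical data
S = -pairwise_distances(X, metric="manhattan")  # similarity = negated dissimilarity
ap = AffinityPropagation(affinity="precomputed", random_state=0).fit(S)
print(ap.labels_)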
Yes, in the current stable version of sklearn (scikit-learn 1.1.3), you can easily use your own distance metric. All you have to do is create a class that inherits from sklearn.cluster.KMeans and overrides its _transform method.
The example below is for the IoU distance from the YOLOv2 paper.
import sklearn.cluster
import numpy as np
def anchor_iou(box_dims, centroid_box_dims):
box_w, box_h = box_dims[..., 0], box_dims[..., 1]
centroid_w, centroid_h = centroid_box_dims[..., 0], centroid_box_dims[..., 1]
inter_w = np.minimum(box_w[..., np.newaxis], centroid_w[np.newaxis, ...])
inter_h = np.minimum(box_h[..., np.newaxis], centroid_h[np.newaxis, ...])
inter_area = inter_w * inter_h
centroid_area = centroid_w * centroid_h
box_area = box_w * box_h
return inter_area / (
centroid_area[np.newaxis, ...] + box_area[..., np.newaxis] - inter_area
)
class IOUKMeans(sklearn.cluster.KMeans):
def __init__(
self,
n_clusters=8,
*,
init="k-means++",
n_init=10,
max_iter=300,
tol=1e-4,
verbose=0,
random_state=None,
copy_x=True,
algorithm="lloyd",
):
super().__init__(
n_clusters=n_clusters,
init=init,
n_init=n_init,
max_iter=max_iter,
tol=tol,
verbose=verbose,
random_state=random_state,
copy_x=copy_x,
algorithm=algorithm
)
    def _transform(self, X):
        # anchor_iou is a similarity in [0, 1]; 1 - IoU is the YOLOv2 distance
        return 1 - anchor_iou(X, self.cluster_centers_)
rng = np.random.default_rng(12345)
num_boxes = 10
num_clusters = 3
bboxes = rng.integers(low=1, high=100, size=(num_boxes, 2))  # low=1 avoids zero-area boxes
kmeans = IOUKMeans(num_clusters).fit(bboxes)
As of version scikit-learn==1.2.2
, one could replace _euclidean_distances
in sklearn.cluster._kmeans
with the following:
import sklearn.cluster._kmeans as kmeans
from sklearn.metrics import pairwise_distances
def custom_distances(X, Y=None, Y_norm_squared=None, squared=False):
    # squared equals False during cluster center estimation; this example
    # returns the same Minkowski (p=1.5) distances in either case
    return pairwise_distances(X, Y, metric='minkowski', p=1.5)

kmeans._euclidean_distances = custom_distances
kmeans.euclidean_distances = custom_distances  # utilized by the method KMeans._transform
Then create the base estimator as usual (clusters below stands for your cluster count):
clusters = 3  # e.g.
km = kmeans.KMeans(init="k-means++", n_clusters=clusters, n_init=4, random_state=0)
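A quick smoke test with random data (hypothetical), just to check that the patched estimator fits and assigns labels:
import numpy as np

X = np.random.rand(100, 5)
km.fit(X)
print(np.bincount(km.labels_))  # cluster sizes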
With pyclustering, you can wrap the metric choice in a helper and then flatten its list-of-clusters output into a label array:
def distance_metrics(dist_metrics):
    # build a pyclustering kmeans instance with the chosen metric;
    # trs_data and initial_centers come from the surrounding code
    kmeans_instance = kmeans(trs_data, initial_centers, metric=dist_metrics)
    return kmeans_instance

# convert the list-of-clusters output into a flat label array (210 points here)
label = np.zeros(210, dtype=int)
for i in range(len(clusters)):
    for j in clusters[i]:
        label[j] = i
sklearn.cluster.KMeans or sklearn.cluster.k_means? – Brainwork
pdist. Or consider minimizing the L1 norm, or the KL divergence, if given data points that have a sparsity constraint or that are probability distributions, respectively. There's no reason why you can't run the k-means algorithm on these types of data; just use an appropriate different "distance between points" when minimizing the loss function with respect to candidate centers. – Cog
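A minimal sketch of that idea under the L1 norm (hypothetical random data): assign points with cityblock distances, then update each centre with the coordinate-wise median, which is the L1 minimizer:
import numpy as np
from scipy.spatial.distance import cdist

X = np.random.rand(200, 3)                          # hypothetical data
C = X[np.random.choice(len(X), 4, replace=False)]   # 4 initial centres
for _ in range(10):                                 # a few Lloyd-style iterations
    labels = cdist(X, C, metric="cityblock").argmin(axis=1)
    C = np.array([np.median(X[labels == j], axis=0) if (labels == j).any() else C[j]
                  for j in range(len(C))])
print(np.bincount(labels))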