Manual on how to use the fit-clusters script for clustering
usage: fit-clusters [-h] [--param [PARAM [PARAM ...]]]
[--config [CONFIG [CONFIG ...]]]
optional arguments:
-h, --help show this help message and exit
--param [PARAM [PARAM ...]]
List of Gin parameter bindings
--config [CONFIG [CONFIG ...]]
List of paths to the config files

Example invocation:

fit-clusters \
--param \
load_data.path='/data/my_data.csv' \
DiviK.distance='euclidean' \
DiviK.use_logfilters=False \
DiviK.n_jobs=-1 \
--config \
my-defaults.gin \
my-overrides.gin

import divik.cluster

experiment.model = @DiviK()
DiviK.distance = 'euclidean'
DiviK.verbose = True

import divik.cluster
KMeans.n_clusters = 3
KMeans.distance = "correlation"
KMeans.init = "kdtree_percentile"
KMeans.leaf_size = 0.01
KMeans.percentile = 99.0
KMeans.max_iter = 100
KMeans.normalize_rows = True
experiment.model = @KMeans()
experiment.omit_datetime = True
experiment.verbose = True
experiment.exist_ok = True

import divik.cluster
KMeans.n_clusters = 1
KMeans.distance = "correlation"
KMeans.init = "kdtree_percentile"
KMeans.leaf_size = 0.01
KMeans.percentile = 99.0
KMeans.max_iter = 100
KMeans.normalize_rows = True
GAPSearch.kmeans = @KMeans()
GAPSearch.max_clusters = 2
GAPSearch.n_jobs = 1
GAPSearch.seed = 42
GAPSearch.n_trials = 10
GAPSearch.sample_size = 1000
GAPSearch.drop_unfit = True
GAPSearch.verbose = True
DunnSearch.kmeans = @KMeans()
DunnSearch.max_clusters = 10
DunnSearch.method = "auto"
DunnSearch.inter = "closest"
DunnSearch.intra = "furthest"
DunnSearch.sample_size = 1000
DunnSearch.seed = 42
DunnSearch.n_jobs = 1
DunnSearch.drop_unfit = True
DunnSearch.verbose = True
DiviK.kmeans = @DunnSearch()
DiviK.fast_kmeans = @GAPSearch()
DiviK.distance = "correlation"
DiviK.minimal_size = 200
DiviK.rejection_size = 2
DiviK.minimal_features_percentage = 0.005
DiviK.features_percentage = 1.0
DiviK.normalize_rows = True
DiviK.use_logfilters = True
DiviK.filter_type = "gmm"
DiviK.n_jobs = 1
DiviK.verbose = True
experiment.model = @DiviK()
experiment.omit_datetime = True
experiment.verbose = True
experiment.exist_ok = True

import divik.core.gin_sklearn_configurables

experiment.model = @MeanShift()
MeanShift.n_jobs = -1
MeanShift.max_iter = 300

import divik.core.gin_sklearn_configurables
MeanShift.cluster_all = True
MeanShift.n_jobs = -1
MeanShift.max_iter = 300
experiment.model = @MeanShift()
experiment.omit_datetime = True
experiment.verbose = True
experiment.exist_ok = True

import divik.core.gin_sklearn_configurables
import divik.feature_extraction

experiment.model = @Pipeline()
MeanShift.n_jobs = -1
MeanShift.max_iter = 300
Pipeline.steps = [
('histogram_equalization', @HistogramEqualization()),
('exims', @EximsSelector()),
('pca', @KneePCA()),
('mean_shift', @MeanShift()),
]
experiment.steps_that_require_xy = ['exims']

import divik.core.gin_sklearn_configurables
import divik.feature_extraction
MeanShift.n_jobs = -1
MeanShift.max_iter = 300
Pipeline.steps = [
('histogram_equalization', @HistogramEqualization()),
('exims', @EximsSelector()),
('pca', @KneePCA()),
('mean_shift', @MeanShift()),
]
experiment.model = @Pipeline()
experiment.steps_that_require_xy = ['exims']
experiment.omit_datetime = True
experiment.verbose = True
experiment.exist_ok = True

import gin
@gin.configurable
class MyClustering:
    pass

import gin

gin.external_configurable(MyClustering)

from divik.core.io import saver
@saver
def save_my_clustering(model, fname_fn, **kwargs):
if not hasattr(model, 'my_custom_field_'):
return
    # custom saving logic comes here

import myclustering

Python implementation of Divisive iK-means (DiviK) algorithm.
docker pull gmrukwa/divik

sudo apt-get install libgomp1

conda install -c conda-forge "compilers>=1.0.4,!=1.1.0" llvm-openmp

pip install divik

pip install divik[gin]

pip install divik[all]

isort -m 3 --fgw 3 --tc .
black -t py36 .