The choice of distance measure depends on the data set. Before clustering, data are typically centered and scaled, e.g. row- or column-wise with the scale() function. The distance measures listed here are the most common ones; many more exist.
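As a quick illustration, the following sketch computes several of the common distance measures on the same toy matrix; the object name m is used for illustration only.
m <- matrix(rnorm(20), 4, 5)   # Small toy matrix
dist(m, method="euclidean")    # Euclidean distance (default)
dist(m, method="manhattan")    # Manhattan (city block) distance
dist(m, method="maximum")      # Maximum (Chebyshev) distance
as.dist(1 - cor(t(m)))         # Correlation-based distance among rows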
Agglomerative hierarchical clustering is available through the hclust() and agnes() functions, and divisive hierarchical clustering through diana(); the latter two are provided by the cluster package.
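As a minimal sketch, all three can be run on the same distance matrix as follows; d.toy is an illustrative object name.
library(cluster)                        # Provides agnes() and diana()
d.toy <- dist(matrix(rnorm(50), 10, 5)) # Toy distance matrix
hr1 <- hclust(d.toy, method="complete") # Agglomerative (base R)
hr2 <- agnes(d.toy, method="complete")  # Agglomerative (cluster package)
hr3 <- diana(d.toy)                     # Divisive (cluster package)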
hclust and heatmap.2
library(gplots)
y <- matrix(rnorm(500), 100, 5, dimnames=list(paste("g", 1:100, sep=""), paste("t", 1:5, sep="")))
heatmap.2(y) # Shortcut to final result
## Row- and column-wise clustering
hr <- hclust(as.dist(1-cor(t(y), method="pearson")), method="complete")
hc <- hclust(as.dist(1-cor(y, method="spearman")), method="complete")
## Tree cutting
mycl <- cutree(hr, h=max(hr$height)/1.5)
mycolhc <- rainbow(length(unique(mycl)), start=0.1, end=0.9)
mycolhc <- mycolhc[as.vector(mycl)]
## Plot heatmap
mycol <- colorpanel(40, "darkblue", "yellow", "white") # or try redgreen(75)
heatmap.2(y, Rowv=as.dendrogram(hr), Colv=as.dendrogram(hc), col=mycol, scale="row", density.info="none", trace="none", RowSideColors=mycolhc)
km <- kmeans(t(scale(t(y))), 3) # Centers and scales the rows of y, then partitions them into 3 clusters
km$cluster
## g1 g2 g3 g4 g5 g6 g7 g8 g9 g10 g11 g12 g13 g14 g15 g16 g17 g18 g19 g20
## 1 1 2 3 1 2 2 2 3 2 2 3 3 3 3 2 3 2 3 3
## g21 g22 g23 g24 g25 g26 g27 g28 g29 g30 g31 g32 g33 g34 g35 g36 g37 g38 g39 g40
## 3 2 2 3 3 1 3 2 1 2 2 2 1 2 1 3 3 1 3 3
## g41 g42 g43 g44 g45 g46 g47 g48 g49 g50 g51 g52 g53 g54 g55 g56 g57 g58 g59 g60
## 1 1 1 3 2 2 2 3 2 3 3 1 2 3 2 3 3 3 3 1
## g61 g62 g63 g64 g65 g66 g67 g68 g69 g70 g71 g72 g73 g74 g75 g76 g77 g78 g79 g80
## 3 3 2 1 3 1 3 1 2 2 2 2 3 3 2 3 2 1 1 2
## g81 g82 g83 g84 g85 g86 g87 g88 g89 g90 g91 g92 g93 g94 g95 g96 g97 g98 g99 g100
## 1 2 2 3 3 2 2 3 3 2 2 1 1 2 3 3 1 1 1 3
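Beyond the cluster assignments, the object returned by kmeans also stores the cluster sizes and center profiles, which are useful for a quick sanity check:
km$size    # Number of genes per cluster
km$centers # Cluster center profiles across the 5 columns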
fanny
Fuzzy clustering with the fanny function from the cluster package
library(cluster) # Loads the cluster library.
fannyy <- fanny(y, k=4, metric = "euclidean", memb.exp = 1.2)
round(fannyy$membership, 2)[1:4,]
## [,1] [,2] [,3] [,4]
## g1 0.85 0.06 0.01 0.08
## g2 0.10 0.69 0.16 0.05
## g3 0.02 0.01 0.95 0.02
## g4 0.01 0.02 0.01 0.96
fannyy$clustering
## g1 g2 g3 g4 g5 g6 g7 g8 g9 g10 g11 g12 g13 g14 g15 g16 g17 g18 g19 g20
## 1 2 3 4 2 3 1 1 2 3 3 2 2 4 4 1 2 3 4 4
## g21 g22 g23 g24 g25 g26 g27 g28 g29 g30 g31 g32 g33 g34 g35 g36 g37 g38 g39 g40
## 4 3 3 2 4 4 4 3 1 3 3 3 1 3 4 2 4 2 2 2
## g41 g42 g43 g44 g45 g46 g47 g48 g49 g50 g51 g52 g53 g54 g55 g56 g57 g58 g59 g60
## 1 2 3 2 1 3 3 2 1 2 1 2 1 2 3 4 2 2 2 1
## g61 g62 g63 g64 g65 g66 g67 g68 g69 g70 g71 g72 g73 g74 g75 g76 g77 g78 g79 g80
## 4 4 1 2 2 4 2 4 3 3 4 3 2 2 1 2 2 2 1 1
## g81 g82 g83 g84 g85 g86 g87 g88 g89 g90 g91 g92 g93 g94 g95 g96 g97 g98 g99 g100
## 3 3 3 2 4 3 3 4 4 3 3 3 2 3 4 1 1 1 2 4
Principal component analysis (PCA) is a data reduction technique that simplifies multidimensional data sets to 2 or 3 dimensions for plotting purposes and visual variance analysis.
pca <- prcomp(y, scale=TRUE)
summary(pca) # Prints variance summary for all principal components
## Importance of components:
## PC1 PC2 PC3 PC4 PC5
## Standard deviation 1.1165 1.0418 1.0237 0.9311 0.8679
## Proportion of Variance 0.2493 0.2170 0.2096 0.1734 0.1506
## Cumulative Proportion 0.2493 0.4664 0.6760 0.8494 1.0000
plot(pca$x, pch=20, col="blue", type="n") # To plot dots, drop type="n"
text(pca$x, rownames(pca$x), cex=0.8)
The 1st and 2nd principal components explain 46.6% of the variance in this data set (see the cumulative proportion for PC2 above).
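These proportions can also be computed directly from the sdev component of the prcomp object, e.g.:
pve <- pca$sdev^2 / sum(pca$sdev^2) # Proportion of variance explained per component
round(cumsum(pve), 4)               # Cumulative proportions, as in summary(pca)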
The following example performs MDS analysis with cmdscale
on the geographic distances among European cities.
loc <- cmdscale(eurodist)
plot(loc[,1], -loc[,2], type="n", xlab="", ylab="", main="cmdscale(eurodist)")
text(loc[,1], -loc[,2], rownames(loc), cex=0.8)
Biclustering identifies subgroups of rows and columns in a matrix that are as similar as possible to each other and as different as possible from the remaining data points.
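One option for this in R is the biclust package (not used elsewhere in this document); the following is a minimal sketch, assuming the package is installed, that runs its implementation of the Cheng and Church algorithm on a toy matrix:
library(biclust)                 # Assumption: the biclust package is installed
bm <- matrix(rnorm(400), 20, 20) # Toy matrix; object names are illustrative
bres <- biclust(bm, method=BCCC(), delta=1.5, alpha=1, number=5)
bres                             # Prints a summary of the identified biclusters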
The following imports the cindex() function and computes the Jaccard index for two sample clustering results.
source("http://faculty.ucr.edu/~tgirke/Documents/R_BioCond/My_R_Scripts/clusterIndex.R")
library(cluster)
y <- matrix(rnorm(5000), 1000, 5, dimnames=list(paste("g", 1:1000, sep=""), paste("t", 1:5, sep="")))
clarax <- clara(y, 49); clV1 <- clarax$clustering
clarax <- clara(y, 50); clV2 <- clarax$clustering
ci <- cindex(clV1=clV1, clV2=clV2, self=FALSE, minSZ=1, method="jaccard")
ci[2:3] # Returns Jaccard index and variables used to compute it
## $variables
## a b c
## 9245 4238 3951
##
## $Jaccard_Index
## [1] 0.5302856
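With the pair counts returned above, the Jaccard index is a/(a + b + c) = 9245/(9245 + 4238 + 3951) ≈ 0.53, which matches the reported value.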
The following example shows how one can cluster entire cluster result sets. First, 10 sample cluster results are created with Clara using k-values from 3 to 12. The results are stored as named clustering vectors in a list object. Then a nested sapply loop is used to generate a similarity matrix of Jaccard indices for the clustering results. After converting the result into a distance matrix, hierarchical clustering is performed with hclust.
clVlist <- lapply(3:12, function(x) clara(y[1:30, ], k=x)$clustering); names(clVlist) <- paste("k", "=", 3:12)
d <- sapply(names(clVlist), function(i) sapply(names(clVlist), function(j) cindex(clV1=clVlist[[j]], clV2=clVlist[[i]], method="jaccard")[[3]]))
hv <- hclust(as.dist(1-d))
plot(as.dendrogram(hv), edgePar=list(col=3, lwd=4), horiz=TRUE, main="Similarities of 10 Clara Clustering Results for k: 3-12")
## Sample data set
set.seed(1410)
y <- matrix(rnorm(50), 10, 5, dimnames=list(paste("g", 1:10, sep=""),
paste("t", 1:5, sep="")))
dim(y)
## [1] 10 5
## Scaling
yscaled <- t(scale(t(y))) # Centers and scales y row-wise
apply(yscaled, 1, sd)
## g1 g2 g3 g4 g5 g6 g7 g8 g9 g10
## 1 1 1 1 1 1 1 1 1 1
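Since scale() centers each row here as well, the row means of the scaled matrix are (numerically) zero:
round(rowMeans(yscaled), 10) # All zero after row-wise centering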
dist(y[1:4,], method = "euclidean")
## g1 g2 g3
## g2 4.793697
## g3 4.932658 6.354978
## g4 4.033789 4.788508 1.671968
Correlation matrix
cm <- cor(t(y), method="pearson") # Named cm to avoid shadowing the base function c()
as.matrix(cm)[1:4,1:4]
## g1 g2 g3 g4
## g1 1.00000000 -0.2965885 -0.00206139 -0.4042011
## g2 -0.29658847 1.0000000 -0.91661118 -0.4512912
## g3 -0.00206139 -0.9166112 1.00000000 0.7435892
## g4 -0.40420112 -0.4512912 0.74358925 1.0000000
Correlation-based distance matrix
d <- as.dist(1-cm)
as.matrix(d)[1:4,1:4]
## g1 g2 g3 g4
## g1 0.000000 1.296588 1.0020614 1.4042011
## g2 1.296588 0.000000 1.9166112 1.4512912
## g3 1.002061 1.916611 0.0000000 0.2564108
## g4 1.404201 1.451291 0.2564108 0.0000000
hclust
Hierarchical clustering with complete linkage and basic tree plotting
hr <- hclust(d, method = "complete", members=NULL)
names(hr)
## [1] "merge" "height" "order" "labels" "method" "call"
## [7] "dist.method"
par(mfrow = c(1, 2)); plot(hr, hang = 0.1); plot(hr, hang = -1)
plot(as.dendrogram(hr), edgePar=list(col=3, lwd=4), horiz=TRUE)
The ape
library provides more advanced features for tree plotting
library(ape)
plot.phylo(as.phylo(hr), type="p", edge.col=4, edge.width=2,
show.node.label=TRUE, no.margin=TRUE)
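plot.phylo supports several alternative layouts via its type argument, e.g.:
plot.phylo(as.phylo(hr), type="fan")       # Circular (fan) layout
plot.phylo(as.phylo(hr), type="cladogram") # Cladogram layout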
Accessing information in hclust objects
hr
##
## Call:
## hclust(d = d, method = "complete", members = NULL)
##
## Cluster method : complete
## Number of objects: 10
## Print row labels in the order they appear in the tree
hr$labels[hr$order]
## [1] "g10" "g3" "g4" "g2" "g9" "g6" "g7" "g1" "g5" "g8"
Tree cutting with cutree
mycl <- cutree(hr, h=max(hr$height)/2)
mycl[hr$labels[hr$order]]
## g10 g3 g4 g2 g9 g6 g7 g1 g5 g8
## 3 3 3 2 2 5 5 1 4 4
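Instead of a height cutoff, cutree can also cut the tree into a fixed number of clusters via its k argument:
cutree(hr, k=3) # Assigns the 10 genes to exactly 3 clusters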
heatmap.2
All in one step: clustering and heatmap plotting
library(gplots)
heatmap.2(y, col=redgreen(75))
pheatmap
All in one step: clustering and heatmap plotting
library(pheatmap); library("RColorBrewer")
pheatmap(y, color=brewer.pal(9,"Blues"))
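pheatmap can also display cluster memberships next to the heatmap through its annotation_row argument; the following sketch reuses the tree cutting result mycl from above, with ann as an illustrative object name:
ann <- data.frame(cluster=factor(mycl), row.names=names(mycl)) # Row annotation data frame
pheatmap(y, color=brewer.pal(9,"Blues"), annotation_row=ann)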
Customizes row and column clustering and shows the tree cutting result in the row color bar.
hc <- hclust(as.dist(1-cor(y, method="spearman")), method="complete")
mycol <- colorpanel(40, "darkblue", "yellow", "white")
heatmap.2(y, Rowv=as.dendrogram(hr), Colv=as.dendrogram(hc), col=mycol,
scale="row", density.info="none", trace="none",
RowSideColors=as.character(mycl))
Runs the PAM (partitioning around medoids) algorithm, a medoid-based counterpart of K-means clustering, and shows the result in the color bar of the hierarchical clustering result from before.
library(cluster)
pamy <- pam(d, 4)
(kmcol <- pamy$clustering)
## g1 g2 g3 g4 g5 g6 g7 g8 g9 g10
## 1 2 3 3 4 4 4 4 2 3
heatmap.2(y, Rowv=as.dendrogram(hr), Colv=as.dendrogram(hc), col=mycol,
scale="row", density.info="none", trace="none",
RowSideColors=as.character(kmcol))
Performs fuzzy clustering, a soft version of k-means where each object receives membership coefficients for all clusters
library(cluster)
fannyy <- fanny(d, k=4, memb.exp = 1.5)
round(fannyy$membership, 2)[1:4,]
## [,1] [,2] [,3] [,4]
## g1 1.00 0.00 0.00 0.00
## g2 0.00 0.99 0.00 0.00
## g3 0.02 0.01 0.95 0.03
## g4 0.00 0.00 0.99 0.01
fannyy$clustering
## g1 g2 g3 g4 g5 g6 g7 g8 g9 g10
## 1 2 3 3 4 4 4 4 2 3
## Returns multiple cluster memberships for coefficient above a certain
## value (here >0.1)
fannyyMA <- round(fannyy$membership, 2) > 0.10
apply(fannyyMA, 1, function(x) paste(which(x), collapse="_"))
## g1 g2 g3 g4 g5 g6 g7 g8 g9 g10
## "1" "2" "3" "3" "4" "4" "4" "2_4" "2" "3"
Performs MDS analysis on the geographic distances between European cities
loc <- cmdscale(eurodist)
## Plots the MDS results in 2D plot. The minus is required in this example to
## flip the plotting orientation.
plot(loc[,1], -loc[,2], type="n", xlab="", ylab="", main="cmdscale(eurodist)")
text(loc[,1], -loc[,2], rownames(loc), cex=0.8)
Performs PCA after scaling the data. It returns a list of class prcomp
that contains five components: (1) the standard deviations (sdev) of the principal components, (2) the matrix of eigenvectors (rotation), (3) the principal component data (x), (4) the centering (center) and (5) scaling (scale) used.
library(scatterplot3d)
pca <- prcomp(y, scale=TRUE)
names(pca)
## [1] "sdev" "rotation" "center" "scale" "x"
summary(pca) # Prints variance summary for all principal components.
## Importance of components:
## PC1 PC2 PC3 PC4 PC5
## Standard deviation 1.3611 1.1777 1.0420 0.69264 0.4416
## Proportion of Variance 0.3705 0.2774 0.2172 0.09595 0.0390
## Cumulative Proportion 0.3705 0.6479 0.8650 0.96100 1.0000
scatterplot3d(pca$x[,1:3], pch=20, color="blue")
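A quick way to view the genes together with the sample loadings for the first two components is the base R biplot function:
biplot(pca) # Genes as points, loadings of t1-t5 as arrows (PC1 vs PC2)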
Session Information
sessionInfo()
## R version 3.4.0 (2017-04-21)
## Platform: x86_64-pc-linux-gnu (64-bit)
## Running under: Ubuntu 14.04.5 LTS
##
## Matrix products: default
## BLAS: /usr/lib/libblas/libblas.so.3.0
## LAPACK: /usr/lib/lapack/liblapack.so.3.0
##
## locale:
## [1] LC_CTYPE=en_US.UTF-8 LC_NUMERIC=C LC_TIME=en_US.UTF-8
## [4] LC_COLLATE=en_US.UTF-8 LC_MONETARY=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8
## [7] LC_PAPER=en_US.UTF-8 LC_NAME=C LC_ADDRESS=C
## [10] LC_TELEPHONE=C LC_MEASUREMENT=en_US.UTF-8 LC_IDENTIFICATION=C
##
## attached base packages:
## [1] stats graphics utils datasets grDevices base
##
## other attached packages:
## [1] ggplot2_2.2.1 scatterplot3d_0.3-40 RColorBrewer_1.1-2 pheatmap_1.0.8
## [5] ape_4.1 cluster_2.0.6 gplots_3.0.1 BiocStyle_2.4.0
##
## loaded via a namespace (and not attached):
## [1] Rcpp_0.12.10 knitr_1.15.1 magrittr_1.5 munsell_0.4.3 colorspace_1.3-2
## [6] lattice_0.20-35 plyr_1.8.4 stringr_1.2.0 caTools_1.17.1 tools_3.4.0
## [11] parallel_3.4.0 grid_3.4.0 gtable_0.2.0 nlme_3.1-131 KernSmooth_2.23-15
## [16] htmltools_0.3.5 gtools_3.5.0 lazyeval_0.2.0 yaml_2.1.14 rprojroot_1.2
## [21] digest_0.6.12 tibble_1.3.0 codetools_0.2-15 bitops_1.0-6 evaluate_0.10
## [26] rmarkdown_1.5 gdata_2.17.0 stringi_1.1.5 compiler_3.4.0 scales_0.4.1
## [31] methods_3.4.0 backports_1.0.5