I'm new to Apache Spark and want to find the most similar text among a collection of texts. I have tried the following myself -
I have 2 RDD-
1st RDD contain incomplete text as follows -
[0,541 Suite 204, Redwood City, CA 94063]
[1,6649 N Blue Gum St, New Orleans,LA, 70116]
[2,#69, Los Angeles, Los Angeles, CA, 90034]
[3,98 Connecticut Ave Nw, Chagrin Falls]
[4,56 E Morehead Webb, TX, 78045]
2nd RDD contain correct address as follows -
[0,541 Jefferson Avenue, Suite 204, Redwood City, CA 94063]
[1,6649 N Blue Gum St, New Orleans, Orleans, LA, 70116]
[2,25 E 75th St #69, Los Angeles, Los Angeles, CA, 90034]
[3,98 Connecticut Ave Nw, Chagrin Falls, Geauga, OH, 44023]
[4,56 E Morehead St, Laredo, Webb, TX, 78045]
I have written the code below, but it is taking a lot of time. Can anyone please tell me the correct way of doing this in Apache Spark using Scala?
// Number of incomplete/incorrect addresses; used below to split the combined
// TF matrix back into the "query" half and the "reference" half.
val incorrect_address_count = incorrect_address_rdd.count()

// Union both RDDs so a single HashingTF pass puts all addresses into the
// same term-frequency vector space (same hash buckets / dimensionality).
// NOTE(review): splitting on " " only — commas are kept glued to tokens
// (e.g. "Orleans,LA"); consider _.split("[\\s,]+") for better matching.
val all_address = incorrect_address_rdd.union(correct_address_rdd) map (_._2.split(" ").toSeq)

val hashingTF = new HashingTF()
// zipWithIndex preserves the union order: indices [0, incorrect_address_count)
// are the incomplete addresses, the rest are the correct ones.
val tf = hashingTF.transform(all_address)
.zipWithIndex()

// (vector, index) pairs for the incomplete addresses we want to resolve.
val input_vector_rdd = tf.filter(_._2 < incorrect_address_count)

// (vector, original correct-address text) pairs for the reference set.
// BUG FIX: was `incorrect_address_countt` (typo) and `input_count`
// (undefined) — both must be `incorrect_address_count`.
val address_db_vector_rdd = tf.filter(_._2 >= incorrect_address_count)
.map(f => (f._2 - incorrect_address_count, f._1))
.join(correct_address_rdd)
.map(f => (f._2._1, f._2._2))

// All-pairs comparison. cartesian() is O(n*m) and is the dominant cost here;
// for large data prefer LSH (e.g. MinHash / BucketedRandomProjectionLSH) or
// a blocking key (zip/state) to prune candidate pairs before scoring.
val input_similarity_rdd = input_vector_rdd.cartesian(address_db_vector_rdd)
.map(f => {
// Emit (input index, similarity score, candidate correct address).
val cosine_similarity = cosineSimilarity(f._1._1.toDense, f._2._1.toDense)
(f._1._2, cosine_similarity, f._2._2)
})
/**
 * Cosine similarity of two equal-dimension dense vectors.
 *
 * @param vectorA first vector (dimension drives the iteration; assumes
 *                vectorB has the same size — true for HashingTF output)
 * @param vectorB second vector
 * @return similarity in [-1, 1]; 0.0 when either vector has zero norm
 *         (previously this divided by zero and returned NaN)
 */
def cosineSimilarity(vectorA: Vector, vectorB: Vector): Double = {
  var dotProduct = 0.0
  var normA = 0.0
  var normB = 0.0
  var i = 0
  val n = vectorA.size
  // while-loop over indices: avoids Range allocation and Math.pow calls
  // (x * x is cheaper than Math.pow(x, 2)) in this hot inner loop.
  while (i < n) {
    val a = vectorA(i)
    val b = vectorB(i)
    dotProduct += a * b
    normA += a * a
    normB += b * b
    i += 1
  }
  val denom = math.sqrt(normA) * math.sqrt(normB)
  // Guard the zero-vector case instead of propagating NaN downstream.
  if (denom == 0.0) 0.0 else dotProduct / denom
}