This shows you the differences between two versions of the page.
Both sides previous revision Previous revision Next revision | Previous revision | ||
en:site:recherche:logiciels:sparqlwithspark:snowflakeq8 [16/09/2016 23:12] hubert [Plan for Spark 1.5] |
en:site:recherche:logiciels:sparqlwithspark:snowflakeq8 [16/09/2016 23:14] (current) hubert |
||
---|---|---|---|
Line 5: | Line 5: | ||
- | ===== Plan for Spark 1.5 ===== | + | ===== Plans for Spark 1.5 ===== |
<code scala> | <code scala> | ||
import org.apache.spark.rdd.RDD | import org.apache.spark.rdd.RDD | ||
import org.apache.spark.SparkContext | import org.apache.spark.SparkContext | ||
- | //import org.apache.spark.SparkContext._ | ||
//DF | //DF | ||
import org.apache.spark.sql.DataFrame | import org.apache.spark.sql.DataFrame | ||
- | //import sqlContext.implicits._ | ||
- | //import org.apache.spark.sql._ | ||
- | |||
- | // Import Row. | ||
- | // import org.apache.spark.sql.Row; | ||
- | |||
- | // Import Spark SQL data types | ||
- | //import org.apache.spark.sql.types.{StructType,StructField,LongType,StringType}; | ||
import scala.reflect.ClassTag | import scala.reflect.ClassTag | ||
- | //import scala.collection.mutable.ListBuffer | ||
import org.apache.spark.HashPartitioner | import org.apache.spark.HashPartitioner | ||
- | |||
- | //import java.io.Serializable | ||
- | |||
- | |||
val NB_FRAGMENTS = sc.defaultParallelism | val NB_FRAGMENTS = sc.defaultParallelism | ||
Line 60: | Line 46: | ||
SOByName.count | SOByName.count | ||
//328 620 776 | //328 620 776 | ||
- | |||
- | // on garde le dictionnaire en tant que RDD et pas en tant que DataFrame, car un DataFrame n'a pas la méthode lookup (avec accès à une seule partition) | ||
- | // durée moyenne du lookup d'un sujet avec un RDD trié vs partitionné | ||
- | // sortByKey: 330 ms | ||
- | // partitionBy (300 part): 160 ms | ||
- | // partitionBy (1200 part): 66 ms | ||
- | // partitionBy (3000 part): 83 ms | ||