Normalizer documentation:

The documentation consists of just one sentence:

Normalize a vector to have unit norm using the given p-norm.
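Before reading the source, a quick usage sketch may help: Normalizer is a per-row transformer, so it rescales each feature vector independently of the others. The following is a minimal sketch assuming a local SparkSession; the column names "features" and "normFeatures" and the object name NormalizerUsage are illustrative, not from the original post.

import org.apache.spark.ml.feature.Normalizer
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.SparkSession

object NormalizerUsage {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("NormalizerUsage")
      .getOrCreate()

    val df = spark.createDataFrame(Seq(
      (0, Vectors.dense(1.0, 0.5, -1.0)),
      (1, Vectors.dense(2.0, 1.0, 1.0))
    )).toDF("id", "features")

    // Normalize each row vector to unit L^1 norm (p defaults to 2.0).
    val normalizer = new Normalizer()
      .setInputCol("features")
      .setOutputCol("normFeatures")
      .setP(1.0)

    normalizer.transform(df).show(truncate = false)
    spark.stop()
  }
}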
Normalizer source code:

package org.apache.spark.ml.feature

import org.apache.spark.annotation.Since
import org.apache.spark.ml.UnaryTransformer
import org.apache.spark.ml.attribute.AttributeGroup
import org.apache.spark.ml.linalg.{Vector, VectorUDT}
import org.apache.spark.ml.param.{DoubleParam, ParamValidators}
import org.apache.spark.ml.util._
import org.apache.spark.mllib.feature
import org.apache.spark.mllib.linalg.{Vectors => OldVectors}
import org.apache.spark.sql.types._

/**
 * Normalize a vector to have unit norm using the given p-norm.
 */
@Since("1.4.0")
class Normalizer @Since("1.4.0") (@Since("1.4.0") override val uid: String)
  extends UnaryTransformer[Vector, Vector, Normalizer] with DefaultParamsWritable {

  @Since("1.4.0")
  def this() = this(Identifiable.randomUID("normalizer"))

  /**
   * Normalization in L^p^ space. Must be greater than or equal to 1.
   * (default: p = 2)
   * @group param
   */
  @Since("1.4.0")
  val p = new DoubleParam(this, "p", "the p norm value", ParamValidators.gtEq(1))

  setDefault(p -> 2.0)

  /** @group getParam */
  @Since("1.4.0")
  def getP: Double = $(p)

  /** @group setParam */
  @Since("1.4.0")
  def setP(value: Double): this.type = set(p, value)

  // Delegates the per-row computation to the legacy mllib Normalizer,
  // converting between the new ml.linalg and old mllib.linalg vector types.
  override protected def createTransformFunc: Vector => Vector = {
    val normalizer = new feature.Normalizer($(p))
    vector => normalizer.transform(OldVectors.fromML(vector)).asML
  }

  override protected def validateInputType(inputType: DataType): Unit = {
    require(inputType.isInstanceOf[VectorUDT],
      s"Input type must be ${(new VectorUDT).catalogString} but got ${inputType.catalogString}.")
  }

  override protected def outputDataType: DataType = new VectorUDT()

  @Since("1.4.0")
  override def transformSchema(schema: StructType): StructType = {
    var outputSchema = super.transformSchema(schema)
    if ($(inputCol).nonEmpty && $(outputCol).nonEmpty) {
      // Normalization does not change dimensionality, so the input column's
      // vector size is copied onto the output column's attribute metadata.
      val size = AttributeGroup.fromStructField(schema($(inputCol))).size
      if (size >= 0) {
        outputSchema = SchemaUtils.updateAttributeGroupSize(outputSchema,
          $(outputCol), size)
      }
    }
    outputSchema
  }

  @Since("3.0.0")
  override def toString: String = {
    s"Normalizer: uid=$uid, p=${$(p)}"
  }
}

@Since("1.6.0")
object Normalizer extends DefaultParamsReadable[Normalizer] {
  @Since("1.6.0")
  override def load(path: String): Normalizer = super.load(path)
}
Series articles:
Documentation on feature-related processing in Spark
Concept overview