Lesson 13: Scala Pattern Matching in Practice
Pattern matching is used heavily throughout Scala and is usually combined with case classes, whose members are immutable by default. Looking at its syntax, a match expression is essentially a function: it takes a value and returns the result of the first case clause whose pattern matches.
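Two quick illustrations of those points, as they could be typed into the Scala REPL (Person and describe are illustrative names, not part of the lesson code): case class fields are read-only vals, and a block of case clauses can be used directly as a function value.

case class Person(name: String, age: Int)

val p = Person("Ann", 30)
// p.name = "Bob"   // does not compile: case class fields are immutable vals by default

// A block of case clauses is itself a function literal, here of type String => String:
val describe: String => String = {
  case "Spark" => "computation engine"
  case "HDFS"  => "storage system"
  case other   => "unknown framework: " + other
}

println(describe("Spark"))   // prints "computation engine"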
package com.dt.spark.scala.basics

class DataFramework
case class ComputationFramework(name: String, popular: Boolean) extends DataFramework
case class StorageFramework(name: String, popular: Boolean) extends DataFramework

object HelloPatternMatch {
  def main(args: Array[String]): Unit = {
    // getSalary("Scala")
    getSalary("haha", 6)
    getMatchType(10.0)
    getMatchCollection(Array("Scala", "aaa", "bbb"))
    /**
     * Scala automatically generates an apply method in the companion object of a case class,
     * so ComputationFramework("Spark", true) works without new (see the sketch after this listing).
     */
    getBigDataMatch(ComputationFramework("Spark", true))
    getBigDataMatch(StorageFramework("HDFS", true))
    getValue("Hadoop", Map("Spark" -> "hot"))
  }

  def getSalary(name: String, age: Int): Unit = {
    name match {
      case "Spark" => println("$150000/year")
      case "Hadoop" => println("$100000/year")
      case _ if name == "Scala" => println("$140000/year")
      case _ if name == "MapReduce" => println("$90000/year")
      case _name if age > 5 => println(_name + ":" + age)
      case _ => println("$80000/year")
    }
  }

  def getMatchType(msg: Any): Unit = {
    msg match {
      case i: Int => println("Int")
      case s: String => println("String")
      case d: Double => println("Double")
      case arr: Array[Any] => println("Array")
      case _ => println("Unknown")
    }
  }

  def getMatchCollection(msg: Array[String]): Unit = {
    msg match {
      case Array("Scala") => println("One Element")
      case Array("Scala", "Java") => println("Two Elements")
      case Array("Spark", _*) => println("Spark")
      case _ => println("Unknown")
    }
  }

  def getBigDataMatch(data: DataFramework): Unit = {
    data match {
      case ComputationFramework(name, popular) =>
        println(name + ":" + popular)
      case StorageFramework(name, popular) =>
        println(name + ":" + popular)
      case _ =>
        println("Unknown")
    }
  }

  def getValue(key: String, content: Map[String, String]): Unit = {
    // Map.get returns an Option[String]; see the note after the summary below.
    content.get(key) match {
      case Some(value) => println(value)
      case None => println("Not found")
    }
  }
}
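As a side note on the comment in main: for every case class the compiler also generates a companion object containing an apply method (so instances can be built without new) and an unapply method, which is what the constructor patterns in getBigDataMatch rely on. A rough hand-written equivalent, as a REPL-style sketch (ComputationFramework2 is an illustrative name, not part of the lesson code):

class ComputationFramework2(val name: String, val popular: Boolean)

object ComputationFramework2 {
  // apply lets callers write ComputationFramework2("Spark", true) instead of calling new
  def apply(name: String, popular: Boolean): ComputationFramework2 =
    new ComputationFramework2(name, popular)

  // unapply is what a constructor pattern such as case ComputationFramework2(n, p) calls
  def unapply(cf: ComputationFramework2): Option[(String, Boolean)] =
    Some((cf.name, cf.popular))
}

val spark = ComputationFramework2("Spark", true)
spark match {
  case ComputationFramework2(n, p) => println(n + ":" + p)   // prints "Spark:true"
}

The real compiler-generated companion and case class also add toString, equals, hashCode, and copy, which this sketch omits.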
Summary: 1. Review the notes.
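One more note on getValue above: Map.get returns an Option[String], so the Some/None match is one way to handle a missing key, and getOrElse expresses the same fallback more compactly. A small REPL-style sketch (the map contents are illustrative):

val content = Map("Spark" -> "hot")

// Matching on the Option, as getValue does:
content.get("Hadoop") match {
  case Some(value) => println(value)
  case None        => println("Not found")
}

// Equivalent, more compact form:
println(content.getOrElse("Hadoop", "Not found"))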
Reposted from: https://my.oschina.net/u/1449867/blog/726732