Next, let's add statement-block support to the calculator program, so that it can run a batch of calculations, much like a code block in a programming language.

The code in this post builds on the code from the previous post, "Implementing Calc with Recursive Descent". If anything here feels unfamiliar, go back and review the earlier posts first.

Code listing (in Go)

package main

import (
    "fmt"
    "strconv"
    "io/ioutil"
    "./bklexer"
)

type Node interface {
    GetValue() float64
}

type Block struct {
    statements []Node
}

func NewBlock() *Block {
    return &Block{}
}

func (block *Block) AddStatement(statement Node) {
    block.statements = append(block.statements, statement)
}

func (block *Block) Eval() {
    for i, statement := range block.statements {
        fmt.Printf("out[%d] = %f\n", i, statement.GetValue())
    }
}

type Number struct {
    value float64
}

func NewNumber(token *BKLexer.Token) *Number {
    value, _ := strconv.ParseFloat(token.Source, 64)
    return &Number{value: value}
}

func (number *Number) GetValue() float64 {
    return number.value
}

type BinaryOpt struct {
    opt string
    lhs Node
    rhs Node
}

func NewBinaryOpt(token *BKLexer.Token, lhs Node, rhs Node) *BinaryOpt {
    return &BinaryOpt{opt: token.Source, lhs: lhs, rhs: rhs}
}

func (binaryOpt *BinaryOpt) GetValue() float64 {
    lhs, rhs := binaryOpt.lhs, binaryOpt.rhs
    switch binaryOpt.opt {
    case "+":
        return lhs.GetValue() + rhs.GetValue()
    case "-":
        return lhs.GetValue() - rhs.GetValue()
    case "*":
        return lhs.GetValue() * rhs.GetValue()
    case "/":
        return lhs.GetValue() / rhs.GetValue()
    }
    return 0
}

func parse(lexer *BKLexer.Lexer) *Block {
    block := NewBlock()

    token := lexer.NextToken()
    for token.TType == BKLexer.TOKEN_TYPE_NEWLINE {
        token = lexer.NextToken()
    }

    for token.TType != BKLexer.TOKEN_TYPE_EOF {
        statement := parse_binary_add(lexer)
        if statement == nil {
            return nil
        }

        token = lexer.GetToken()
        if token.TType != BKLexer.TOKEN_TYPE_NEWLINE &&
           token.TType != BKLexer.TOKEN_TYPE_EOF {
            return nil
        }

        block.AddStatement(statement)

        for token.TType == BKLexer.TOKEN_TYPE_NEWLINE {
            token = lexer.NextToken()
        }
    }

    return block
}

func parse_binary_add(lexer *BKLexer.Lexer) Node {
    lhs := parse_binary_mul(lexer)
    if lhs == nil {
        return nil
    }

    token := lexer.GetToken()
    for token.Source == "+" || token.Source == "-" {
        lexer.NextToken()
        rhs := parse_binary_mul(lexer)
        if rhs == nil {
            return nil
        }
        lhs = NewBinaryOpt(token, lhs, rhs)
        token = lexer.GetToken()
    }

    return lhs
}

func parse_binary_mul(lexer *BKLexer.Lexer) Node {
    lhs := parse_number(lexer)
    if lhs == nil {
        return nil
    }

    token := lexer.GetToken()
    for token.Source == "*" || token.Source == "/" {
        lexer.NextToken()
        rhs := parse_number(lexer)
        if rhs == nil {
            return nil
        }
        lhs = NewBinaryOpt(token, lhs, rhs)
        token = lexer.GetToken()
    }

    return lhs
}

func parse_number(lexer *BKLexer.Lexer) Node {
    token := lexer.GetToken()

    if token.Name == "LPAR" {
        lexer.NextToken()
        expr := parse_binary_add(lexer)
        if expr == nil {
            return nil
        }
        token := lexer.GetToken()
        if token.Name != "RPAR" {
            return nil
        }
        lexer.NextToken()
        return expr
    }

    if token.Name == "NUMBER" {
        number := NewNumber(token)
        lexer.NextToken()
        return number
    }

    return nil
}

func main() {
    fmt.Println("Hello My Calc.")

    lexer := BKLexer.NewLexer()
    lexer.AddRule("\\d+\\.?\\d*", "NUMBER")
    lexer.AddRule("\\+", "PLUS")
    lexer.AddRule("-", "MINUS")
    lexer.AddRule("\\*", "MUL")
    lexer.AddRule("/", "DIV")
    lexer.AddRule("\\(", "LPAR")
    lexer.AddRule("\\)", "RPAR")
    lexer.AddIgnores("[ \\f\\t]+")
    lexer.AddIgnores("#[^\\r\\n]*")

    bytes, err := ioutil.ReadFile("../test.txt")
    if err != nil {
        fmt.Println("read faild")
        return
    }
    code := string(bytes)
    fmt.Println(code)

    lexer.Build(code)

    result := parse(lexer)
    if result == nil {
        fmt.Println("null result")
        return
    }
    result.Eval()
}

Import the required packages

import (
    "fmt"
    "strconv"
    "io/ioutil"
    "./bklexer"
)

  • fmt: printing output
  • strconv: string conversion
  • io/ioutil: reading files
  • ./bklexer: lexical analysis

Define the statement block struct

type Block struct {
    statements []Node
}

func NewBlock() *Block {
    return &Block{}
}

The statements field of the Block struct stores each parsed statement; the NewBlock function creates an instance of the struct.
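
Both of the concrete node types from the full listing above implement the Node interface. If you want the compiler to verify that, a common Go idiom (an illustrative addition, not part of the original code) is a pair of compile-time assertions:

// Compile-time checks (not in the original listing) that both node kinds
// satisfy the Node interface stored in Block.statements.
var _ Node = (*Number)(nil)
var _ Node = (*BinaryOpt)(nil)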

Add methods to the statement block struct

func (block *Block) AddStatement(statement Node) {
    block.statements = append(block.statements, statement)
}

func (block *Block) Eval() {
    for i, statement := range block.statements {
        fmt.Printf("out[%d] = %f\n", i, statement.GetValue())
    }
}

AddStatement appends a new statement, and Eval evaluates all of the statements in batch and prints the results.
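
As a quick usage sketch (this snippet is not part of the article's code, and it assumes it lives in the same package as the listing above so the unexported fields are reachable), a Block can also be assembled by hand and evaluated:

// Hypothetical helper showing Block in isolation; in the real program the
// nodes are produced by the parser, not constructed literally like this.
func demoBlock() {
    block := NewBlock()
    block.AddStatement(&Number{value: 1.5}) // out[0] = 1.500000
    block.AddStatement(&BinaryOpt{
        opt: "+",
        lhs: &Number{value: 1},
        rhs: &Number{value: 2},
    }) // out[1] = 3.000000
    block.Eval()
}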

Define the parser entry function

func parse(lexer *BKLexer.Lexer) *Block {
    block := NewBlock()

    token := lexer.NextToken()
    for token.TType == BKLexer.TOKEN_TYPE_NEWLINE {
        token = lexer.NextToken()
    }

    for token.TType != BKLexer.TOKEN_TYPE_EOF {
        statement := parse_binary_add(lexer)
        if statement == nil {
            return nil
        }

        token = lexer.GetToken()
        if token.TType != BKLexer.TOKEN_TYPE_NEWLINE &&
           token.TType != BKLexer.TOKEN_TYPE_EOF {
            return nil
        }

        block.AddStatement(statement)

        for token.TType == BKLexer.TOKEN_TYPE_NEWLINE {
            token = lexer.NextToken()
        }
    }

    return block
}

parse first creates a Block instance and then starts consuming tokens: a loop parses each statement in order and appends it to the block, and finally the block is returned.
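
Putting the pieces together, the grammar that parse and its helper functions implement can be sketched roughly like this (this notation is a summary of the code above, not something from the original series):

block      := NEWLINE* { statement (NEWLINE+ | EOF) }
statement  := binary_add
binary_add := binary_mul { ("+" | "-") binary_mul }
binary_mul := number { ("*" | "/") number }
number     := NUMBER | "(" binary_add ")"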

Note that every statement must end with either a newline or the end of input; anything else is treated as a parse error and nil is returned:

        token = lexer.GetToken()
        if token.TType != BKLexer.TOKEN_TYPE_NEWLINE &&
           token.TType != BKLexer.TOKEN_TYPE_EOF {
            return nil
        }

After a statement has been parsed, we should immediately skip past the newline tokens that follow it; this is what allows blank lines to be ignored during batch evaluation:

        for token.TType == BKLexer.TOKEN_TYPE_NEWLINE {
            token = lexer.NextToken()
        }

A few changes to watch out for

func parse_number(lexer *BKLexer.Lexer) Node {
    token := lexer.GetToken()
    ......

In parse_number, token := lexer.NextToken() has become token := lexer.GetToken(). Because of this, whenever the parsing functions call one another, the calling function must first advance the lexer itself by calling NextToken().

For example:

......
func parse(lexer *BKLexer.Lexer) *Block {
    ......
    token := lexer.NextToken()
    for token.TType == BKLexer.TOKEN_TYPE_NEWLINE {
        token = lexer.NextToken()
    }

    for token.TType != BKLexer.TOKEN_TYPE_EOF {
        statement := parse_binary_add(lexer)
......

func parse_binary_add(lexer *BKLexer.Lexer) Node {
    ......
    for token.Source == "+" || token.Source == "-" {
        lexer.NextToken()
        rhs := parse_binary_mul(lexer)
    ......

Define the lexer rules

lexer := BKLexer.NewLexer()
lexer.AddRule("\\d+\\.?\\d*", "NUMBER")
lexer.AddRule("\\+", "PLUS")
lexer.AddRule("-", "MINUS")
lexer.AddRule("\\*", "MUL")
lexer.AddRule("/", "DIV")
lexer.AddRule("\\(", "LPAR")
lexer.AddRule("\\)", "RPAR")
lexer.AddIgnores("[ \\f\\t]+")
lexer.AddIgnores("#[^\\r\\n]*")

Note that the ignored-character rules now include an extra rule, lexer.AddIgnores("#[^\\r\\n]*"), which gives the program support for Python-style comments.
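
As a small side illustration (this program is not part of the calculator, and it assumes the lexer treats AddIgnores patterns as ordinary regular expressions), you can see what that pattern strips out by trying it with Go's standard regexp package:

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Same pattern as the AddIgnores call above, written as a raw string:
    // "#" followed by everything up to the end of the line.
    comment := regexp.MustCompile(`#[^\r\n]*`)
    fmt.Println(comment.FindString("1 + 2 # plus"))          // prints: # plus
    fmt.Println(comment.ReplaceAllString("5 * 6 # mul", "")) // prints "5 * 6 " with the comment stripped
}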

Read the file, parse it, and evaluate

bytes, err := ioutil.ReadFile("../test.txt")
if err != nil {
    fmt.Println("read faild")
    return
}
code := string(bytes)
fmt.Println(code)

lexer.Build(code)

result := parse(lexer)
if result == nil {
    fmt.Println("null result")
    return
}
result.Eval()

Test with a sample script

Test input:

1 + 2 # plus
3 - 4
# here is a comment
5 * 6 # mul
7 / 8
1 + (2 - 3) * 4 / 5 # composite

Output:

➜ go run calc.go
Hello My Calc.
1 + 2 # plus
3 - 4
# here is a comment
5 * 6 # mul
7 / 8
1 + (2 - 3) * 4 / 5 # composite
out[0] = 3.000000
out[1] = -1.000000
out[2] = 30.000000
out[3] = 0.875000
out[4] = 0.200000
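
As a sanity check on the last statement: 1 + (2 - 3) * 4 / 5 = 1 + (-1) * 4 / 5 = 1 - 0.8 = 0.2, which matches out[4].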

The next post, "Adding Variable Support to the Language", is coming up; stay tuned.