diff --git a/cl/_testrt/makemap/in.go b/cl/_testrt/makemap/in.go new file mode 100644 index 00000000..a153a7ed --- /dev/null +++ b/cl/_testrt/makemap/in.go @@ -0,0 +1,12 @@ +package main + +func main() { + m := make(map[int]string) + m[1] = "hello" + m[2] = "world" + println(m[1], m[2]) + v, ok := m[2] + println(v, ok) + v2, ok := m[10] + println(v2, ok) +} diff --git a/cl/_testrt/makemap/out.ll b/cl/_testrt/makemap/out.ll new file mode 100644 index 00000000..19a73d0a --- /dev/null +++ b/cl/_testrt/makemap/out.ll @@ -0,0 +1,245 @@ +; ModuleID = 'main' +source_filename = "main" + +%"github.com/goplus/llgo/internal/runtime.String" = type { ptr, i64 } +%"github.com/goplus/llgo/internal/abi.StructField" = type { %"github.com/goplus/llgo/internal/runtime.String", ptr, i64, %"github.com/goplus/llgo/internal/runtime.String", i1 } +%"github.com/goplus/llgo/internal/runtime.Slice" = type { ptr, i64, i64 } + +@"main.init$guard" = global i1 false, align 1 +@__llgo_argc = global i32 0, align 4 +@__llgo_argv = global ptr null, align 8 +@"map[_llgo_int]_llgo_string" = linkonce global ptr null, align 8 +@0 = private unnamed_addr constant [7 x i8] c"topbits", align 1 +@1 = private unnamed_addr constant [4 x i8] c"keys", align 1 +@2 = private unnamed_addr constant [5 x i8] c"elems", align 1 +@3 = private unnamed_addr constant [8 x i8] c"overflow", align 1 +@4 = private unnamed_addr constant [4 x i8] c"main", align 1 +@5 = private unnamed_addr constant [5 x i8] c"hello", align 1 +@6 = private unnamed_addr constant [5 x i8] c"world", align 1 + +define void @main.init() { +_llgo_0: + %0 = load i1, ptr @"main.init$guard", align 1 + br i1 %0, label %_llgo_2, label %_llgo_1 + +_llgo_1: ; preds = %_llgo_0 + store i1 true, ptr @"main.init$guard", align 1 + call void @"main.init$after"() + br label %_llgo_2 + +_llgo_2: ; preds = %_llgo_1, %_llgo_0 + ret void +} + +define i32 @main(i32 %0, ptr %1) { +_llgo_0: + store i32 %0, ptr @__llgo_argc, align 4 + store ptr %1, ptr @__llgo_argv, align 8 + call void @"github.com/goplus/llgo/internal/runtime.init"() + call void @main.init() + %2 = load ptr, ptr @"map[_llgo_int]_llgo_string", align 8 + %3 = call ptr @"github.com/goplus/llgo/internal/runtime.MakeMap"(ptr %2, i64 0) + %4 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %5 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %4, i32 0, i32 0 + store ptr @5, ptr %5, align 8 + %6 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %4, i32 0, i32 1 + store i64 5, ptr %6, align 4 + %7 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %4, align 8 + %8 = load ptr, ptr @"map[_llgo_int]_llgo_string", align 8 + %9 = call ptr @"github.com/goplus/llgo/internal/runtime.AllocU"(i64 8) + store i64 1, ptr %9, align 4 + %10 = call ptr @"github.com/goplus/llgo/internal/runtime.MapAssign"(ptr %8, ptr %3, ptr %9) + store %"github.com/goplus/llgo/internal/runtime.String" %7, ptr %10, align 8 + %11 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %12 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %11, i32 0, i32 0 + store ptr @6, ptr %12, align 8 + %13 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %11, i32 0, i32 1 + store i64 5, ptr %13, align 4 + %14 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %11, align 8 + %15 = load ptr, ptr @"map[_llgo_int]_llgo_string", align 8 + %16 = call ptr @"github.com/goplus/llgo/internal/runtime.AllocU"(i64 8) + 
store i64 2, ptr %16, align 4 + %17 = call ptr @"github.com/goplus/llgo/internal/runtime.MapAssign"(ptr %15, ptr %3, ptr %16) + store %"github.com/goplus/llgo/internal/runtime.String" %14, ptr %17, align 8 + %18 = load ptr, ptr @"map[_llgo_int]_llgo_string", align 8 + %19 = call ptr @"github.com/goplus/llgo/internal/runtime.AllocU"(i64 8) + store i64 1, ptr %19, align 4 + %20 = call ptr @"github.com/goplus/llgo/internal/runtime.MapAccess1"(ptr %18, ptr %3, ptr %19) + %21 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %20, align 8 + %22 = load ptr, ptr @"map[_llgo_int]_llgo_string", align 8 + %23 = call ptr @"github.com/goplus/llgo/internal/runtime.AllocU"(i64 8) + store i64 2, ptr %23, align 4 + %24 = call ptr @"github.com/goplus/llgo/internal/runtime.MapAccess1"(ptr %22, ptr %3, ptr %23) + %25 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %24, align 8 + call void @"github.com/goplus/llgo/internal/runtime.PrintString"(%"github.com/goplus/llgo/internal/runtime.String" %21) + call void @"github.com/goplus/llgo/internal/runtime.PrintByte"(i8 32) + call void @"github.com/goplus/llgo/internal/runtime.PrintString"(%"github.com/goplus/llgo/internal/runtime.String" %25) + call void @"github.com/goplus/llgo/internal/runtime.PrintByte"(i8 10) + %26 = load ptr, ptr @"map[_llgo_int]_llgo_string", align 8 + %27 = call ptr @"github.com/goplus/llgo/internal/runtime.AllocU"(i64 8) + store i64 2, ptr %27, align 4 + %28 = call { ptr, i1 } @"github.com/goplus/llgo/internal/runtime.MapAccess2"(ptr %26, ptr %3, ptr %27) + %29 = extractvalue { ptr, i1 } %28, 0 + %30 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %29, align 8 + %31 = extractvalue { ptr, i1 } %28, 1 + %32 = alloca { %"github.com/goplus/llgo/internal/runtime.String", i1 }, align 8 + %33 = getelementptr inbounds { %"github.com/goplus/llgo/internal/runtime.String", i1 }, ptr %32, i32 0, i32 0 + store %"github.com/goplus/llgo/internal/runtime.String" %30, ptr %33, align 8 + %34 = getelementptr inbounds { %"github.com/goplus/llgo/internal/runtime.String", i1 }, ptr %32, i32 0, i32 1 + store i1 %31, ptr %34, align 1 + %35 = load { %"github.com/goplus/llgo/internal/runtime.String", i1 }, ptr %32, align 8 + %36 = extractvalue { %"github.com/goplus/llgo/internal/runtime.String", i1 } %35, 0 + %37 = extractvalue { %"github.com/goplus/llgo/internal/runtime.String", i1 } %35, 1 + call void @"github.com/goplus/llgo/internal/runtime.PrintString"(%"github.com/goplus/llgo/internal/runtime.String" %36) + call void @"github.com/goplus/llgo/internal/runtime.PrintByte"(i8 32) + call void @"github.com/goplus/llgo/internal/runtime.PrintBool"(i1 %37) + call void @"github.com/goplus/llgo/internal/runtime.PrintByte"(i8 10) + %38 = load ptr, ptr @"map[_llgo_int]_llgo_string", align 8 + %39 = call ptr @"github.com/goplus/llgo/internal/runtime.AllocU"(i64 8) + store i64 10, ptr %39, align 4 + %40 = call { ptr, i1 } @"github.com/goplus/llgo/internal/runtime.MapAccess2"(ptr %38, ptr %3, ptr %39) + %41 = extractvalue { ptr, i1 } %40, 0 + %42 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %41, align 8 + %43 = extractvalue { ptr, i1 } %40, 1 + %44 = alloca { %"github.com/goplus/llgo/internal/runtime.String", i1 }, align 8 + %45 = getelementptr inbounds { %"github.com/goplus/llgo/internal/runtime.String", i1 }, ptr %44, i32 0, i32 0 + store %"github.com/goplus/llgo/internal/runtime.String" %42, ptr %45, align 8 + %46 = getelementptr inbounds { %"github.com/goplus/llgo/internal/runtime.String", i1 }, ptr %44, i32 0, i32 
1 + store i1 %43, ptr %46, align 1 + %47 = load { %"github.com/goplus/llgo/internal/runtime.String", i1 }, ptr %44, align 8 + %48 = extractvalue { %"github.com/goplus/llgo/internal/runtime.String", i1 } %47, 0 + %49 = extractvalue { %"github.com/goplus/llgo/internal/runtime.String", i1 } %47, 1 + call void @"github.com/goplus/llgo/internal/runtime.PrintString"(%"github.com/goplus/llgo/internal/runtime.String" %48) + call void @"github.com/goplus/llgo/internal/runtime.PrintByte"(i8 32) + call void @"github.com/goplus/llgo/internal/runtime.PrintBool"(i1 %49) + call void @"github.com/goplus/llgo/internal/runtime.PrintByte"(i8 10) + ret i32 0 +} + +declare void @"github.com/goplus/llgo/internal/runtime.init"() + +define void @"main.init$after"() { +_llgo_0: + %0 = load ptr, ptr @"map[_llgo_int]_llgo_string", align 8 + %1 = icmp eq ptr %0, null + br i1 %1, label %_llgo_1, label %_llgo_2 + +_llgo_1: ; preds = %_llgo_0 + %2 = call ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64 2) + %3 = call ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64 24) + %4 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %5 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %4, i32 0, i32 0 + store ptr @0, ptr %5, align 8 + %6 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %4, i32 0, i32 1 + store i64 7, ptr %6, align 4 + %7 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %4, align 8 + %8 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %9 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %8, i32 0, i32 0 + store ptr null, ptr %9, align 8 + %10 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %8, i32 0, i32 1 + store i64 0, ptr %10, align 4 + %11 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %8, align 8 + %12 = call ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64 8) + %13 = call ptr @"github.com/goplus/llgo/internal/runtime.ArrayOf"(i64 8, ptr %12) + %14 = call %"github.com/goplus/llgo/internal/abi.StructField" @"github.com/goplus/llgo/internal/runtime.StructField"(%"github.com/goplus/llgo/internal/runtime.String" %7, ptr %13, i64 0, %"github.com/goplus/llgo/internal/runtime.String" %11, i1 false) + %15 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %16 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %15, i32 0, i32 0 + store ptr @1, ptr %16, align 8 + %17 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %15, i32 0, i32 1 + store i64 4, ptr %17, align 4 + %18 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %15, align 8 + %19 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %20 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %19, i32 0, i32 0 + store ptr null, ptr %20, align 8 + %21 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %19, i32 0, i32 1 + store i64 0, ptr %21, align 4 + %22 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %19, align 8 + %23 = call ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64 2) + %24 = call ptr @"github.com/goplus/llgo/internal/runtime.ArrayOf"(i64 8, ptr %23) + %25 = call %"github.com/goplus/llgo/internal/abi.StructField" @"github.com/goplus/llgo/internal/runtime.StructField"(%"github.com/goplus/llgo/internal/runtime.String" %18, ptr %24, i64 8, 
%"github.com/goplus/llgo/internal/runtime.String" %22, i1 false) + %26 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %27 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %26, i32 0, i32 0 + store ptr @2, ptr %27, align 8 + %28 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %26, i32 0, i32 1 + store i64 5, ptr %28, align 4 + %29 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %26, align 8 + %30 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %31 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %30, i32 0, i32 0 + store ptr null, ptr %31, align 8 + %32 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %30, i32 0, i32 1 + store i64 0, ptr %32, align 4 + %33 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %30, align 8 + %34 = call ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64 24) + %35 = call ptr @"github.com/goplus/llgo/internal/runtime.ArrayOf"(i64 8, ptr %34) + %36 = call %"github.com/goplus/llgo/internal/abi.StructField" @"github.com/goplus/llgo/internal/runtime.StructField"(%"github.com/goplus/llgo/internal/runtime.String" %29, ptr %35, i64 72, %"github.com/goplus/llgo/internal/runtime.String" %33, i1 false) + %37 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %38 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %37, i32 0, i32 0 + store ptr @3, ptr %38, align 8 + %39 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %37, i32 0, i32 1 + store i64 8, ptr %39, align 4 + %40 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %37, align 8 + %41 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %42 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %41, i32 0, i32 0 + store ptr null, ptr %42, align 8 + %43 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %41, i32 0, i32 1 + store i64 0, ptr %43, align 4 + %44 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %41, align 8 + %45 = call ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64 26) + %46 = call %"github.com/goplus/llgo/internal/abi.StructField" @"github.com/goplus/llgo/internal/runtime.StructField"(%"github.com/goplus/llgo/internal/runtime.String" %40, ptr %45, i64 200, %"github.com/goplus/llgo/internal/runtime.String" %44, i1 false) + %47 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %48 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %47, i32 0, i32 0 + store ptr @4, ptr %48, align 8 + %49 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %47, i32 0, i32 1 + store i64 4, ptr %49, align 4 + %50 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %47, align 8 + %51 = call ptr @"github.com/goplus/llgo/internal/runtime.AllocU"(i64 224) + %52 = getelementptr %"github.com/goplus/llgo/internal/abi.StructField", ptr %51, i64 0 + store %"github.com/goplus/llgo/internal/abi.StructField" %14, ptr %52, align 8 + %53 = getelementptr %"github.com/goplus/llgo/internal/abi.StructField", ptr %51, i64 1 + store %"github.com/goplus/llgo/internal/abi.StructField" %25, ptr %53, align 8 + %54 = getelementptr %"github.com/goplus/llgo/internal/abi.StructField", ptr %51, i64 2 + store %"github.com/goplus/llgo/internal/abi.StructField" %36, ptr %54, align 8 + %55 = 
getelementptr %"github.com/goplus/llgo/internal/abi.StructField", ptr %51, i64 3 + store %"github.com/goplus/llgo/internal/abi.StructField" %46, ptr %55, align 8 + %56 = alloca %"github.com/goplus/llgo/internal/runtime.Slice", align 8 + %57 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.Slice", ptr %56, i32 0, i32 0 + store ptr %51, ptr %57, align 8 + %58 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.Slice", ptr %56, i32 0, i32 1 + store i64 4, ptr %58, align 4 + %59 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.Slice", ptr %56, i32 0, i32 2 + store i64 4, ptr %59, align 4 + %60 = load %"github.com/goplus/llgo/internal/runtime.Slice", ptr %56, align 8 + %61 = call ptr @"github.com/goplus/llgo/internal/runtime.Struct"(%"github.com/goplus/llgo/internal/runtime.String" %50, i64 208, %"github.com/goplus/llgo/internal/runtime.Slice" %60) + %62 = call ptr @"github.com/goplus/llgo/internal/runtime.MapOf"(ptr %2, ptr %3, ptr %61, i64 4) + store ptr %62, ptr @"map[_llgo_int]_llgo_string", align 8 + br label %_llgo_2 + +_llgo_2: ; preds = %_llgo_1, %_llgo_0 + ret void +} + +declare ptr @"github.com/goplus/llgo/internal/runtime.MapOf"(ptr, ptr, ptr, i64) + +declare ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64) + +declare ptr @"github.com/goplus/llgo/internal/runtime.Struct"(%"github.com/goplus/llgo/internal/runtime.String", i64, %"github.com/goplus/llgo/internal/runtime.Slice") + +declare %"github.com/goplus/llgo/internal/abi.StructField" @"github.com/goplus/llgo/internal/runtime.StructField"(%"github.com/goplus/llgo/internal/runtime.String", ptr, i64, %"github.com/goplus/llgo/internal/runtime.String", i1) + +declare ptr @"github.com/goplus/llgo/internal/runtime.ArrayOf"(i64, ptr) + +declare ptr @"github.com/goplus/llgo/internal/runtime.AllocU"(i64) + +declare ptr @"github.com/goplus/llgo/internal/runtime.MakeMap"(ptr, i64) + +declare ptr @"github.com/goplus/llgo/internal/runtime.MapAssign"(ptr, ptr, ptr) + +declare ptr @"github.com/goplus/llgo/internal/runtime.MapAccess1"(ptr, ptr, ptr) + +declare void @"github.com/goplus/llgo/internal/runtime.PrintString"(%"github.com/goplus/llgo/internal/runtime.String") + +declare void @"github.com/goplus/llgo/internal/runtime.PrintByte"(i8) + +declare { ptr, i1 } @"github.com/goplus/llgo/internal/runtime.MapAccess2"(ptr, ptr, ptr) + +declare void @"github.com/goplus/llgo/internal/runtime.PrintBool"(i1) diff --git a/cl/_testrt/map/out.ll b/cl/_testrt/map/out.ll index 0591fc76..f1a5665c 100644 --- a/cl/_testrt/map/out.ll +++ b/cl/_testrt/map/out.ll @@ -1,10 +1,20 @@ ; ModuleID = 'main' source_filename = "main" +%"github.com/goplus/llgo/internal/runtime.String" = type { ptr, i64 } +%"github.com/goplus/llgo/internal/abi.StructField" = type { %"github.com/goplus/llgo/internal/runtime.String", ptr, i64, %"github.com/goplus/llgo/internal/runtime.String", i1 } +%"github.com/goplus/llgo/internal/runtime.Slice" = type { ptr, i64, i64 } + @"main.init$guard" = global i1 false, align 1 @__llgo_argc = global i32 0, align 4 @__llgo_argv = global ptr null, align 8 -@0 = private unnamed_addr constant [10 x i8] c"Hello %d\0A\00", align 1 +@"map[_llgo_int]_llgo_int" = linkonce global ptr null, align 8 +@0 = private unnamed_addr constant [7 x i8] c"topbits", align 1 +@1 = private unnamed_addr constant [4 x i8] c"keys", align 1 +@2 = private unnamed_addr constant [5 x i8] c"elems", align 1 +@3 = private unnamed_addr constant [8 x i8] c"overflow", align 1 +@4 = private unnamed_addr constant [4 x i8] c"main", 
align 1 +@5 = private unnamed_addr constant [10 x i8] c"Hello %d\0A\00", align 1 define void @main.init() { _llgo_0: @@ -13,6 +23,7 @@ _llgo_0: _llgo_1: ; preds = %_llgo_0 store i1 true, ptr @"main.init$guard", align 1 + call void @"main.init$after"() br label %_llgo_2 _llgo_2: ; preds = %_llgo_1, %_llgo_0 @@ -25,13 +36,145 @@ _llgo_0: store ptr %1, ptr @__llgo_argv, align 8 call void @"github.com/goplus/llgo/internal/runtime.init"() call void @main.init() - %2 = call ptr @"github.com/goplus/llgo/internal/runtime.MakeSmallMap"() - %3 = call i32 (ptr, ...) @printf(ptr @0, ) + %2 = load ptr, ptr @"map[_llgo_int]_llgo_int", align 8 + %3 = call ptr @"github.com/goplus/llgo/internal/runtime.MakeMap"(ptr %2, i64 2) + %4 = load ptr, ptr @"map[_llgo_int]_llgo_int", align 8 + %5 = call ptr @"github.com/goplus/llgo/internal/runtime.AllocU"(i64 8) + store i64 23, ptr %5, align 4 + %6 = call ptr @"github.com/goplus/llgo/internal/runtime.MapAssign"(ptr %4, ptr %3, ptr %5) + store i64 100, ptr %6, align 4 + %7 = load ptr, ptr @"map[_llgo_int]_llgo_int", align 8 + %8 = call ptr @"github.com/goplus/llgo/internal/runtime.AllocU"(i64 8) + store i64 7, ptr %8, align 4 + %9 = call ptr @"github.com/goplus/llgo/internal/runtime.MapAssign"(ptr %7, ptr %3, ptr %8) + store i64 29, ptr %9, align 4 + %10 = load ptr, ptr @"map[_llgo_int]_llgo_int", align 8 + %11 = call ptr @"github.com/goplus/llgo/internal/runtime.AllocU"(i64 8) + store i64 23, ptr %11, align 4 + %12 = call ptr @"github.com/goplus/llgo/internal/runtime.MapAccess1"(ptr %10, ptr %3, ptr %11) + %13 = load i64, ptr %12, align 4 + %14 = call i32 (ptr, ...) @printf(ptr @5, i64 %13) ret i32 0 } declare void @"github.com/goplus/llgo/internal/runtime.init"() -declare ptr @"github.com/goplus/llgo/internal/runtime.MakeSmallMap"() +define void @"main.init$after"() { +_llgo_0: + %0 = load ptr, ptr @"map[_llgo_int]_llgo_int", align 8 + %1 = icmp eq ptr %0, null + br i1 %1, label %_llgo_1, label %_llgo_2 + +_llgo_1: ; preds = %_llgo_0 + %2 = call ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64 2) + %3 = call ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64 2) + %4 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %5 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %4, i32 0, i32 0 + store ptr @0, ptr %5, align 8 + %6 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %4, i32 0, i32 1 + store i64 7, ptr %6, align 4 + %7 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %4, align 8 + %8 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %9 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %8, i32 0, i32 0 + store ptr null, ptr %9, align 8 + %10 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %8, i32 0, i32 1 + store i64 0, ptr %10, align 4 + %11 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %8, align 8 + %12 = call ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64 8) + %13 = call ptr @"github.com/goplus/llgo/internal/runtime.ArrayOf"(i64 8, ptr %12) + %14 = call %"github.com/goplus/llgo/internal/abi.StructField" @"github.com/goplus/llgo/internal/runtime.StructField"(%"github.com/goplus/llgo/internal/runtime.String" %7, ptr %13, i64 0, %"github.com/goplus/llgo/internal/runtime.String" %11, i1 false) + %15 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %16 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr 
%15, i32 0, i32 0 + store ptr @1, ptr %16, align 8 + %17 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %15, i32 0, i32 1 + store i64 4, ptr %17, align 4 + %18 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %15, align 8 + %19 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %20 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %19, i32 0, i32 0 + store ptr null, ptr %20, align 8 + %21 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %19, i32 0, i32 1 + store i64 0, ptr %21, align 4 + %22 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %19, align 8 + %23 = call ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64 2) + %24 = call ptr @"github.com/goplus/llgo/internal/runtime.ArrayOf"(i64 8, ptr %23) + %25 = call %"github.com/goplus/llgo/internal/abi.StructField" @"github.com/goplus/llgo/internal/runtime.StructField"(%"github.com/goplus/llgo/internal/runtime.String" %18, ptr %24, i64 8, %"github.com/goplus/llgo/internal/runtime.String" %22, i1 false) + %26 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %27 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %26, i32 0, i32 0 + store ptr @2, ptr %27, align 8 + %28 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %26, i32 0, i32 1 + store i64 5, ptr %28, align 4 + %29 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %26, align 8 + %30 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %31 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %30, i32 0, i32 0 + store ptr null, ptr %31, align 8 + %32 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %30, i32 0, i32 1 + store i64 0, ptr %32, align 4 + %33 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %30, align 8 + %34 = call ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64 2) + %35 = call ptr @"github.com/goplus/llgo/internal/runtime.ArrayOf"(i64 8, ptr %34) + %36 = call %"github.com/goplus/llgo/internal/abi.StructField" @"github.com/goplus/llgo/internal/runtime.StructField"(%"github.com/goplus/llgo/internal/runtime.String" %29, ptr %35, i64 72, %"github.com/goplus/llgo/internal/runtime.String" %33, i1 false) + %37 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %38 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %37, i32 0, i32 0 + store ptr @3, ptr %38, align 8 + %39 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %37, i32 0, i32 1 + store i64 8, ptr %39, align 4 + %40 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %37, align 8 + %41 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %42 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %41, i32 0, i32 0 + store ptr null, ptr %42, align 8 + %43 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %41, i32 0, i32 1 + store i64 0, ptr %43, align 4 + %44 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %41, align 8 + %45 = call ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64 12) + %46 = call %"github.com/goplus/llgo/internal/abi.StructField" @"github.com/goplus/llgo/internal/runtime.StructField"(%"github.com/goplus/llgo/internal/runtime.String" %40, ptr %45, i64 136, %"github.com/goplus/llgo/internal/runtime.String" 
%44, i1 false) + %47 = alloca %"github.com/goplus/llgo/internal/runtime.String", align 8 + %48 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %47, i32 0, i32 0 + store ptr @4, ptr %48, align 8 + %49 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.String", ptr %47, i32 0, i32 1 + store i64 4, ptr %49, align 4 + %50 = load %"github.com/goplus/llgo/internal/runtime.String", ptr %47, align 8 + %51 = call ptr @"github.com/goplus/llgo/internal/runtime.AllocU"(i64 224) + %52 = getelementptr %"github.com/goplus/llgo/internal/abi.StructField", ptr %51, i64 0 + store %"github.com/goplus/llgo/internal/abi.StructField" %14, ptr %52, align 8 + %53 = getelementptr %"github.com/goplus/llgo/internal/abi.StructField", ptr %51, i64 1 + store %"github.com/goplus/llgo/internal/abi.StructField" %25, ptr %53, align 8 + %54 = getelementptr %"github.com/goplus/llgo/internal/abi.StructField", ptr %51, i64 2 + store %"github.com/goplus/llgo/internal/abi.StructField" %36, ptr %54, align 8 + %55 = getelementptr %"github.com/goplus/llgo/internal/abi.StructField", ptr %51, i64 3 + store %"github.com/goplus/llgo/internal/abi.StructField" %46, ptr %55, align 8 + %56 = alloca %"github.com/goplus/llgo/internal/runtime.Slice", align 8 + %57 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.Slice", ptr %56, i32 0, i32 0 + store ptr %51, ptr %57, align 8 + %58 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.Slice", ptr %56, i32 0, i32 1 + store i64 4, ptr %58, align 4 + %59 = getelementptr inbounds %"github.com/goplus/llgo/internal/runtime.Slice", ptr %56, i32 0, i32 2 + store i64 4, ptr %59, align 4 + %60 = load %"github.com/goplus/llgo/internal/runtime.Slice", ptr %56, align 8 + %61 = call ptr @"github.com/goplus/llgo/internal/runtime.Struct"(%"github.com/goplus/llgo/internal/runtime.String" %50, i64 144, %"github.com/goplus/llgo/internal/runtime.Slice" %60) + %62 = call ptr @"github.com/goplus/llgo/internal/runtime.MapOf"(ptr %2, ptr %3, ptr %61, i64 4) + store ptr %62, ptr @"map[_llgo_int]_llgo_int", align 8 + br label %_llgo_2 + +_llgo_2: ; preds = %_llgo_1, %_llgo_0 + ret void +} + +declare ptr @"github.com/goplus/llgo/internal/runtime.MapOf"(ptr, ptr, ptr, i64) + +declare ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64) + +declare ptr @"github.com/goplus/llgo/internal/runtime.Struct"(%"github.com/goplus/llgo/internal/runtime.String", i64, %"github.com/goplus/llgo/internal/runtime.Slice") + +declare %"github.com/goplus/llgo/internal/abi.StructField" @"github.com/goplus/llgo/internal/runtime.StructField"(%"github.com/goplus/llgo/internal/runtime.String", ptr, i64, %"github.com/goplus/llgo/internal/runtime.String", i1) + +declare ptr @"github.com/goplus/llgo/internal/runtime.ArrayOf"(i64, ptr) + +declare ptr @"github.com/goplus/llgo/internal/runtime.AllocU"(i64) + +declare ptr @"github.com/goplus/llgo/internal/runtime.MakeMap"(ptr, i64) + +declare ptr @"github.com/goplus/llgo/internal/runtime.MapAssign"(ptr, ptr, ptr) + +declare ptr @"github.com/goplus/llgo/internal/runtime.MapAccess1"(ptr, ptr, ptr) declare i32 @printf(ptr, ...) 
diff --git a/internal/runtime/alg.go b/internal/runtime/alg.go new file mode 100644 index 00000000..a3269fe8 --- /dev/null +++ b/internal/runtime/alg.go @@ -0,0 +1,83 @@ +package runtime + +import ( + "unsafe" +) + +func ptrequal(p, q unsafe.Pointer) bool { + return p == q +} +func memequal0(p, q unsafe.Pointer) bool { + return true +} +func memequal8(p, q unsafe.Pointer) bool { + return *(*int8)(p) == *(*int8)(q) +} +func memequal16(p, q unsafe.Pointer) bool { + return *(*int16)(p) == *(*int16)(q) +} +func memequal32(p, q unsafe.Pointer) bool { + return *(*int32)(p) == *(*int32)(q) +} +func memequal64(p, q unsafe.Pointer) bool { + return *(*int64)(p) == *(*int64)(q) +} +func memequal128(p, q unsafe.Pointer) bool { + return *(*[2]int64)(p) == *(*[2]int64)(q) +} +func f32equal(p, q unsafe.Pointer) bool { + return *(*float32)(p) == *(*float32)(q) +} +func f64equal(p, q unsafe.Pointer) bool { + return *(*float64)(p) == *(*float64)(q) +} +func c64equal(p, q unsafe.Pointer) bool { + return *(*complex64)(p) == *(*complex64)(q) +} +func c128equal(p, q unsafe.Pointer) bool { + return *(*complex128)(p) == *(*complex128)(q) +} +func strequal(p, q unsafe.Pointer) bool { + return *(*string)(p) == *(*string)(q) +} +func interequal(p, q unsafe.Pointer) bool { + x := *(*iface)(p) + y := *(*iface)(q) + return x.tab == y.tab && ifaceeq(x.tab, x.data, y.data) +} +func nilinterequal(p, q unsafe.Pointer) bool { + x := *(*eface)(p) + y := *(*eface)(q) + return x._type == y._type && efaceeq(x._type, x.data, y.data) +} +func efaceeq(t *_type, x, y unsafe.Pointer) bool { + if t == nil { + return true + } + eq := t.Equal + if eq == nil { + panic(errorString("comparing uncomparable type " + t.Str_).Error()) + } + if isDirectIface(t) { + // Direct interface types are ptr, chan, map, func, and single-element structs/arrays thereof. + // Maps and funcs are not comparable, so they can't reach here. + // Ptrs, chans, and single-element items can be compared directly using ==. + return x == y + } + return eq(x, y) +} +func ifaceeq(tab *itab, x, y unsafe.Pointer) bool { + if tab == nil { + return true + } + t := tab._type + eq := t.Equal + if eq == nil { + panic(errorString("comparing uncomparable type " + t.Str_).Error()) + } + if isDirectIface(t) { + // See comment in efaceeq. + return x == y + } + return eq(x, y) +} diff --git a/internal/runtime/map.go b/internal/runtime/map.go index 4c83a751..886980fd 100644 --- a/internal/runtime/map.go +++ b/internal/runtime/map.go @@ -4,6 +4,14 @@ package runtime +import ( + "unsafe" + + "github.com/goplus/llgo/internal/abi" + "github.com/goplus/llgo/internal/runtime/goarch" + "github.com/goplus/llgo/internal/runtime/math" +) + // This file contains the implementation of Go's map type. // // A map is just a hash table. The data is arranged @@ -53,12 +61,6 @@ package runtime // Keep in mind this data is for maximally loaded tables, i.e. just // before the table grows. Typical tables will be somewhat less loaded. -import ( - "unsafe" - - "github.com/goplus/llgo/internal/abi" -) - const ( // Maximum number of key/elem pairs a bucket can hold. bucketCntBits = abi.MapBucketCountBits @@ -103,7 +105,7 @@ const ( sameSizeGrow = 8 // the current map growth is to a new map of the same size // sentinel bucket ID for iterator checks - // noCheck = 1<<(8*goarch.PtrSize) - 1 + noCheck = 1<<(8*goarch.PtrSize) - 1 ) // isEmpty reports whether the given tophash array entry represents an empty bucket entry. @@ -158,7 +160,6 @@ type bmap struct { // Followed by an overflow pointer. 
} -/* // A hash iteration structure. // If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go // and reflect/value.go to match the layout of this structure. @@ -286,7 +287,6 @@ func makemap64(t *maptype, hint int64, h *hmap) *hmap { } return makemap(t, int(hint), h) } -*/ // makemap_small implements Go map creation for make(map[k]v) and // make(map[k]v, hint) when hint is known to be at most bucketCnt @@ -297,7 +297,6 @@ func makemap_small() *hmap { return h } -/* // makemap implements Go map creation for make(map[k]v, hint). // If the compiler has determined that the map or the first bucket // can be created on the stack, h and/or bucket may be non-nil. @@ -395,18 +394,18 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un // NOTE: The returned pointer may keep the whole map live, so don't // hold onto it for very long. func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { - if raceenabled && h != nil { - callerpc := getcallerpc() - pc := abi.FuncPCABIInternal(mapaccess1) - racereadpc(unsafe.Pointer(h), callerpc, pc) - raceReadObjectPC(t.Key, key, callerpc, pc) - } - if msanenabled && h != nil { - msanread(key, t.Key.Size_) - } - if asanenabled && h != nil { - asanread(key, t.Key.Size_) - } + // if raceenabled && h != nil { + // callerpc := getcallerpc() + // pc := abi.FuncPCABIInternal(mapaccess1) + // racereadpc(unsafe.Pointer(h), callerpc, pc) + // raceReadObjectPC(t.Key, key, callerpc, pc) + // } + // if msanenabled && h != nil { + // msanread(key, t.Key.Size_) + // } + // if asanenabled && h != nil { + // asanread(key, t.Key.Size_) + // } if h == nil || h.count == 0 { if t.HashMightPanic() { t.Hasher(key, 0) // see issue 23734 @@ -443,7 +442,7 @@ bucketloop: if t.IndirectKey() { k = *((*unsafe.Pointer)(k)) } - if t.Key.Equal(key, k) { + if mapKeyEqual(t, key, k) { e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) if t.IndirectElem() { e = *((*unsafe.Pointer)(e)) @@ -456,18 +455,18 @@ bucketloop: } func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) { - if raceenabled && h != nil { - callerpc := getcallerpc() - pc := abi.FuncPCABIInternal(mapaccess2) - racereadpc(unsafe.Pointer(h), callerpc, pc) - raceReadObjectPC(t.Key, key, callerpc, pc) - } - if msanenabled && h != nil { - msanread(key, t.Key.Size_) - } - if asanenabled && h != nil { - asanread(key, t.Key.Size_) - } + // if raceenabled && h != nil { + // callerpc := getcallerpc() + // pc := abi.FuncPCABIInternal(mapaccess2) + // racereadpc(unsafe.Pointer(h), callerpc, pc) + // raceReadObjectPC(t.Key, key, callerpc, pc) + // } + // if msanenabled && h != nil { + // msanread(key, t.Key.Size_) + // } + // if asanenabled && h != nil { + // asanread(key, t.Key.Size_) + // } if h == nil || h.count == 0 { if t.HashMightPanic() { t.Hasher(key, 0) // see issue 23734 @@ -504,7 +503,7 @@ bucketloop: if t.IndirectKey() { k = *((*unsafe.Pointer)(k)) } - if t.Key.Equal(key, k) { + if mapKeyEqual(t, key, k) { e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) if t.IndirectElem() { e = *((*unsafe.Pointer)(e)) @@ -548,7 +547,7 @@ bucketloop: if t.IndirectKey() { k = *((*unsafe.Pointer)(k)) } - if t.Key.Equal(key, k) { + if mapKeyEqual(t, key, k) { e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) if t.IndirectElem() { e = *((*unsafe.Pointer)(e)) @@ -581,18 +580,19 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) 
unsafe.Pointer { if h == nil { panic(plainError("assignment to entry in nil map")) } - if raceenabled { - callerpc := getcallerpc() - pc := abi.FuncPCABIInternal(mapassign) - racewritepc(unsafe.Pointer(h), callerpc, pc) - raceReadObjectPC(t.Key, key, callerpc, pc) - } - if msanenabled { - msanread(key, t.Key.Size_) - } - if asanenabled { - asanread(key, t.Key.Size_) - } + + // if raceenabled { + // callerpc := getcallerpc() + // pc := abi.FuncPCABIInternal(mapassign) + // racewritepc(unsafe.Pointer(h), callerpc, pc) + // raceReadObjectPC(t.Key, key, callerpc, pc) + // } + // if msanenabled { + // msanread(key, t.Key.Size_) + // } + // if asanenabled { + // asanread(key, t.Key.Size_) + // } if h.flags&hashWriting != 0 { fatal("concurrent map writes") } @@ -635,7 +635,7 @@ bucketloop: if t.IndirectKey() { k = *((*unsafe.Pointer)(k)) } - if !t.Key.Equal(key, k) { + if !mapKeyEqual(t, key, k) { continue } // already have a mapping for key. Update it. @@ -674,12 +674,15 @@ bucketloop: kmem := newobject(t.Key) *(*unsafe.Pointer)(insertk) = kmem insertk = kmem + *(*unsafe.Pointer)(insertk) = key } if t.IndirectElem() { vmem := newobject(t.Elem) *(*unsafe.Pointer)(elem) = vmem } + typedmemmove(t.Key, insertk, key) + *inserti = top h.count++ @@ -695,18 +698,18 @@ done: } func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { - if raceenabled && h != nil { - callerpc := getcallerpc() - pc := abi.FuncPCABIInternal(mapdelete) - racewritepc(unsafe.Pointer(h), callerpc, pc) - raceReadObjectPC(t.Key, key, callerpc, pc) - } - if msanenabled && h != nil { - msanread(key, t.Key.Size_) - } - if asanenabled && h != nil { - asanread(key, t.Key.Size_) - } + // if raceenabled && h != nil { + // callerpc := getcallerpc() + // pc := abi.FuncPCABIInternal(mapdelete) + // racewritepc(unsafe.Pointer(h), callerpc, pc) + // raceReadObjectPC(t.Key, key, callerpc, pc) + // } + // if msanenabled && h != nil { + // msanread(key, t.Key.Size_) + // } + // if asanenabled && h != nil { + // asanread(key, t.Key.Size_) + // } if h == nil || h.count == 0 { if t.HashMightPanic() { t.Hasher(key, 0) // see issue 23734 @@ -744,7 +747,7 @@ search: if t.IndirectKey() { k2 = *((*unsafe.Pointer)(k2)) } - if !t.Key.Equal(key, k2) { + if !mapKeyEqual(t, key, k2) { continue } // Only clear key if there are pointers in it. @@ -815,10 +818,10 @@ search: // by the compilers order pass or on the heap by reflect_mapiterinit. // Both need to have zeroed hiter since the struct contains pointers. func mapiterinit(t *maptype, h *hmap, it *hiter) { - if raceenabled && h != nil { - callerpc := getcallerpc() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit)) - } + // if raceenabled && h != nil { + // callerpc := getcallerpc() + // racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit)) + // } it.t = t if h == nil || h.count == 0 { @@ -859,7 +862,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) { // Remember we have an iterator. // Can run concurrently with another mapiterinit(). 
if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator { - atomic.Or8(&h.flags, iterator|oldIterator) + atomicOr8(&h.flags, iterator|oldIterator) } mapiternext(it) @@ -867,10 +870,10 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) { func mapiternext(it *hiter) { h := it.h - if raceenabled { - callerpc := getcallerpc() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext)) - } + // if raceenabled { + // callerpc := getcallerpc() + // racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext)) + // } if h.flags&hashWriting != 0 { fatal("concurrent map iteration and map write") } @@ -932,7 +935,7 @@ next: // through the oldbucket, skipping any keys that will go // to the other new bucket (each oldbucket expands to two // buckets during a grow). - if t.ReflexiveKey() || t.Key.Equal(k, k) { + if t.ReflexiveKey() || mapKeyEqual(t, k, k) { // If the item in the oldbucket is not destined for // the current new bucket in the iteration, skip it. hash := t.Hasher(k, uintptr(h.hash0)) @@ -953,7 +956,7 @@ next: } } if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) || - !(t.ReflexiveKey() || t.Key.Equal(k, k)) { + !(t.ReflexiveKey() || mapKeyEqual(t, k, k)) { // This is the golden data, we can return it. // OR // key!=key, so the entry can't be deleted or updated, so we can just return it. @@ -993,11 +996,11 @@ next: // mapclear deletes all keys from a map. func mapclear(t *maptype, h *hmap) { - if raceenabled && h != nil { - callerpc := getcallerpc() - pc := abi.FuncPCABIInternal(mapclear) - racewritepc(unsafe.Pointer(h), callerpc, pc) - } + // if raceenabled && h != nil { + // callerpc := getcallerpc() + // pc := abi.FuncPCABIInternal(mapclear) + // racewritepc(unsafe.Pointer(h), callerpc, pc) + // } if h == nil || h.count == 0 { return @@ -1211,7 +1214,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { // Compute hash to make our evacuation decision (whether we need // to send this key/elem to bucket x or bucket y). hash := t.Hasher(k2, uintptr(h.hash0)) - if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) { + if h.flags&iterator != 0 && !t.ReflexiveKey() && !mapKeyEqual(t, k2, k2) { // If key != key (NaNs), then the hash could be (and probably // will be) entirely different from the old hash. Moreover, // it isn't reproducible. Reproducibility is required in the @@ -1307,6 +1310,7 @@ func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) { // Reflect stubs. Called from ../reflect/asm_*.s +/* //go:linkname reflect_makemap reflect.makemap func reflect_makemap(t *maptype, cap int) *hmap { // Check invariants and reflects math. 
@@ -1413,10 +1417,10 @@ func reflect_maplen(h *hmap) int { if h == nil { return 0 } - if raceenabled { - callerpc := getcallerpc() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen)) - } + // if raceenabled { + // callerpc := getcallerpc() + // racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen)) + // } return h.count } @@ -1430,12 +1434,13 @@ func reflectlite_maplen(h *hmap) int { if h == nil { return 0 } - if raceenabled { - callerpc := getcallerpc() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen)) - } + // if raceenabled { + // callerpc := getcallerpc() + // racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen)) + // } return h.count } +*/ const maxZero = 1024 // must match value in reflect/value.go:maxZero cmd/compile/internal/gc/walk.go:zeroValSize var zeroVal [maxZero]byte @@ -1445,7 +1450,7 @@ var zeroVal [maxZero]byte // rewrite the relocation (from the package init func) from the outlined // map init function to this symbol. Defined in assembly so as to avoid // complications with instrumentation (coverage, etc). -func mapinitnoop() +//func mapinitnoop() // mapclone for implementing maps.Clone // @@ -1723,4 +1728,3 @@ func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) { b = b.overflow(t) } } -*/ diff --git a/internal/runtime/math/math.go b/internal/runtime/math/math.go index 8d38eab8..6b2f13b3 100644 --- a/internal/runtime/math/math.go +++ b/internal/runtime/math/math.go @@ -13,3 +13,24 @@ func MulUintptr(a, b uintptr) (uintptr, bool) { overflow := b > MaxUintptr/a return a * b, overflow } + +// Mul64 returns the 128-bit product of x and y: (hi, lo) = x * y +// with the product bits' upper half returned in hi and the lower +// half returned in lo. +// This is a copy from math/bits.Mul64 +// On supported platforms this is an intrinsic lowered by the compiler. +func Mul64(x, y uint64) (hi, lo uint64) { + const mask32 = 1<<32 - 1 + x0 := x & mask32 + x1 := x >> 32 + y0 := y & mask32 + y1 := y >> 32 + w0 := x0 * y0 + t := x1*y0 + w0>>32 + w1 := t & mask32 + w2 := t >> 32 + w1 += x0 * y1 + hi = x1*y1 + w2 + w1>>32 + lo = x * y + return +} diff --git a/internal/runtime/stubs.go b/internal/runtime/stubs.go index 0b81dc67..6c09e115 100644 --- a/internal/runtime/stubs.go +++ b/internal/runtime/stubs.go @@ -4,11 +4,23 @@ package runtime -import _ "unsafe" +import ( + "unsafe" + + "github.com/goplus/llgo/c/sync/atomic" + "github.com/goplus/llgo/internal/runtime/math" +) //go:linkname fastrand C.rand func fastrand() uint32 +func fastrand64() uint64 { + n := uint64(fastrand()) + n += 0xa0761d6478bd642f + hi, lo := math.Mul64(n, n^0xe7037ed1a0b428db) + return hi ^ lo +} + /* TODO(xsw): func fastrand() uint32 { mp := getg().m @@ -37,9 +49,74 @@ func fastrand() uint32 { } */ +//go:nosplit +func add(p unsafe.Pointer, x uintptr) unsafe.Pointer { + return unsafe.Pointer(uintptr(p) + x) +} + +// implementation of new builtin +// compiler (both frontend and SSA backend) knows the signature +// of this function. 
+func newobject(typ *_type) unsafe.Pointer { + return AllocZ(typ.Size_) +} + +// TODO +func roundupsize(size uintptr) uintptr { + // if size < _MaxSmallSize { + // if size <= smallSizeMax-8 { + // return uintptr(class_to_size[size_to_class8[divRoundUp(size, smallSizeDiv)]]) + // } else { + // return uintptr(class_to_size[size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]]) + // } + // } + // if size+_PageSize < size { + // return size + // } + // return alignUp(size, _PageSize) + return size +} + +// newarray allocates an array of n elements of type typ. +func newarray(typ *_type, n int) unsafe.Pointer { + if n == 1 { + return AllocZ(typ.Size_) + } + mem, overflow := math.MulUintptr(typ.Size_, uintptr(n)) + if overflow || mem > maxAlloc || n < 0 { + panic(plainError("runtime: allocation size out of range")) + } + return AllocZ(mem) +} + const ( // _64bit = 1 on 64-bit systems, 0 on 32-bit systems _64bit = 1 << (^uintptr(0) >> 63) / 2 heapAddrBits = (_64bit)*48 + (1-_64bit)*(32) maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1 ) + +func memclrHasPointers(ptr unsafe.Pointer, n uintptr) { + // bulkBarrierPreWrite(uintptr(ptr), 0, n) + // memclrNoHeapPointers(ptr, n) +} + +func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) { +} + +func fatal(s string) { + print("fatal error: ", s, "\n") +} + +func throw(s string) { + print("fatal error: ", s, "\n") +} + +func atomicOr8(ptr *uint8, v uint8) uint8 { + return (uint8)(atomic.Or((*uint)(unsafe.Pointer(ptr)), uint(v))) +} + +func noescape(p unsafe.Pointer) unsafe.Pointer { + x := uintptr(p) + return unsafe.Pointer(x ^ 0) +} diff --git a/internal/runtime/z_error.go b/internal/runtime/z_error.go index a3c0c831..eb05f07c 100644 --- a/internal/runtime/z_error.go +++ b/internal/runtime/z_error.go @@ -31,6 +31,12 @@ func (e errorString) Error() string { return "runtime error: " + string(e) } +type plainError string + +func (e plainError) Error() string { + return string(e) +} + func AssertRuntimeError(b bool, msg string) { if b { panic(errorString(msg).Error()) diff --git a/internal/runtime/z_map.go b/internal/runtime/z_map.go index 8d618f8e..244b1690 100644 --- a/internal/runtime/z_map.go +++ b/internal/runtime/z_map.go @@ -16,10 +16,69 @@ package runtime +import ( + "unsafe" + + "github.com/goplus/llgo/internal/abi" +) + // Map represents a Go map. type Map = hmap +type maptype = abi.MapType + +type slice struct { + array unsafe.Pointer + len int + cap int +} + +func typedmemmove(typ *_type, dst, src unsafe.Pointer) { + Typedmemmove(typ, dst, src) +} // MakeSmallMap creates a new small map. 
func MakeSmallMap() *Map { return makemap_small() } + +func MakeMap(t *maptype, hint int) *hmap { + return makemap(t, hint, nil) +} + +func MapAssign(t *maptype, h *Map, key unsafe.Pointer) unsafe.Pointer { + return mapassign(t, h, key) +} + +func MapAccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { + return mapaccess1(t, h, key) +} + +func MapAccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) { + return mapaccess2(t, h, key) +} + +func mapKeyEqual(t *maptype, p, q unsafe.Pointer) bool { + if isDirectIface(t.Key) { + switch t.Key.Size_ { + case 0: + return true + case 1: + return memequal8(p, q) + case 2: + return memequal16(p, q) + case 4: + return memequal32(p, q) + case 8: + return memequal64(p, q) + } + } + switch t.Key.Kind() { + case abi.String: + return strequal(p, q) + case abi.Complex64: + return c64equal(p, q) + case abi.Complex128: + return c128equal(p, q) + } + return t.Key.Equal(p, q) +} diff --git a/internal/runtime/z_type.go b/internal/runtime/z_type.go index fdb09c99..2b94da37 100644 --- a/internal/runtime/z_type.go +++ b/internal/runtime/z_type.go @@ -40,6 +40,9 @@ func Basic(kind Kind) *Type { FieldAlign_: uint8(align), Kind_: uint8(kind), Str_: name, + Equal: func(a, b unsafe.Pointer) bool { + return uintptr(a) == uintptr(b) + }, } } return tyBasic[kind] @@ -208,4 +211,28 @@ func ChanOf(dir int, strChan string, elem *Type) *Type { return &ret.Type } +func MapOf(key, elem *Type, bucket *Type, flags int) *Type { + ret := &abi.MapType{ + Type: Type{ + Size_: unsafe.Sizeof(uintptr(0)), + Hash: uint32(abi.Map), + Align_: pointerAlign, + FieldAlign_: pointerAlign, + Kind_: uint8(abi.Map), + Str_: "map[" + key.String() + "]" + elem.String(), + }, + Key: key, + Elem: elem, + Bucket: bucket, + KeySize: uint8(key.Size_), + ValueSize: uint8(elem.Size_), + BucketSize: uint16(bucket.Size_), + Flags: uint32(flags), + } + ret.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr { + return uintptr(p) + } + return &ret.Type +} + // ----------------------------------------------------------------------------- diff --git a/ssa/abi/map.go b/ssa/abi/map.go new file mode 100644 index 00000000..e248aa45 --- /dev/null +++ b/ssa/abi/map.go @@ -0,0 +1,547 @@ +package abi + +import ( + "go/token" + "go/types" + "log" + + "github.com/goplus/llgo/internal/abi" +) + +// Builds a type representing a Bucket structure for +// the given map type. This type is not visible to users - +// we include only enough information to generate a correct GC +// program for it. +// Make sure this stays in sync with runtime/map.go. +// +// A "bucket" is a "struct" { +// tophash [BUCKETSIZE]uint8 +// keys [BUCKETSIZE]keyType +// elems [BUCKETSIZE]elemType +// overflow *bucket +// } +const ( + BUCKETSIZE = abi.MapBucketCount + MAXKEYSIZE = abi.MapMaxKeyBytes + MAXELEMSIZE = abi.MapMaxElemBytes +) + +func makefield(name string, t types.Type) *types.Var { + return types.NewField(token.NoPos, nil, name, t, false) +} + +// MapBucketType makes the map bucket type given the type of the map. +func MapBucketType(t *types.Map, sizes types.Sizes) types.Type { + keytype := t.Key() + elemtype := t.Elem() + if sizes.Sizeof(keytype) > MAXKEYSIZE { + keytype = types.NewPointer(keytype) + } + if sizes.Sizeof(elemtype) > MAXELEMSIZE { + elemtype = types.NewPointer(elemtype) + } + + field := make([]*types.Var, 0, 5) + + // The first field is: uint8 topbits[BUCKETSIZE]. 
+ arr := types.NewArray(types.Typ[types.Uint8], BUCKETSIZE) + field = append(field, makefield("topbits", arr)) + + arr = types.NewArray(keytype, BUCKETSIZE) + //arr.SetNoalg(true) + keys := makefield("keys", arr) + field = append(field, keys) + + arr = types.NewArray(elemtype, BUCKETSIZE) + //arr.SetNoalg(true) + elems := makefield("elems", arr) + field = append(field, elems) + + // If keys and elems have no pointers, the map implementation + // can keep a list of overflow pointers on the side so that + // buckets can be marked as having no pointers. + // Arrange for the bucket to have no pointers by changing + // the type of the overflow field to uintptr in this case. + // See comment on hmap.overflow in runtime/map.go. + otyp := types.Typ[types.UnsafePointer] + if !HasPtrData(elemtype) && !HasPtrData(keytype) { + otyp = types.Typ[types.Uintptr] + } + overflow := makefield("overflow", otyp) + field = append(field, overflow) + + // link up fields + bucket := types.NewStruct(field[:], nil) + + // Check invariants that map code depends on. + if !types.Comparable(t.Key()) { + log.Fatalf("unsupported map key type for %v", t) + } + if BUCKETSIZE < 8 { + log.Fatalf("bucket size %d too small for proper alignment %d", BUCKETSIZE, 8) + } + if uint8(sizes.Alignof(keytype)) > BUCKETSIZE { + log.Fatalf("key align too big for %v", t) + } + if uint8(sizes.Alignof(elemtype)) > BUCKETSIZE { + log.Fatalf("elem align %d too big for %v, BUCKETSIZE=%d", sizes.Alignof(elemtype), t, BUCKETSIZE) + } + if sizes.Alignof(keytype) > MAXKEYSIZE { + log.Fatalf("key size too large for %v", t) + } + if sizes.Alignof(elemtype) > MAXELEMSIZE { + log.Fatalf("elem size too large for %v", t) + } + if sizes.Alignof(t.Key()) > MAXKEYSIZE && !isPointer(keytype) { + log.Fatalf("key indirect incorrect for %v", t) + } + if sizes.Alignof(t.Elem()) > MAXELEMSIZE && !isPointer(elemtype) { + log.Fatalf("elem indirect incorrect for %v", t) + } + if sizes.Sizeof(keytype)%sizes.Alignof(keytype) != 0 { + log.Fatalf("key size not a multiple of key align for %v", t) + } + if sizes.Sizeof(elemtype)%sizes.Alignof(elemtype) != 0 { + log.Fatalf("elem size not a multiple of elem align for %v", t) + } + if uint8(sizes.Alignof(bucket))%uint8(sizes.Alignof(keytype)) != 0 { + log.Fatalf("bucket align not multiple of key align %v", t) + } + if uint8(sizes.Alignof(bucket))%uint8(sizes.Alignof(elemtype)) != 0 { + log.Fatalf("bucket align not multiple of elem align %v", t) + } + offs := sizes.Offsetsof(field) + if offs[1]%sizes.Alignof(keytype) != 0 { + log.Fatalf("bad alignment of keys in bmap for %v", t) + } + if offs[2]%sizes.Alignof(elemtype) != 0 { + log.Fatalf("bad alignment of elems in bmap for %v", t) + } + + // // Double-check that overflow field is final memory in struct, + // // with no padding at end. 
+ // if overflow.Offset != bucket.Size()-int64(types.PtrSize) { + // log.Fatalf("bad offset of overflow in bmap for %v, overflow.Offset=%d, bucket.Size()-int64(types.PtrSize)=%d", + // t, overflow.Offset, bucket.Size()-int64(types.PtrSize)) + // } + return bucket +} + +func isPointer(t types.Type) (ok bool) { + _, ok = t.Underlying().(*types.Pointer) + return +} + +func MapTypeFlags(t *types.Map, sizes types.Sizes) (flags int) { + if sizes.Sizeof(t.Key()) > MAXKEYSIZE { + flags |= 1 // indirect key + } + if sizes.Sizeof(t.Elem()) > MAXELEMSIZE { + flags |= 2 // indirect value + } + if IsReflexive(t.Key()) { + flags |= 4 // reflexive key + } + if needkeyupdate(t.Key()) { + flags |= 8 // need key update + } + if hashMightPanic(t.Key()) { + flags |= 16 // hash might panic + } + return +} + +// $GOROOT/src/cmd/compile/internal/reflectdata/reflect.go +// func MapBucketType(t *types.Type) *types.Type { +// if t.MapType().Bucket != nil { +// return t.MapType().Bucket +// } + +// keytype := t.Key() +// elemtype := t.Elem() +// types.CalcSize(keytype) +// types.CalcSize(elemtype) +// if keytype.Size() > MAXKEYSIZE { +// keytype = types.NewPtr(keytype) +// } +// if elemtype.Size() > MAXELEMSIZE { +// elemtype = types.NewPtr(elemtype) +// } + +// field := make([]*types.Field, 0, 5) + +// // The first field is: uint8 topbits[BUCKETSIZE]. +// arr := types.NewArray(types.Types[types.TUINT8], BUCKETSIZE) +// field = append(field, makefield("topbits", arr)) + +// arr = types.NewArray(keytype, BUCKETSIZE) +// arr.SetNoalg(true) +// keys := makefield("keys", arr) +// field = append(field, keys) + +// arr = types.NewArray(elemtype, BUCKETSIZE) +// arr.SetNoalg(true) +// elems := makefield("elems", arr) +// field = append(field, elems) + +// // If keys and elems have no pointers, the map implementation +// // can keep a list of overflow pointers on the side so that +// // buckets can be marked as having no pointers. +// // Arrange for the bucket to have no pointers by changing +// // the type of the overflow field to uintptr in this case. +// // See comment on hmap.overflow in runtime/map.go. +// otyp := types.Types[types.TUNSAFEPTR] +// if !elemtype.HasPointers() && !keytype.HasPointers() { +// otyp = types.Types[types.TUINTPTR] +// } +// overflow := makefield("overflow", otyp) +// field = append(field, overflow) + +// // link up fields +// bucket := types.NewStruct(field[:]) +// bucket.SetNoalg(true) +// types.CalcSize(bucket) + +// // Check invariants that map code depends on. 
+// if !types.IsComparable(t.Key()) { +// base.Fatalf("unsupported map key type for %v", t) +// } +// if BUCKETSIZE < 8 { +// base.Fatalf("bucket size %d too small for proper alignment %d", BUCKETSIZE, 8) +// } +// if uint8(keytype.Alignment()) > BUCKETSIZE { +// base.Fatalf("key align too big for %v", t) +// } +// if uint8(elemtype.Alignment()) > BUCKETSIZE { +// base.Fatalf("elem align %d too big for %v, BUCKETSIZE=%d", elemtype.Alignment(), t, BUCKETSIZE) +// } +// if keytype.Size() > MAXKEYSIZE { +// base.Fatalf("key size too large for %v", t) +// } +// if elemtype.Size() > MAXELEMSIZE { +// base.Fatalf("elem size too large for %v", t) +// } +// if t.Key().Size() > MAXKEYSIZE && !keytype.IsPtr() { +// base.Fatalf("key indirect incorrect for %v", t) +// } +// if t.Elem().Size() > MAXELEMSIZE && !elemtype.IsPtr() { +// base.Fatalf("elem indirect incorrect for %v", t) +// } +// if keytype.Size()%keytype.Alignment() != 0 { +// base.Fatalf("key size not a multiple of key align for %v", t) +// } +// if elemtype.Size()%elemtype.Alignment() != 0 { +// base.Fatalf("elem size not a multiple of elem align for %v", t) +// } +// if uint8(bucket.Alignment())%uint8(keytype.Alignment()) != 0 { +// base.Fatalf("bucket align not multiple of key align %v", t) +// } +// if uint8(bucket.Alignment())%uint8(elemtype.Alignment()) != 0 { +// base.Fatalf("bucket align not multiple of elem align %v", t) +// } +// if keys.Offset%keytype.Alignment() != 0 { +// base.Fatalf("bad alignment of keys in bmap for %v", t) +// } +// if elems.Offset%elemtype.Alignment() != 0 { +// base.Fatalf("bad alignment of elems in bmap for %v", t) +// } + +// // Double-check that overflow field is final memory in struct, +// // with no padding at end. +// if overflow.Offset != bucket.Size()-int64(types.PtrSize) { +// base.Fatalf("bad offset of overflow in bmap for %v, overflow.Offset=%d, bucket.Size()-int64(types.PtrSize)=%d", +// t, overflow.Offset, bucket.Size()-int64(types.PtrSize)) +// } + +// t.MapType().Bucket = bucket + +// bucket.StructType().Map = t +// return bucket +// } + +// PtrDataSize returns the length in bytes of the prefix of t +// containing pointer data. Anything after this offset is scalar data. +// +// PtrDataSize is only defined for actual Go types. It's an error to +// use it on compiler-internal types (e.g., TSSA, TRESULTS). 
+func HasPtrData(t types.Type) bool { + switch t := t.Underlying().(type) { + case *types.Basic: + switch t.Kind() { + case types.String: + return true + case types.UnsafePointer: + return true + default: + return false + } + case *types.Pointer: + return true + case *types.Signature, *types.Chan, *types.Map: + return true + case *types.Interface: + return true + case *types.Slice: + return true + case *types.Array: + if t.Len() == 0 { + return false + } + return HasPtrData(t.Elem()) + case *types.Struct: + for i := 0; i < t.NumFields(); i++ { + if HasPtrData(t.Field(i).Type()) { + return true + } + } + return false + default: + log.Fatalf("PtrDataSize: unexpected type, %v", t) + return false + } +} + +// $GOROOT/src/cmd/compile/internal/types/type.go +// func PtrDataSize(t *Type) int64 { +// switch t.Kind() { +// case TBOOL, TINT8, TUINT8, TINT16, TUINT16, TINT32, +// TUINT32, TINT64, TUINT64, TINT, TUINT, +// TUINTPTR, TCOMPLEX64, TCOMPLEX128, TFLOAT32, TFLOAT64: +// return 0 + +// case TPTR: +// if t.Elem().NotInHeap() { +// return 0 +// } +// return int64(PtrSize) + +// case TUNSAFEPTR, TFUNC, TCHAN, TMAP: +// return int64(PtrSize) + +// case TSTRING: +// // struct { byte *str; intgo len; } +// return int64(PtrSize) + +// case TINTER: +// // struct { Itab *tab; void *data; } or +// // struct { Type *type; void *data; } +// // Note: see comment in typebits.Set +// return 2 * int64(PtrSize) + +// case TSLICE: +// if t.Elem().NotInHeap() { +// return 0 +// } +// // struct { byte *array; uintgo len; uintgo cap; } +// return int64(PtrSize) + +// case TARRAY: +// if t.NumElem() == 0 { +// return 0 +// } +// // t.NumElem() > 0 +// size := PtrDataSize(t.Elem()) +// if size == 0 { +// return 0 +// } +// return (t.NumElem()-1)*t.Elem().Size() + size + +// case TSTRUCT: +// // Find the last field that has pointers, if any. +// fs := t.Fields().Slice() +// for i := len(fs) - 1; i >= 0; i-- { +// if size := PtrDataSize(fs[i].Type); size > 0 { +// return fs[i].Offset + size +// } +// } +// return 0 + +// case TSSA: +// if t != TypeInt128 { +// base.Fatalf("PtrDataSize: unexpected ssa type %v", t) +// } +// return 0 + +// default: +// base.Fatalf("PtrDataSize: unexpected type, %v", t) +// return 0 +// } +// } + +// IsReflexive reports whether t has a reflexive equality operator. +// That is, if x==x for all x of type t. 
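The canonical non-reflexive key is a NaN: since NaN != NaN, a float key can be inserted but never found again, and the reflexive-key flag records whether the runtime has to worry about this at all. A minimal, self-contained illustration in ordinary Go (nothing llgo-specific):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	fmt.Println(nan == nan) // false: float64 equality is not reflexive

	m := map[float64]string{}
	m[nan] = "lost"
	_, ok := m[nan]
	fmt.Println(len(m), ok) // 1 false: the entry exists but never compares equal again

	m2 := map[int]string{1: "found"}
	_, ok = m2[1]
	fmt.Println(ok) // true: int keys are reflexive, lookups behave as expected
}
```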
+func IsReflexive(t types.Type) bool { + switch t := t.Underlying().(type) { + case *types.Basic: + switch t.Kind() { + case types.Float32, types.Float64, types.Complex64, types.Complex128: + return false + default: + return true + } + case *types.Pointer, *types.Chan: + return true + case *types.Interface: + return false + case *types.Array: + return IsReflexive(t.Elem()) + case *types.Struct: + for i, n := 0, t.NumFields(); i < n; i++ { + if !IsReflexive(t.Field(i).Type()) { + return false + } + } + return true + default: + log.Fatalf("bad type for map key: %v", t) + return false + } +} + +// $GOROOT/src/cmd/compile/internal/types/type.go +// func IsReflexive(t *Type) bool { +// switch t.Kind() { +// case TBOOL, +// TINT, +// TUINT, +// TINT8, +// TUINT8, +// TINT16, +// TUINT16, +// TINT32, +// TUINT32, +// TINT64, +// TUINT64, +// TUINTPTR, +// TPTR, +// TUNSAFEPTR, +// TSTRING, +// TCHAN: +// return true + +// case TFLOAT32, +// TFLOAT64, +// TCOMPLEX64, +// TCOMPLEX128, +// TINTER: +// return false + +// case TARRAY: +// return IsReflexive(t.Elem()) + +// case TSTRUCT: +// for _, t1 := range t.Fields().Slice() { +// if !IsReflexive(t1.Type) { +// return false +// } +// } +// return true + +// default: +// base.Fatalf("bad type for map key: %v", t) +// return false +// } +// } + +// $GOROOT/src/cmd/compile/internal/types/type.go +// needkeyupdate reports whether map updates with t as a key +// need the key to be updated. +func needkeyupdate(t types.Type) bool { + switch t := t.Underlying().(type) { + case *types.Basic: + switch t.Kind() { + case types.Float32, types.Float64, types.Complex64, types.Complex128: + return true // floats and complex can be +0/-0 + case types.String: + return true // strings might have smaller backing stores + default: + return false + } + case *types.Interface: + return true + case *types.Pointer, *types.Chan: + return false + case *types.Array: + return needkeyupdate(t.Elem()) + case *types.Struct: + for i, n := 0, t.NumFields(); i < n; i++ { + if needkeyupdate(t.Field(i).Type()) { + return true + } + } + return false + default: + log.Fatalf("bad type for map key: %v", t) + return true + } +} + +// $GOROOT/src/cmd/compile/internal/reflectdata/reflect.go +// func needkeyupdate(t *types.Type) bool { +// switch t.Kind() { +// case types.TBOOL, types.TINT, types.TUINT, types.TINT8, types.TUINT8, types.TINT16, types.TUINT16, types.TINT32, types.TUINT32, +// types.TINT64, types.TUINT64, types.TUINTPTR, types.TPTR, types.TUNSAFEPTR, types.TCHAN: +// return false + +// case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128, // floats and complex can be +0/-0 +// types.TINTER, +// types.TSTRING: // strings might have smaller backing stores +// return true + +// case types.TARRAY: +// return needkeyupdate(t.Elem()) + +// case types.TSTRUCT: +// for _, t1 := range t.Fields().Slice() { +// if needkeyupdate(t1.Type) { +// return true +// } +// } +// return false + +// default: +// base.Fatalf("bad type for map key: %v", t) +// return true +// } +// } + +// hashMightPanic reports whether the hash of a map key of type t might panic. 
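Two of these predicates are easy to see from ordinary Go. needkeyupdate covers keys that compare equal without being identical (+0.0 and -0.0, or equal strings with different backing arrays), so an assignment must overwrite the stored key as well as the value. hashMightPanic covers interface keys, whose hashing can fail at run time when the dynamic type is not comparable; a short, self-contained demonstration:

```go
package main

import "fmt"

func main() {
	m := map[any]int{}
	m["ok"] = 1 // string dynamic type: comparable, hashes fine

	defer func() {
		// prints: recovered: runtime error: hash of unhashable type []int
		fmt.Println("recovered:", recover())
	}()
	m[[]int{1, 2}] = 2 // slice dynamic type: hashing panics
}
```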
+func hashMightPanic(t types.Type) bool { + switch t := t.Underlying().(type) { + case *types.Interface: + return true + case *types.Array: + return hashMightPanic(t.Elem()) + case *types.Struct: + for i, n := 0, t.NumFields(); i < n; i++ { + if hashMightPanic(t.Field(i).Type()) { + return true + } + } + return false + default: + return false + } +} + +// $GOROOT/src/cmd/compile/internal/reflectdata/reflect.go +// func hashMightPanic(t *types.Type) bool { +// switch t.Kind() { +// case types.TINTER: +// return true + +// case types.TARRAY: +// return hashMightPanic(t.Elem()) + +// case types.TSTRUCT: +// for _, t1 := range t.Fields().Slice() { +// if hashMightPanic(t1.Type) { +// return true +// } +// } +// return false + +// default: +// return false +// } +// } diff --git a/ssa/abitype.go b/ssa/abitype.go index f45c25e3..c6b574d6 100644 --- a/ssa/abitype.go +++ b/ssa/abitype.go @@ -70,6 +70,8 @@ func (b Builder) abiTypeOf(t types.Type) func() Expr { return b.abiArrayOf(t) case *types.Chan: return b.abiChanOf(t) + case *types.Map: + return b.abiMapOf(t) } panic("todo") } @@ -284,6 +286,25 @@ func (b Builder) abiChanOf(t *types.Chan) func() Expr { } } +func (b Builder) abiMapOf(t *types.Map) func() Expr { + key := b.abiTypeOf(t.Key()) + elem := b.abiTypeOf(t.Elem()) + bucket := b.abiTypeOf(b.bucketType(t)) + flags := abi.MapTypeFlags(t, (*goProgram)(b.Prog)) + return func() Expr { + return b.Call(b.Pkg.rtFunc("MapOf"), key(), elem(), bucket(), b.Prog.Val(flags)) + } +} + +func (b Builder) bucketType(t *types.Map) types.Type { + if bucket, ok := b.Pkg.bucket[t]; ok { + return bucket + } + bucket := abi.MapBucketType(t, (*goProgram)(b.Prog)) + b.Pkg.bucket[t] = bucket + return bucket +} + // func StructField(name string, typ *abi.Type, off uintptr, tag string, embedded bool) // func Struct(pkgPath string, size uintptr, fields []abi.StructField) func (b Builder) abiStructOf(t *types.Struct) func() Expr { diff --git a/ssa/datastruct.go b/ssa/datastruct.go index 2dd0a7f8..f7315f80 100644 --- a/ssa/datastruct.go +++ b/ssa/datastruct.go @@ -145,6 +145,15 @@ func (b Builder) SliceCap(x Expr) Expr { return Expr{ptr, b.Prog.Int()} } +func (b Builder) MapLen(x Expr) Expr { + if debugInstr { + log.Printf("MapLen %v\n", x.impl) + } + prog := b.Prog + x.Type = prog.Pointer(prog.Int()) + return b.Load(x) +} + // ----------------------------------------------------------------------------- // The IndexAddr instruction yields the address of the element at @@ -450,9 +459,12 @@ func (b Builder) MakeMap(t Type, nReserve Expr) (ret Expr) { if debugInstr { log.Printf("MakeMap %v, %v\n", t.RawType(), nReserve.impl) } + if nReserve.IsNil() { + nReserve = b.Prog.Val(0) + } + typ := b.abiType(t.raw.Type) + ret = b.InlineCall(b.Pkg.rtFunc("MakeMap"), typ, nReserve) ret.Type = t - ret.impl = b.InlineCall(b.Pkg.rtFunc("MakeSmallMap")).impl - // TODO(xsw): nReserve return } @@ -471,8 +483,21 @@ func (b Builder) Lookup(x, key Expr, commaOk bool) (ret Expr) { if debugInstr { log.Printf("Lookup %v, %v, %v\n", x.impl, key.impl, commaOk) } - // TODO(xsw) - // panic("todo") + prog := b.Prog + typ := b.abiType(x.raw.Type) + vtyp := prog.Elem(x.Type) + ptr := b.mapKeyPtr(key) + if commaOk { + vals := b.Call(b.Pkg.rtFunc("MapAccess2"), typ, x, ptr) + val := b.Load(Expr{b.impl.CreateExtractValue(vals.impl, 0, ""), prog.Pointer(vtyp)}) + ok := b.impl.CreateExtractValue(vals.impl, 1, "") + t := prog.Struct(vtyp, prog.Bool()) + return b.aggregateValue(t, val.impl, ok) + } else { + val := b.Call(b.Pkg.rtFunc("MapAccess1"), typ, x, 
ptr) + val.Type = prog.Pointer(vtyp) + ret = b.Load(val) + } return } @@ -489,8 +514,20 @@ func (b Builder) MapUpdate(m, k, v Expr) { if debugInstr { log.Printf("MapUpdate %v[%v] = %v\n", m.impl, k.impl, v.impl) } - // TODO(xsw) - // panic("todo") + typ := b.abiType(m.raw.Type) + ptr := b.mapKeyPtr(k) + ret := b.Call(b.Pkg.rtFunc("MapAssign"), typ, m, ptr) + ret.Type = b.Prog.Pointer(v.Type) + b.Store(ret, v) +} + +// key => unsafe.Pointer +func (b Builder) mapKeyPtr(x Expr) Expr { + typ := x.Type + vtyp := b.Prog.VoidPtr() + vptr := b.AllocU(typ) + b.Store(vptr, x) + return Expr{vptr.impl, vtyp} } // ----------------------------------------------------------------------------- diff --git a/ssa/expr.go b/ssa/expr.go index fa80cdce..42b25786 100644 --- a/ssa/expr.go +++ b/ssa/expr.go @@ -1005,6 +1005,8 @@ func (b Builder) BuiltinCall(fn string, args ...Expr) (ret Expr) { return b.StringLen(arg) case vkChan: return b.InlineCall(b.Pkg.rtFunc("ChanLen"), arg) + case vkMap: + return b.MapLen(arg) } } case "cap": @@ -1131,6 +1133,9 @@ func (b Builder) PrintEx(ln bool, args ...Expr) (ret Expr) { case vkChan: fn = "PrintPointer" typ = prog.VoidPtr() + case vkMap: + fn = "PrintPointer" + typ = prog.VoidPtr() default: panic(fmt.Errorf("illegal types for operand: print %v", arg.RawType())) } diff --git a/ssa/package.go b/ssa/package.go index c273fd7e..4596e810 100644 --- a/ssa/package.go +++ b/ssa/package.go @@ -326,12 +326,13 @@ func (p Program) NewPackage(name, pkgPath string) Package { pymods := make(map[string]Global) strs := make(map[string]llvm.Value) named := make(map[types.Type]Expr) + bucket := make(map[*types.Map]types.Type) p.NeedRuntime = false // Don't need reset p.needPyInit here // p.needPyInit = false ret := &aPackage{ mod: mod, vars: gbls, fns: fns, stubs: stubs, - pyobjs: pyobjs, pymods: pymods, strs: strs, named: named, Prog: p} + pyobjs: pyobjs, pymods: pymods, strs: strs, named: named, bucket: bucket, Prog: p} ret.abi.Init(pkgPath) return ret } @@ -576,6 +577,7 @@ type aPackage struct { pymods map[string]Global strs map[string]llvm.Value named map[types.Type]Expr + bucket map[*types.Map]types.Type afterb unsafe.Pointer patch func(types.Type) types.Type
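Taken together, the ssa changes above give every map operation the same shape: compute the abi type of the map once, spill the key to memory with mapKeyPtr so it can travel as an unsafe.Pointer, and call the matching runtime helper. The sketch below pairs source-level operations with that call shape; the right-hand comments paraphrase the builder code in this diff rather than spelling out exact signatures.

```go
// m := make(map[string]int)  ->  t := abiType(map[string]int); m := MakeMap(t, 0)
// m["a"] = 1                 ->  k := AllocU(keySize); *k = "a"
//                                p := MapAssign(t, m, k); *(*int)(p) = 1
// x := m["a"]                ->  x = *(*int)(MapAccess1(t, m, k))
// x, ok := m["b"]            ->  p, ok := MapAccess2(t, m, k); x = *(*int)(p)
// n := len(m)                ->  MapLen loads the map header's leading count word
package main

func main() {
	m := make(map[string]int)
	m["a"] = 1
	x, ok := m["b"]
	println(m["a"], x, ok, len(m))
}
```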