From e1052f921c0d845fb6257d1347a97af4e4d8dfd9 Mon Sep 17 00:00:00 2001
From: Ayke van Laethem
Date: Mon, 20 Jun 2022 14:40:35 +0200
Subject: [PATCH] compiler: define atomic intrinsic functions directly

This changes the compiler from treating calls to sync/atomic.* functions
as special calls (emitted directly at the call site) to actually defining
their declarations when there is no Go SSA implementation. It relies on
the inliner to inline these very small functions.

This works a bit better in practice. For example, this makes it possible
to use these functions in deferred function calls.

This commit is a bit large because it also needs to refactor a few things
to make it possible to define such intrinsic functions.
---
 compiler/atomic.go     | 49 +++++++++++++++++++++---------------------
 compiler/compiler.go   | 42 ++++++++++++++++++++++++++++++------------
 compiler/intrinsics.go | 22 ++++++++++++++++++++
 testdata/atomic.go     | 11 ++++++++++
 testdata/atomic.txt    |  1 +
 5 files changed, 87 insertions(+), 38 deletions(-)

diff --git a/compiler/atomic.go b/compiler/atomic.go
index c6938bdd..f12c6d11 100644
--- a/compiler/atomic.go
+++ b/compiler/atomic.go
@@ -4,19 +4,17 @@ import (
 	"fmt"
 	"strings"
 
-	"golang.org/x/tools/go/ssa"
 	"tinygo.org/x/go-llvm"
 )
 
-// createAtomicOp lowers an atomic library call by lowering it as an LLVM atomic
-// operation. It returns the result of the operation and true if the call could
-// be lowered inline, and false otherwise.
-func (b *builder) createAtomicOp(call *ssa.CallCommon) (llvm.Value, bool) {
-	name := call.Value.(*ssa.Function).Name()
+// createAtomicOp lowers a sync/atomic function to the corresponding LLVM
+// atomic operation. It returns the result of the operation, or a zero
+// llvm.Value if the result is void.
+func (b *builder) createAtomicOp(name string) llvm.Value {
 	switch name {
 	case "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr":
-		ptr := b.getValue(call.Args[0])
-		val := b.getValue(call.Args[1])
+		ptr := b.getValue(b.fn.Params[0])
+		val := b.getValue(b.fn.Params[1])
 		if strings.HasPrefix(b.Triple, "avr") {
 			// AtomicRMW does not work on AVR as intended:
 			// - There are some register allocation issues (fixed by https://reviews.llvm.org/D97127 which is not yet in a usable LLVM release)
@@ -29,17 +27,18 @@ func (b *builder) createAtomicOp(call *ssa.CallCommon) (llvm.Value, bool) {
 			}
 			oldVal := b.createCall(fn, []llvm.Value{ptr, val}, "")
 			// Return the new value, not the original value returned.
-			return b.CreateAdd(oldVal, val, ""), true
+			return b.CreateAdd(oldVal, val, "")
 		}
 		oldVal := b.CreateAtomicRMW(llvm.AtomicRMWBinOpAdd, ptr, val, llvm.AtomicOrderingSequentiallyConsistent, true)
 		// Return the new value, not the original value returned by atomicrmw.
-		return b.CreateAdd(oldVal, val, ""), true
+		return b.CreateAdd(oldVal, val, "")
 	case "SwapInt32", "SwapInt64", "SwapUint32", "SwapUint64", "SwapUintptr", "SwapPointer":
-		ptr := b.getValue(call.Args[0])
-		val := b.getValue(call.Args[1])
+		ptr := b.getValue(b.fn.Params[0])
+		val := b.getValue(b.fn.Params[1])
 		isPointer := val.Type().TypeKind() == llvm.PointerTypeKind
 		if isPointer {
 			// atomicrmw only supports integers, so cast to an integer.
+			// TODO: this is fixed in LLVM 15.
 			val = b.CreatePtrToInt(val, b.uintptrType, "")
 			ptr = b.CreateBitCast(ptr, llvm.PointerType(val.Type(), 0), "")
 		}
@@ -47,23 +46,23 @@ func (b *builder) createAtomicOp(call *ssa.CallCommon) (llvm.Value, bool) {
 		if isPointer {
 			oldVal = b.CreateIntToPtr(oldVal, b.i8ptrType, "")
 		}
-		return oldVal, true
+		return oldVal
 	case "CompareAndSwapInt32", "CompareAndSwapInt64", "CompareAndSwapUint32", "CompareAndSwapUint64", "CompareAndSwapUintptr", "CompareAndSwapPointer":
-		ptr := b.getValue(call.Args[0])
-		old := b.getValue(call.Args[1])
-		newVal := b.getValue(call.Args[2])
+		ptr := b.getValue(b.fn.Params[0])
+		old := b.getValue(b.fn.Params[1])
+		newVal := b.getValue(b.fn.Params[2])
 		tuple := b.CreateAtomicCmpXchg(ptr, old, newVal, llvm.AtomicOrderingSequentiallyConsistent, llvm.AtomicOrderingSequentiallyConsistent, true)
 		swapped := b.CreateExtractValue(tuple, 1, "")
-		return swapped, true
+		return swapped
 	case "LoadInt32", "LoadInt64", "LoadUint32", "LoadUint64", "LoadUintptr", "LoadPointer":
-		ptr := b.getValue(call.Args[0])
+		ptr := b.getValue(b.fn.Params[0])
 		val := b.CreateLoad(ptr, "")
 		val.SetOrdering(llvm.AtomicOrderingSequentiallyConsistent)
 		val.SetAlignment(b.targetData.PrefTypeAlignment(val.Type())) // required
-		return val, true
+		return val
 	case "StoreInt32", "StoreInt64", "StoreUint32", "StoreUint64", "StoreUintptr", "StorePointer":
-		ptr := b.getValue(call.Args[0])
-		val := b.getValue(call.Args[1])
+		ptr := b.getValue(b.fn.Params[0])
+		val := b.getValue(b.fn.Params[1])
 		if strings.HasPrefix(b.Triple, "avr") {
 			// SelectionDAGBuilder is currently missing the "are unaligned atomics allowed" check for stores.
 			vType := val.Type()
@@ -79,13 +78,15 @@ func (b *builder) createAtomicOp(call *ssa.CallCommon) (llvm.Value, bool) {
 			if fn.IsNil() {
 				fn = llvm.AddFunction(b.mod, name, llvm.FunctionType(vType, []llvm.Type{ptr.Type(), vType, b.uintptrType}, false))
 			}
-			return b.createCall(fn, []llvm.Value{ptr, val, llvm.ConstInt(b.uintptrType, 5, false)}, ""), true
+			b.createCall(fn, []llvm.Value{ptr, val, llvm.ConstInt(b.uintptrType, 5, false)}, "")
+			return llvm.Value{}
 		}
 		store := b.CreateStore(val, ptr)
 		store.SetOrdering(llvm.AtomicOrderingSequentiallyConsistent)
 		store.SetAlignment(b.targetData.PrefTypeAlignment(val.Type())) // required
-		return store, true
+		return llvm.Value{}
 	default:
-		return llvm.Value{}, false
+		b.addError(b.fn.Pos(), "unknown atomic operation: "+b.fn.Name())
+		return llvm.Value{}
 	}
 }
diff --git a/compiler/compiler.go b/compiler/compiler.go
index 043d2cf7..191fb5ae 100644
--- a/compiler/compiler.go
+++ b/compiler/compiler.go
@@ -785,7 +785,12 @@ func (c *compilerContext) createPackage(irbuilder llvm.Builder, pkg *ssa.Package
 			// Create the function definition.
 			b := newBuilder(c, irbuilder, member)
 			if member.Blocks == nil {
-				continue // external function
+				// Try to define this as an intrinsic function.
+				b.defineIntrinsicFunction()
+				// It might not be an intrinsic function but simply an external
+				// function (defined via //go:linkname). Leave it undefined in
+				// that case.
+				continue
 			}
 			b.createFunction()
 		case *ssa.Type:
@@ -1009,10 +1014,11 @@ func (c *compilerContext) getEmbedFileString(file *loader.EmbedFile) llvm.Value
 	return llvm.ConstNamedStruct(c.getLLVMRuntimeType("_string"), []llvm.Value{strPtr, strLen})
 }
 
-// createFunction builds the LLVM IR implementation for this function. The
-// function must not yet be defined, otherwise this function will create a
-// diagnostic.
-func (b *builder) createFunction() {
+// Start defining a function so that it can be filled with instructions: load
+// parameters, create basic blocks, and set up debug information.
+// This is separated out from createFunction() so that it is also usable to
+// define compiler intrinsics like the atomic operations in sync/atomic.
+func (b *builder) createFunctionStart() {
 	if b.DumpSSA {
 		fmt.Printf("\nfunc %s:\n", b.fn)
 	}
@@ -1082,7 +1088,16 @@ func (b *builder) createFunction() {
 		b.blockEntries[block] = llvmBlock
 		b.blockExits[block] = llvmBlock
 	}
-	entryBlock := b.blockEntries[b.fn.Blocks[0]]
+	var entryBlock llvm.BasicBlock
+	if len(b.fn.Blocks) != 0 {
+		// Normal functions have an entry block.
+		entryBlock = b.blockEntries[b.fn.Blocks[0]]
+	} else {
+		// This function isn't defined in Go SSA. It is probably a compiler
+		// intrinsic (like an atomic operation). Create the entry block
+		// manually.
+		entryBlock = b.ctx.AddBasicBlock(b.llvmFn, "entry")
+	}
 	b.SetInsertPointAtEnd(entryBlock)
 
 	if b.fn.Synthetic == "package initializer" {
@@ -1157,6 +1172,13 @@ func (b *builder) createFunction() {
 		// them.
 		b.deferInitFunc()
 	}
+}
+
+// createFunction builds the LLVM IR implementation for this function. The
+// function must not yet be defined, otherwise this function will create a
+// diagnostic.
+func (b *builder) createFunction() {
+	b.createFunctionStart()
 
 	// Fill blocks with instructions.
 	for _, block := range b.fn.DomPreorder() {
@@ -1630,14 +1652,6 @@ func (b *builder) createFunctionCall(instr *ssa.CallCommon) (llvm.Value, error)
 			supportsRecover = 1
 		}
 		return llvm.ConstInt(b.ctx.Int1Type(), supportsRecover, false), nil
-	case strings.HasPrefix(name, "sync/atomic."):
-		val, ok := b.createAtomicOp(instr)
-		if ok {
-			// This call could be lowered as an atomic operation.
-			return val, nil
-		}
-		// This call couldn't be lowered as an atomic operation, it's
-		// probably something else. Continue as usual.
 	case name == "runtime/interrupt.New":
 		return b.createInterruptGlobal(instr)
 	}
diff --git a/compiler/intrinsics.go b/compiler/intrinsics.go
index aeab5b11..50dde981 100644
--- a/compiler/intrinsics.go
+++ b/compiler/intrinsics.go
@@ -3,12 +3,34 @@ package compiler
 // This file contains helper functions to create calls to LLVM intrinsics.
 
 import (
+	"go/token"
 	"strconv"
+	"strings"
 
 	"golang.org/x/tools/go/ssa"
 	"tinygo.org/x/go-llvm"
 )
 
+// Define unimplemented intrinsic functions.
+//
+// Some functions are either normally implemented in Go assembly (like
+// sync/atomic functions) or intentionally left undefined to be implemented
+// directly in the compiler (like runtime/volatile functions). Either way, look
+// for these and implement them if this is the case.
+func (b *builder) defineIntrinsicFunction() {
+	name := b.fn.RelString(nil)
+	switch {
+	case strings.HasPrefix(name, "sync/atomic.") && token.IsExported(b.fn.Name()):
+		b.createFunctionStart()
+		returnValue := b.createAtomicOp(b.fn.Name())
+		if !returnValue.IsNil() {
+			b.CreateRet(returnValue)
+		} else {
+			b.CreateRetVoid()
+		}
+	}
+}
+
 // createMemoryCopyCall creates a call to a builtin LLVM memcpy or memmove
 // function, declaring this function if needed. These calls are treated
 // specially by optimization passes possibly resulting in better generated code,
diff --git a/testdata/atomic.go b/testdata/atomic.go
index f99a39bb..2b9131c9 100644
--- a/testdata/atomic.go
+++ b/testdata/atomic.go
@@ -81,6 +81,9 @@ func main() {
 	// test atomic.Value load/store operations
 	testValue(int(3), int(-2))
 	testValue("", "foobar", "baz")
+
+	// Test atomic operations as deferred values.
+	testDefer()
 }
 
 func testValue(values ...interface{}) {
@@ -93,3 +96,11 @@ func testValue(values ...interface{}) {
 		}
 	}
 }
+
+func testDefer() {
+	n1 := int32(5)
+	defer func() {
+		println("deferred atomic add:", n1)
+	}()
+	defer atomic.AddInt32(&n1, 3)
+}
diff --git a/testdata/atomic.txt b/testdata/atomic.txt
index d1f2ab29..a03f292c 100644
--- a/testdata/atomic.txt
+++ b/testdata/atomic.txt
@@ -33,3 +33,4 @@ StoreUint32: 20
 StoreUint64: 20
 StoreUintptr: 20
 StorePointer: true
+deferred atomic add: 8
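
Note on the new expected output (an illustrative sketch, not part of the patch): deferred
calls run in last-in-first-out order, so the deferred atomic.AddInt32 executes before the
deferred println closure, which therefore observes 5 + 3 = 8. A minimal standalone Go
program mirroring testDefer in testdata/atomic.go:

	package main

	import "sync/atomic"

	func main() {
		n1 := int32(5)
		defer func() {
			// Runs last: the deferred AddInt32 below has already executed,
			// so this prints "deferred atomic add: 8".
			println("deferred atomic add:", n1)
		}()
		// Deferred last, so it runs first (LIFO): n1 becomes 5 + 3 = 8.
		// Before this patch, sync/atomic calls were lowered at the call
		// site and could not be used as deferred calls like this.
		defer atomic.AddInt32(&n1, 3)
	}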