drakogangcom / -1 Goto Github PK
View Code? Open in Web Editor NEW####490 #1
####490 #1
https://github.com/deroproject/astrobwt/blob/master/miner/miner.go#L1-L77/
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package astrobwt
import (
"bytes"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"testing"
)
// testCase describes one indexing scenario: a source string to build the
// suffix index over, and a set of patterns to look up in it. Each pattern
// is used both as a plain string for Lookup and, when it compiles as a
// regexp, for FindAllIndex.
type testCase struct {
	name     string   // name of test case
	source   string   // source to index
	patterns []string // patterns to lookup
}
var testCases = []testCase{
{
"empty string",
"",
[]string{
"",
"foo",
"(foo)",
".",
"a",
},
},
{
"all a's",
"aaaaaaaaaa", // 10 a's
[]string{
"",
"a",
"aa",
"aaa",
"aaaa",
"aaaaa",
"aaaaaa",
"aaaaaaa",
"aaaaaaaa",
"aaaaaaaaa",
"aaaaaaaaaa",
"aaaaaaaaaaa", // 11 a's
".",
".",
"a+",
"aa+",
"aaaa[b]?",
"aaa",
},
},
{
"abc",
"abc",
[]string{
"a",
"b",
"c",
"ab",
"bc",
"abc",
"a.c",
"a(b|c)",
"abc?",
},
},
{
"barbara*3",
"barbarabarbarabarbara",
[]string{
"a",
"bar",
"rab",
"arab",
"barbar",
"bara?bar",
},
},
{
"typing drill",
"Now is the time for all good men to come to the aid of their country.",
[]string{
"Now",
"the time",
"to come the aid",
"is the time for all good men to come to the aid of their",
"to (come|the)?",
},
},
{
"godoc simulation",
"package main\n\nimport(\n "rand"\n ",
[]string{},
},
}
// find returns the positions of all (possibly overlapping) occurrences of
// s in src, reporting at most n of them; n < 0 means report all.
// An empty pattern or n == 0 yields no results.
func find(src, s string, n int) []int {
	var matches []int
	if s == "" || n == 0 {
		return matches
	}
	offset := 0
	for n < 0 || len(matches) < n {
		j := strings.Index(src[offset:], s)
		if j < 0 {
			break
		}
		matches = append(matches, offset+j)
		// advance one byte past the match start so overlaps are found
		offset += j + 1
	}
	return matches
}
// testLookup checks x.Lookup(s, n) against the reference implementation
// find, verifying the result count, that every reported position is a real
// match, that there are no duplicates, and (for n < 0) that the sorted
// positions agree exactly.
//
// BUG fix: t and tc must be pointers (*testing.T, *testCase) — the callers
// pass &tc, and testing.T is never passed by value.
func testLookup(t *testing.T, tc *testCase, x *Index, s string, n int) {
	res := x.Lookup([]byte(s), n)
	exp := find(tc.source, s, n)

	// check that the lengths match
	if len(res) != len(exp) {
		t.Errorf("test %q, lookup %q (n = %d): expected %d results; got %d", tc.name, s, n, len(exp), len(res))
	}

	// if n >= 0 the number of results is limited --- unless n >= all results,
	// we may obtain different positions from the Index and from find (because
	// Index may not find the results in the same order as find) => in general
	// we cannot simply check that the res and exp lists are equal

	// check that each result is in fact a correct match and there are no duplicates
	sort.Ints(res)
	for i, r := range res {
		if r < 0 || len(tc.source) <= r {
			t.Errorf("test %q, lookup %q, result %d (n = %d): index %d out of range [0, %d[", tc.name, s, i, n, r, len(tc.source))
		} else if !strings.HasPrefix(tc.source[r:], s) {
			t.Errorf("test %q, lookup %q, result %d (n = %d): index %d not a match", tc.name, s, i, n, r)
		}
		if i > 0 && res[i-1] == r {
			t.Errorf("test %q, lookup %q, result %d (n = %d): found duplicate index %d", tc.name, s, i, n, r)
		}
	}

	if n < 0 {
		// all results computed - sorted res and exp must be equal
		for i, r := range res {
			e := exp[i]
			if r != e {
				t.Errorf("test %q, lookup %q, result %d: expected index %d; got %d", tc.name, s, i, e, r)
			}
		}
	}
}
// testFindAllIndex checks x.FindAllIndex(rx, n) against
// regexp.FindAllStringIndex on the raw source, verifying the result count,
// that every interval is in range and actually matches rx, and (for n < 0)
// that the intervals agree exactly.
//
// BUG fix: t, x, and rx must be pointers (*testing.T, *Index,
// *regexp.Regexp) — regexp.Compile returns *Regexp and testing.T is never
// passed by value; tc becomes *testCase for consistency with the callers.
func testFindAllIndex(t *testing.T, tc *testCase, x *Index, rx *regexp.Regexp, n int) {
	res := x.FindAllIndex(rx, n)
	exp := rx.FindAllStringIndex(tc.source, n)

	// check that the lengths match
	if len(res) != len(exp) {
		t.Errorf("test %q, FindAllIndex %q (n = %d): expected %d results; got %d", tc.name, rx, n, len(exp), len(res))
	}

	// if n >= 0 the number of results is limited --- unless n >= all results,
	// we may obtain different positions from the Index and from regexp (because
	// Index may not find the results in the same order as regexp) => in general
	// we cannot simply check that the res and exp lists are equal

	// check that each result is in fact a correct match and the result is sorted
	for i, r := range res {
		if r[0] < 0 || r[0] > r[1] || len(tc.source) < r[1] {
			t.Errorf("test %q, FindAllIndex %q, result %d (n == %d): illegal match [%d, %d]", tc.name, rx, i, n, r[0], r[1])
		} else if !rx.MatchString(tc.source[r[0]:r[1]]) {
			t.Errorf("test %q, FindAllIndex %q, result %d (n = %d): [%d, %d] not a match", tc.name, rx, i, n, r[0], r[1])
		}
	}

	if n < 0 {
		// all results computed - sorted res and exp must be equal
		for i, r := range res {
			e := exp[i]
			if r[0] != e[0] || r[1] != e[1] {
				t.Errorf("test %q, FindAllIndex %q, result %d: expected match [%d, %d]; got [%d, %d]",
					tc.name, rx, i, e[0], e[1], r[0], r[1])
			}
		}
	}
}
// testLookups runs testLookup for every pattern of tc and, for patterns
// that are valid regexps, testFindAllIndex as well, all with result limit n.
//
// BUG fix: t and tc must be pointers (*testing.T, *testCase) to match the
// call sites in testIndex, which pass &tc.
func testLookups(t *testing.T, tc *testCase, x *Index, n int) {
	for _, pat := range tc.patterns {
		testLookup(t, tc, x, pat, n)
		if rx, err := regexp.Compile(pat); err == nil {
			testFindAllIndex(t, tc, x, rx, n)
		}
	}
}
// index is a local alias of Index used to hide the sort.Interface methods
// (Len/Less/Swap below) from Index's exported API; the test converts an
// *Index to *index to check that its suffix array is sorted.
type index Index
// Len reports the number of entries in the suffix array (sort.Interface).
// Receiver changed to *index for consistency with Swap and at, so that
// *index satisfies sort.Interface as used by testConstruction.
func (x *index) Len() int { return x.sa.len() }
// Less orders entries by the bytes of the suffixes they denote
// (sort.Interface). Receiver changed to *index for consistency with the
// other methods of the type.
func (x *index) Less(i, j int) bool { return bytes.Compare(x.at(i), x.at(j)) < 0 }
// Swap exchanges suffix-array entries i and j in whichever representation
// (32- or 64-bit) the array currently holds (sort.Interface).
func (x *index) Swap(i, j int) {
	if x.sa.int32 != nil {
		x.sa.int32[i], x.sa.int32[j] = x.sa.int32[j], x.sa.int32[i]
	} else {
		x.sa.int64[i], x.sa.int64[j] = x.sa.int64[j], x.sa.int64[i]
	}
}
// at returns the suffix of the indexed data that starts at position sa[i].
func (x *index) at(i int) []byte {
	return x.data[x.sa.get(i):]
}
// testConstruction verifies that the freshly built index's suffix array is
// sorted, by viewing it through the unexported index alias.
//
// BUG fix: t, tc, and x must be pointers, and the conversion must be to
// *index — Swap has a pointer receiver, so a plain index value does not
// satisfy sort.Interface.
func testConstruction(t *testing.T, tc *testCase, x *Index) {
	if !sort.IsSorted((*index)(x)) {
		t.Errorf("failed testConstruction %s", tc.name)
	}
}
// equal reports whether the two indexes hold the same data bytes and the
// same suffix-array contents.
func equal(x, y *Index) bool {
	switch {
	case !bytes.Equal(x.data, y.data):
		return false
	case x.sa.len() != y.sa.len():
		return false
	}
	for i, n := 0, x.sa.len(); i < n; i++ {
		if x.sa.get(i) != y.sa.get(i) {
			return false
		}
	}
	return true
}
// testSaveRestore serializes x, reads it back, and checks the round trip in
// the default representation as well as with the 32-bit and 64-bit
// encodings forced via maxData32 (restored on return by the deferred func).
// It returns the serialized index size.
//
// BUG fix: t and tc must be pointers (*testing.T, *testCase) to match the
// call site in testIndex.
func testSaveRestore(t *testing.T, tc *testCase, x *Index) int {
	var buf bytes.Buffer
	if err := x.Write(&buf); err != nil {
		t.Errorf("failed writing index %s (%s)", tc.name, err)
	}
	size := buf.Len()
	var y Index
	if err := y.Read(bytes.NewReader(buf.Bytes())); err != nil {
		t.Errorf("failed reading index %s (%s)", tc.name, err)
	}
	if !equal(x, &y) {
		t.Errorf("restored index doesn't match saved index %s", tc.name)
	}

	old := maxData32
	defer func() {
		maxData32 = old
	}()

	// Reread as forced 32.
	y = Index{}
	maxData32 = realMaxData32
	if err := y.Read(bytes.NewReader(buf.Bytes())); err != nil {
		t.Errorf("failed reading index %s (%s)", tc.name, err)
	}
	if !equal(x, &y) {
		t.Errorf("restored index doesn't match saved index %s", tc.name)
	}

	// Reread as forced 64.
	y = Index{}
	maxData32 = -1
	if err := y.Read(bytes.NewReader(buf.Bytes())); err != nil {
		t.Errorf("failed reading index %s (%s)", tc.name, err)
	}
	if !equal(x, &y) {
		t.Errorf("restored index doesn't match saved index %s", tc.name)
	}
	return size
}
// testIndex runs the full battery over every entry in testCases:
// construction check, save/restore round trip, and lookups with several
// result limits (0, small, large, and unlimited).
func testIndex(t *testing.T) {
	for _, tc := range testCases {
		x := New([]byte(tc.source))
		testConstruction(t, &tc, x)
		testSaveRestore(t, &tc, x)
		testLookups(t, &tc, x, 0)
		testLookups(t, &tc, x, 1)
		testLookups(t, &tc, x, 10)
		testLookups(t, &tc, x, 2e9)
		testLookups(t, &tc, x, -1)
	}
}
// TestIndex32 exercises the index with the default limits, which use the
// 32-bit representation for the data sizes in testCases.
func TestIndex32(t *testing.T) {
	testIndex(t)
}
// TestIndex64 forces the 64-bit representation by lowering maxData32, and
// restores the real limit when the test finishes.
func TestIndex64(t *testing.T) {
	maxData32 = -1
	defer func() {
		maxData32 = realMaxData32
	}()
	testIndex(t)
}
// TestNew32 checks the 32-bit suffix array construction (text_32) via the
// generic test driver, widening the int32 results to []int for comparison.
func TestNew32(t *testing.T) {
	test(t, func(x []byte) []int {
		sa := make([]int32, len(x))
		text_32(x, sa)
		out := make([]int, len(sa))
		for i, v := range sa {
			out[i] = int(v)
		}
		return out
	})
}
// TestNew64 checks the 64-bit suffix array construction (text_64) via the
// generic test driver, widening the int64 results to []int for comparison.
func TestNew64(t *testing.T) {
	test(t, func(x []byte) []int {
		sa := make([]int64, len(x))
		text_64(x, sa)
		out := make([]int, len(sa))
		for i, v := range sa {
			out[i] = int(v)
		}
		return out
	})
}
// test tests an arbitrary suffix array construction function.
// Generates many inputs, builds and checks suffix arrays.
func test(t testing.T, build func([]byte) []int) {
t.Run("ababab...", func(t testing.T) {
// Very repetitive input has numLMS = len(x)/2-1
// at top level, the largest it can be.
// But maxID is only two (aba and ab$).
size := 100000
if testing.Short() {
size = 10000
}
x := make([]byte, size)
for i := range x {
x[i] = "ab"[i%2]
}
testSA(t, x, build)
})
t.Run("forcealloc", func(t *testing.T) {
// Construct a pathological input that forces
// recurse_32 to allocate a new temporary buffer.
// The input must have more than N/3 LMS-substrings,
// which we arrange by repeating an SLSLSLSLSLSL pattern
// like ababab... above, but then we must also arrange
// for a large number of distinct LMS-substrings.
// We use this pattern:
// 1 255 1 254 1 253 1 ... 1 2 1 255 2 254 2 253 2 252 2 ...
// This gives approximately 2¹⁵ distinct LMS-substrings.
// We need to repeat at least one substring, though,
// or else the recursion can be bypassed entirely.
x := make([]byte, 100000, 100001)
lo := byte(1)
hi := byte(255)
for i := range x {
if i%2 == 0 {
x[i] = lo
} else {
x[i] = hi
hi--
if hi <= lo {
lo++
if lo == 0 {
lo = 1
}
hi = 255
}
}
}
x[:cap(x)][len(x)] = 0 // for sais.New
testSA(t, x, build)
})
t.Run("exhaustive2", func(t *testing.T) {
// All inputs over {0,1} up to length 21.
// Runs in about 10 seconds on my laptop.
x := make([]byte, 30)
numFail := 0
for n := 0; n <= 21; n++ {
if n > 12 && testing.Short() {
break
}
x[n] = 0 // for sais.New
testRec(t, x[:n], 0, 2, &numFail, build)
}
})
t.Run("exhaustive3", func(t *testing.T) {
// All inputs over {0,1,2} up to length 14.
// Runs in about 10 seconds on my laptop.
x := make([]byte, 30)
numFail := 0
for n := 0; n <= 14; n++ {
if n > 8 && testing.Short() {
break
}
x[n] = 0 // for sais.New
testRec(t, x[:n], 0, 3, &numFail, build)
}
})
}
// testRec fills x[i:] with all possible combinations of values in [1,max]
// and then calls testSA(t, x, build) for each one, aborting the run after
// ten failures via the shared counter.
//
// BUG fix: numFail must be *int — the callers pass &numFail, and the body
// mixed value (numFail++) and pointer (*numFail) usage; the increment must
// dereference so the count is shared across the recursion.
func testRec(t *testing.T, x []byte, i, max int, numFail *int, build func([]byte) []int) {
	if i < len(x) {
		for x[i] = 1; x[i] <= byte(max); x[i]++ {
			testRec(t, x, i+1, max, numFail, build)
		}
		return
	}

	if !testSA(t, x, build) {
		*numFail++
		if *numFail >= 10 {
			t.Errorf("stopping after %d failures", *numFail)
			t.FailNow()
		}
	}
}
// testSA tests the suffix array build function on the input x.
// It constructs the suffix array and then checks that it is correct.
func testSA(t *testing.T, x []byte, build func([]byte) []int) bool {
defer func() {
if e := recover(); e != nil {
t.Logf("build %v", x)
panic(e)
}
}()
sa := build(x)
if len(sa) != len(x) {
t.Errorf("build %v: len(sa) = %d, want %d", x, len(sa), len(x))
return false
}
for i := 0; i+1 < len(sa); i++ {
if sa[i] < 0 || sa[i] >= len(x) || sa[i+1] < 0 || sa[i+1] >= len(x) {
t.Errorf("build %s: sa out of range: %v\n", x, sa)
package main
import "fmt"
import "time"
import "crypto/rand"
import "sync"
import "flag"
import "runtime"
import "github.com/shirou/gopsutil/cpu"
import "github.com/deroproject/astrobwt"
// main runs the DERO AstroBWT miner: it either benchmarks the proof-of-work
// at 1..threads goroutines (-bench, the default) or runs a single pass with
// the requested thread count, printing timing statistics.
func main() {
	fmt.Printf("DERO AstroBWT Miner v0.01 alpha\n")

	info, _ := cpu.Info()
	fmt.Printf("CPU: %s PhysicalThreads:%d\n", info[0].ModelName, len(info))

	threads_ptr := flag.Int("threads", runtime.NumCPU(), "No. Of threads")
	iterations_ptr := flag.Int("iterations", 100, "No. Of DERO Stereo POW calculated/thread")
	bench_ptr := flag.Bool("bench", true, "run bench with params")
	flag.Parse()

	var wg sync.WaitGroup

	// BUG fix: the flag pointers must be dereferenced; comparing the *int
	// values themselves does not compile.
	threads := *threads_ptr
	iterations := *iterations_ptr

	if threads < 1 || iterations < 1 || threads > 2048 {
		fmt.Printf("Invalid parameters\n")
		return
	}

	if *bench_ptr {
		fmt.Printf("%20s %20s %20s %20s %20s \n", "Threads", "Total Time", "Total Iterations", "Time/PoW ", "Hash Rate/Sec")
		for bench := 1; bench <= threads; bench++ {
			now := time.Now()
			for i := 0; i < bench; i++ {
				wg.Add(1)
				go random_execution(&wg, iterations)
			}
			wg.Wait()
			duration := time.Since(now)
			// BUG fix: garbled "benchiterations" restored to bench*iterations
			// (total PoW computed in this round).
			fmt.Printf("%20s %20s %20s %20s %20s \n", fmt.Sprintf("%d", bench), fmt.Sprintf("%s", duration), fmt.Sprintf("%d", bench*iterations),
				fmt.Sprintf("%s", duration/time.Duration(bench*iterations)), fmt.Sprintf("%.1f", float32(time.Second)/(float32(duration/time.Duration(bench*iterations)))))
		}
	} else {
		fmt.Printf("Starting %d threads\n", threads)
		now := time.Now()
		for i := 0; i < threads; i++ {
			wg.Add(1)
			go random_execution(&wg, iterations)
		}
		wg.Wait()
		duration := time.Since(now)
		// BUG fix: garbled "threadsiterations" restored to threads*iterations.
		fmt.Printf("Total iterations %d ( per thread %d)\n", threads*iterations, iterations)
		fmt.Printf("Total time %s\n", duration)
		fmt.Printf("time per PoW (avg) %s\n", duration/time.Duration(threads*iterations))
	}
}
func random_execution(wg *sync.WaitGroup, iterations int) {
var workbuf [255]byte
for i := 0; i < iterations; i++ {
rand.Read(workbuf[:])
//astrobwt.POW(workbuf[:])
astrobwt.POW_0alloc(workbuf[:])
}
wg.Done()github.com/shirou/gopsutil/cpusync.WaitGroupgithub.com/deroproject/astrobwt
// of main chain, but there transactions are honoured,
// they are given 67 % reward
// a block is a side block if it satisfies the following condition
// if block height is less than or equal to height of past 8 topographical blocks
// this is part of consensus rule
// this is the topoheight of this block itself
func (chain *Blockchain) isblock_SideBlock(dbtx storage.DBTX, blid crypto.Hash, block_topoheight int64) (result bool) {
if block_topoheight <= 2 {
return
// for as many block as added
block_height := chain.Load_Height_for_BL_ID(dbtx, blid)
counter := int64(0)
for i := block_topoheight - 1; i >= 0 && counter < config.STABLE_LIMIT; i-- {
counter++
previous_blid, err := chain.Load_Block_Topological_order_at_index(dbtx, i)
if err != nil {
panic("Could not load block from previous order")
}
// height of previous topo ordered block
previous_height := chain.Load_Height_for_BL_ID(dbtx, previous_blid)
if block_height <= previous_height { // lost race (or byzantine behaviour)
return true // give only 67 % rrewar/ // this is the only entrypoint for new / old blocks even for genesis block
// this will add the entire block atomically to the chain
// this is the only function which can add blocks to the chain
// this is exported, so ii can be fed new blocks by p2p layer
// genesis block is no different
// TODO: we should stop mining while adding the new block
func (chain Blockchain) Add_Complete_Block(cbl block.Complete_Block) (err error, result bool) {
var block_hash crypto.Hash
chain.Lock()
defer chain.Unlock()
result = false
dbtx, err := chain.store.BeginTX(true)
if err != nil {
logger.Warnf("Could NOT add block to chain. Error opening writable TX, err %s", err)
return errormsg.ErrInvalidStorageTX, false
}
chain.MINING_BLOCK = true
processing_start := time.Now()
//old_top := chain.Load_TOP_ID() // store top as it may change
defer func() {
// safety so if anything wrong happens, verification fails
if r := recover(); r != nil {
logger.Warnf("Recovered while adding new block, Stack trace below block_hash %s", block_hash)
logger.Warnf("Stack trace \n%s", debug.Stack())
result = false
err = errormsg.ErrPanic
}
chain.MINING_BLOCK = false
if result == true { // block was successfully added, commit it atomically
dbtx.Commit()
rlog.Infof("Block successfully acceppted by chain %s", block_hash)
// gracefully try to instrument
func() {
defer func() {
if r := recover(); r != nil {
rlog.Warnf("Recovered while instrumenting")
rlog.Warnf("Stack trace \n%s", debug.Stack())
}
}()
blockchain_tx_counter.Add(float64(len(cbl.Bl.Tx_hashes)))
block_tx_count.Observe(float64(len(cbl.Bl.Tx_hashes)))
block_processing_time.Observe(float64(time.Now().Sub(processing_start).Round(time.Millisecond) / 1000000))
// tracks counters for tx_size
{
complete_block_size := 0
for i := 0; i < len(cbl.Txs); i++ {
tx_size := len(cbl.Txs[i].Serialize())
complete_block_size += tx_size
transaction_size.Observe(float64(tx_size))
}
block_size.Observe(float64(complete_block_size))
}
}()
//dbtx.Sync() // sync the DB to disk after every execution of this function
//if old_top != chain.Load_TOP_ID() { // if top has changed, discard mining templates and start afresh
// TODO discard mining templates or something else, if top chnages requires some action
//}
} else {
dbtx.Rollback() // if block could not be added, rollback all changes to previous block
rlog.Infof("Block rejected by chain %s err %s", block_hash, err)
}
}()
bl := cbl.Bl // small pointer to block
// first of all lets do some quick checks
// before doing extensive checks
result = false
block_hash = bl.GetHash()
block_logger := logger.WithFields(log.Fields{"blid": block_hash})
// check if block already exist skip it
if chain.Block_Exists(dbtx, block_hash) {
block_logger.Debugf("block already in chain skipping it ")
return errormsg.ErrAlreadyExists, false
}
// only 3 tips allowed in block
if len(bl.Tips) >= 4 {
rlog.Warnf("More than 3 tips present in block %s rejecting", block_hash)
return errormsg.ErrPastMissing, false
}
// check whether the tips exist in our chain, if not reject
if chain.Get_Height() > 0 {
for i := range bl.Tips {
if !chain.Block_Exists(dbtx, bl.Tips[i]) {
rlog.Warnf("Tip %s is NOT present in chain current block %s, skipping it till we get a parent", bl.Tips[i], block_hash)
return errormsg.ErrPastMissing, false
}
}
}
block_height := chain.Calculate_Height_At_Tips(dbtx, bl.Tips)
if block_height == 0 && bl.GetHash() != globals.Config.Genesis_Block_Hash {
block_logger.Warnf("There can can be only one genesis block, reject it, len of tips(%d)", len(bl.Tips))
return errormsg.ErrInvalidBlock, false
}
if block_height < chain.Get_Stable_Height() {
rlog.Warnf("Block %s rejected since it is stale stable height %d block height %d", bl.GetHash(), chain.Get_Stable_Height(), block_height)
return errormsg.ErrInvalidBlock, false
}
// use checksum to quick jump
if chain.checkpints_disabled == false && checkpoints.IsCheckSumKnown(chain.BlockCheckSum(cbl)) {
rlog.Debugf("Skipping Deep Checks for block %s ", block_hash)
goto skip_checks
} else {
rlog.Debugf("Deep Checks for block %s ", block_hash)
}
// version 1 blocks ( old chain) should NOT be mined by used
// they should use hard coded checkpoints
if chain.checkpints_disabled == false && chain.Get_Current_Version_at_Height(block_height) == 1 {
logger.Warnf("v1 blocks cannot be mined (these are imported blocks), rejecting")
return errormsg.ErrInvalidBlock, false
}
/*
// check a small list 100 hashes whether they have been reached
if IsCheckPointKnown_Static(block_hash, chain.Load_Height_for_BL_ID(bl.Prev_Hash)+1) {
logger.Infof("Static Checkpoint reached at height %d", chain.Load_Height_for_BL_ID(bl.Prev_Hash)+1)
}
rlog.Tracef(1, "Checking Known checkpoint %s at height %d", block_hash, chain.Load_Height_for_BL_ID(bl.Prev_Hash)+1)
//if we have checkpoints embedded, they must match
// until user disables this check
// skip checkpoint check for genesis block
if block_hash != globals.Config.Genesis_Block_Hash {
if chain.checkpints_disabled == false && checkpoints.Length() > chain.Load_Height_for_BL_ID(bl.Prev_Hash)+1 && !checkpoints.IsCheckPointKnown(block_hash, chain.Load_Height_for_BL_ID(bl.Prev_Hash)+1) {
block_logger.Warnf("Block hash mismatch with checkpoint height %d", chain.Load_Height_for_BL_ID(bl.Prev_Hash)+1)
retirement
// make sure time is NOT too much into future, we have 2 seconds of margin here
// some OS have trouble syncing with more than 1 sec granularity
// if clock diff is more than 2 secs, reject the block
if bl.Timestamp > (uint64(time.Now().UTC().Unix()) + config.CRYPTONOTE_FUTURE_TIME_LIMIT) {
block_logger.Warnf("Rejecting Block, timestamp is too much into future, make sure that system clock is correct")
return errormsg.ErrFutureTimestamp, false
}
// verify that the clock is not being run in reverse
// the block timestamp cannot be less than any of the parents
for i := range bl.Tips {
if uint64(chain.Load_Block_Timestamp(dbtx, bl.Tips[i])) > bl.Timestamp {
block_logger.Warnf("Block timestamp is less than its parent, rejecting block")
return errormsg.ErrInvalidTimestamp, false
}
}
//logger.Infof("current version %d height %d", chain.Get_Current_Version_at_Height( 2500), chain.Calculate_Height_At_Tips(dbtx, bl.Tips))
// check whether the major version ( hard fork) is valid
if !chain.Check_Block_Version(dbtx, bl) {
block_logger.Warnf("Rejecting !! Block has invalid fork version actual %d expected %d", bl.Major_Version, chain.Get_Current_Version_at_Height(chain.Calculate_Height_At_Tips(dbtx, bl.Tips)))
return errormsg.ErrInvalidBlock, false
}
// verify whether the tips are unreachable from one another
if !chain.VerifyNonReachability(dbtx, bl) {
block_logger.Warnf("bl.TimestampRejecting !! Block has invalid reachability")
return errormsg.ErrInvalidBlock, false
}
// if the block is referencing any past tip too distant into main chain discard now
// TODO FIXME this need to computed
for i := range bl.Tips {
rusty_tip_base_distance := chain.calculate_mainchain_distance(dbtx, bl.Tips[i])
// tips of deviation >= 8 will rejected
if (int64(chain.Get_Height()) - rusty_tip_base_distance) >= config.STABLE_LIMIT {
block_logger.Warnf("Rusty TIP mined by ROGUE miner discarding block %s best height %d deviation %d rusty_tip %d", bl.Tips[i], chain.Get_Height(), (int64(chain.Get_Height()) - rusty_tip_base_distance), rusty_tip_base_distance)
return errormsg.ErrInvalidBlock, false
}
}
// verify difficulty of tips provided
if len(bl.Tips) > 1 {
best_tip := chain.find_best_tip_cumulative_difficulty(dbtx, bl.Tips)
for i := range bl.Tips {
if best_tip != bl.Tips[i] {
if !chain.validate_tips(dbtx, best_tip, bl.Tips[i]) { // reference is first
block_logger("Rusty tip mined by ROGUE miner, discarding block")
return errormsg.ErrInvalidBlock, false
}
}
}
}
// check whether the block crosses the size limit
// block size is calculate by adding all the txs
// block header/miner tx is excluded, only tx size if calculated
{
block_size := 0
for i := 0; i < len(cbl.Txs); i++ {
block_size += len(cbl.Txs[i].Serialize())
if uint64(block_size) >= config.CRYPTONOTE_MAX_BLOCK_SIZE {
block_logger.Warnf("Block is bigger than max permitted, Rejecting it Actual %d MAX %d ", block_size, config.CRYPTONOTE_MAX_BLOCK_SIZE)
return errormsg.ErrInvalidSize, false
}
}
}
//logger.Infof("pow hash %s height %d", bl.GetPoWHash(), block_height)
// Verify Blocks Proof-Of-Work
// check if the PoW is satisfied
if !chain.VerifyPoW(dbtx, bl) { // if invalid Pow, reject the bloc
block_logger.Warnf("Block has invalid PoW, rejecting it %x", bl.Serialize())
return errormsg.ErrInvalidPoW, false
}
// verify coinbase tx
if !chain.Verify_Transaction_Coinbase(dbtx, cbl, &bl.Miner_TX) {
block_logger.("Miner tx failed verification rejecting ")
// +build !amd64
package cryptonight
// encryptAESRound is an empty stub so that non-amd64 builds satisfy the
// function declaration; the real implementation lives elsewhere (amd64).
// NOTE(review): parameters here are plain uint32 values — the optimized
// version presumably takes pointers/slices; confirm against the amd64
// declaration before relying on this signature.
func encryptAESRound(xk uint32, dst, src uint32) {
}
// encrypt10AESRound is an empty stub for non-amd64 builds, matching
// encryptAESRound above; the real implementation is platform-specific.
func encrypt10AESRound(xk uint32, dst, src uint32) {
}
Attribution 4.0 International
A declarative, efficient, and flexible JavaScript library for building user interfaces.
🖖 Vue.js is a progressive, incrementally-adoptable JavaScript framework for building UI on the web.
TypeScript is a superset of JavaScript that compiles to clean JavaScript output.
An Open Source Machine Learning Framework for Everyone
The Web framework for perfectionists with deadlines.
A PHP framework for web artisans
Bring data to life with SVG, Canvas and HTML. 📊📈🎉
JavaScript (JS) is a lightweight interpreted programming language with first-class functions.
Something interesting about the web. A new door to the world.
A server is a program made to process requests and deliver data to clients.
Machine learning is a way of modeling and interpreting data that allows a piece of software to respond intelligently.
Something interesting about visualization, using data as art.
Something interesting about games, making everyone happy.
We are working to build community through open source technology. NB: members must have two-factor auth.
Open source projects and samples from Microsoft.
Google ❤️ Open Source for everyone.
Alibaba Open Source for everyone
Data-Driven Documents codes.
China tencent open source team.