code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package graph
import (
"errors"
"math"
)
// Node represents a graph node.
type Node struct {
	data  interface{}     // arbitrary payload carried by the node
	graph *Graph          // owning graph; nil until the node is added to one
	edges map[*Node]*Edge // outgoing edges keyed by destination; lazily allocated
}
// NewNode creates a new node carrying the given data. The node is not yet
// attached to a graph; edges can only be added once it belongs to one.
func NewNode(data interface{}) *Node {
	return &Node{
		// Store the value itself. The previous code stored &data, a
		// *interface{} pointing at the local parameter, so Data() returned
		// a pointer instead of the caller's value.
		data: data,
	}
}
// Data returns the payload stored in the node.
func (n *Node) Data() interface{} {
	return n.data
}
// addEdge creates a weighted edge from n to e. It fails if n is not part of
// a graph. Adding an edge to an existing neighbor is a no-op (the original
// weight is kept).
func (n *Node) addEdge(e *Node, weight float64) error {
	if n.graph == nil {
		return errors.New("Node needs to be part of a graph before adding edges")
	}
	if n.edges == nil {
		// Lazily allocate; no capacity hint needed for an empty map.
		n.edges = make(map[*Node]*Edge)
	}
	if n.IsNeighbor(e) {
		return nil
	}
	n.edges[e] = &Edge{weight: weight}
	return nil
}
// Neighbors returns the nodes this node has an outgoing edge to.
// It always returns a non-nil slice; the order is unspecified (map iteration).
func (n *Node) Neighbors() []*Node {
	// len of a nil map is 0, so no separate nil guard is needed.
	neighbors := make([]*Node, 0, len(n.edges))
	for e := range n.edges {
		neighbors = append(neighbors, e)
	}
	return neighbors
}
// HasEdges reports whether the node has at least one outgoing edge.
func (n *Node) HasEdges() bool {
	// len of a nil map is 0, so the nil check is unnecessary.
	return len(n.edges) > 0
}
// removeEdge deletes the edge from n to e. It returns an error when the node
// has no edges at all, or when no edge to e exists.
func (n *Node) removeEdge(e *Node) error {
	if n.edges == nil {
		return errors.New("Node has no edges")
	}
	_, isNeighbor := n.edges[e]
	if !isNeighbor {
		return errors.New("Edge not found")
	}
	delete(n.edges, e)
	return nil
}
// IsNeighbor reports whether n has a direct edge to node.
func (n *Node) IsNeighbor(node *Node) bool {
	// Lookups on a nil map are safe and return the zero value,
	// so no explicit nil guard is required.
	_, isNeighbor := n.edges[node]
	return isNeighbor
}
// bfs performs a breadth-first traversal starting at n and returns, for every
// reachable node, its hop distance from n.
//
// The previous implementation incremented a single counter per dequeued node,
// so the recorded values were visit order rather than BFS depth, and a node
// could be enqueued (and its value overwritten) multiple times because it was
// only marked as visited when dequeued. Marking nodes at enqueue time with
// parent-depth+1 fixes both issues.
func (n *Node) bfs() (visited map[*Node]int) {
	visited = map[*Node]int{n: 0}
	fifo := []*Node{n}
	for len(fifo) > 0 {
		cur := fifo[0]
		fifo = fifo[1:]
		for _, e := range cur.Neighbors() {
			if _, seen := visited[e]; !seen {
				visited[e] = visited[cur] + 1
				fifo = append(fifo, e)
			}
		}
	}
	return
}
// Dijkstra computes shortest paths from n to all nodes in the owning graph
// using Dijkstra's algorithm (linear-scan min extraction, O(V^2)).
// It returns the tentative-distance map and the predecessor map, where
// prev[v] is the node preceding v on the shortest path from n.
// The search stops early once `to` becomes the closest unvisited node.
// NOTE(review): unreachable nodes keep the sentinel distance math.MaxFloat32;
// callers should treat that value as "infinity".
func (n *Node) Dijkstra(to *Node) (dist map[*Node]float64, prev map[*Node]*Node) {
	dist = make(map[*Node]float64)
	prev = make(map[*Node]*Node)
	// q is the set of unvisited nodes.
	q := make(map[*Node]bool)
	for v := range n.graph.nodes {
		q[v] = true
		prev[v] = nil
		dist[v] = math.MaxFloat32 // sentinel for "infinite" distance
	}
	dist[n] = 0
	for len(q) > 0 {
		// Extract the unvisited node with the smallest tentative distance.
		var u *Node = nil
		for v := range q {
			if u == nil || dist[v] < dist[u] {
				u = v //find u
			}
		}
		if u == to {
			return
		}
		delete(q, u) //remove u from q
		// Relax all edges out of u that lead to still-unvisited nodes.
		for v, e := range u.edges {
			if _, found := q[v]; !found {
				continue
			}
			alt := dist[u] + e.weight
			if alt < dist[v] {
				dist[v] = alt
				prev[v] = u
			}
		}
	}
	return
}
// PathTo returns the shortest path from n to node, computed with Dijkstra.
// The path is built by walking the predecessor chain backwards from node,
// prepending each hop, so the result runs from n (or the first reachable
// predecessor) to node inclusive.
func (n *Node) PathTo(node *Node) Path {
	_, p := n.Dijkstra(node)
	path := Path{node}
	for u := p[node]; u != nil; u = p[u] {
		path = append(Path{u}, path...)
	}
	return path
}
// EdgeWeight returns the weight of the edge from n to node,
// or 0 if no such edge exists.
func (n *Node) EdgeWeight(node *Node) float64 {
	if !n.IsNeighbor(node) {
		return 0
	}
	return n.edges[node].weight
} | graph/node.go | 0.655115 | 0.461684 | node.go | starcoder |
package sip
import (
"container/list"
"gosips/sip/message"
)
/**
* This interface represents the management interface of a SIP stack
* implementing this specification and as such is the interface that defines
* the management/architectural view of the SIP stack. It defines the methods
* required to represent and provision a proprietary SIP protocol stack.
* <p>
* This SipStack interface defines the methods that are be used by an
* application implementing the {@link javax.sip.SipListener} interface to
* control the architecture and setup of the SIP stack. These methods include:
* <ul>
* <li>Creation/deletion of {@link javax.sip.SipProvider}'s that represent
* messaging objects that can be used by an application to send
* {@link javax.sip.message.Request} and {@link javax.sip.message.Response}
* messages statelessly or statefully via Client and Server transactions.
* <li>Creation/deletion of {@link javax.sip.ListeningPoint}'s that represent
* different ports and transports that a SipProvider can use to send and
* receive messages.
* </ul>
* <b>Architecture:</b><br>
* This specification mandates a single SipStack object per IP Address. There
* is a one-to-many relationship between a SipStack and a SipProvider. There is
* a one-to-many relationship between a SipStack and a ListeningPoint.
* <p>
* <b>SipStack Creation</b><br>
* An application must create a SipStack by invoking the
* {@link SipFactory#createSipStack(Properties)} method, ensuring the
* {@link SipFactory#setPathName(String)} is set. Following the naming
* convention defined in {@link javax.sip.SipFactory}, the implementation of
* the SipStack interface must be called SipStackImpl. This specification also
* defines a stack configuration mechanism using java.util.Properties,
* therefore this constructor must also accept a properties argument:
* <p>
* <center>public SipStackImpl(Properties properties) {}</center>
* <p>
* The following table documents the static configuration properties which can
* be set for an implementation of a SipStack. This specification doesn't preclude
* additional values within a configuration properties object if understood by
* the underlying implementation. In order to change these properties after
* a SipStack has been initialized the SipStack must be deleted and recreated:
* <p>
* <center>
* <table border="1" bordercolorlight="#FFFFFF" bordercolordark="#000000" width="98%" cellpadding="3" cellspacing="0">
* <p class="title"></p>
* <tr bgcolor="#CCCCCC">
* <th align="left" valign="top">
* <p class="table"><strong><strong>SipStack Property</strong></strong>
* </th>
* <th align="left" valign="top">
* </a><p class="table"><strong>Description</strong></p>
* </th>
* </tr>
* <tr>
* <td align="left" valign="top">
* <p class="table">javax.sip.IP_ADDRESS</p>
* </td>
* <td align="left" valign="top">
* <p class="table">Sets the IP Address of the SipStack to the
* property value i.e 192.168.127.12. This property is mandatory.</p>
* </td>
* </tr>
* <tr>
* <td align="left" valign="top">
* <p class="table">javax.sip.STACK_NAME</p>
* </td>
* <td align="left" valign="top">
* <p class="table">Sets a user friendly name to identify the
* underlying stack implementation to the property value i.e.
* NISTv1.1. The stack name property should contain no spaces.
* This property is mandatory.</p>
* </td>
* </tr>
* <tr>
* <td align="left" valign="top">
* <p class="table">javax.sip.OUTBOUND_PROXY</p>
* </td>
* <td align="left" valign="top">
* <p class="table">Sets the outbound proxy of the SIP Stack.
* This property maps to the the outbound proxy parameter of the
* Router interface.
The format of the outbound proxy parameter should be
* "ipaddress:port/transport" i.e. 129.1.22.333:5060/UDP. This
* property is optional.</p>
* </td>
* </tr>
* <tr>
* <td align="left" valign="top">
* <p class="table">javax.sip.ROUTER_PATH</p>
* </td>
* <td align="left" valign="top">
* <p class="table">Sets the fully qualified classpath to the
* application supplied Router object that determines how to route
* messages before a dialog is established i.e. com.sun.javax.sip.RouteImpl.
* An application defined router object must implement the
* javax.sip.Router interface. Different routing policies may be
 * based on operation mode i.e. User Agent or Proxy. This property is optional.
* </td>
* </tr>
* <tr>
* <td align="left" valign="top">
* <p class="table">javax.sip.EXTENSION_METHODS</p>
* </td>
* <td align="left" valign="top">
* <p class="table">This configuration value informs the underlying
* implementation of supported extension methods that create new
* dialog's. This configuration flag should only be used for dialog
* creating extension methods, other extension methods that
* don't create dialogs can be used using the method parameter on
* Request assuming the implementation understands the method. If more
* than one method is supported in this property each extension
 * should be separated with a colon for example "FOO:BAR". This
* property is optional.</p>
* </td>
* </tr>
* <tr>
* <td align="left" valign="top">
* <p class="table">javax.sip.RETRANSMISSON_FILTER</p>
* </td>
* <td align="left" valign="top">
* <p class="table">The default retransmission behaviour of this
* specification is dependent on the application core and is defined
* as follows:
* <ul>
* <li>User Agent Client: Retransmissions of ACK Requests are the
* responsibility of the application. All other retansmissions are
* handled by the SipProvider.
* <li>User Agent Server: Retransmissions of 1XX, 2XX Responses are the
* responsibility of the application. All other retansmissions are
* handled by the SipProvider.
* <li>Stateful Proxy: As stateful proxies have no Invite
* transactions all retransmissions are handled by the SipProvider.
* <li>Stateless Proxy: As stateless proxies are not transactional
* all retransmissions are the responsibility of the application
* and will not be handled the SipProvider.
* </ul>
* This filter can be viewed as a helper function for User Agents
* that can be set by an application to prevent the application
* from handling retransmission of ACK Requests, 1XX and 2XX
* Responses for INVITE transactions, i.e. the SipProvider will
* handle the retransmissions. This utility is useful for hiding
* protocol retransmission semantics from higher level
* programming environments. The acceptable values are ON/OFF. This
* property is optional, therefore if not supplied the default is
* OFF.
* </td>
* </tr>
* </table>
* </center>
*
* @see SipFactory
* @see SipProvider
*
* @author Sun Microsystems
* @version 1.1
*
*/
type SipStack interface {
	// CreateSipProvider creates a new peer SipProvider attached to this
	// SipStack on the given ListeningPoint and returns it. The returned
	// error is non-nil (ObjectInUseException) if another SipProvider is
	// already using the ListeningPoint.
	CreateSipProvider(listeningPoint ListeningPoint) (sp SipProvider, ObjectInUseException error)

	// DeleteSipProvider detaches the given peer SipProvider from this
	// SipStack. Deleting a SipProvider does not automatically delete its
	// ListeningPoint. The returned error is non-nil (ObjectInUseException)
	// if the SipProvider is currently in use.
	DeleteSipProvider(sipProvider SipProvider) (ObjectInUseException error)

	// GetSipProviders returns a list of all SipProviders created by this
	// SipStack; the list is empty if none exist.
	GetSipProviders() *list.List //Iterator

	// CreateListeningPoint creates a new ListeningPoint on this SipStack
	// for the given port and transport and returns it. The returned error
	// is non-nil if the transport is not supported
	// (TransportNotSupportedException) or the port is invalid
	// (InvalidArgumentException).
	CreateListeningPoint(port int, transport string) (ListeningPoint, error)
	//throws TransportNotSupportedException, InvalidArgumentException;

	// DeleteListeningPoint detaches the given ListeningPoint from this
	// SipStack. The returned error is non-nil (ObjectInUseException) if
	// the ListeningPoint is currently in use.
	DeleteListeningPoint(listeningPoint ListeningPoint) (ObjectInUseException error)

	// GetListeningPoints returns a list of all ListeningPoints created by
	// this SipStack; the list is empty if none exist.
	GetListeningPoints() *list.List //Iterator

	// Configuration methods

	// GetStackName returns the user-friendly name identifying this
	// SipStack instance, as configured at stack creation.
	GetStackName() string

	// GetIPAddress returns the IP address identifying this SipStack
	// instance. Every SipStack has exactly one IP address, configured at
	// stack creation.
	GetIPAddress() string

	// GetRouter returns the Router holding the default routing
	// information for this SipStack, including the outbound proxy,
	// as configured at stack creation.
	GetRouter() message.Router

	// IsRetransmissionFilterActive reports whether the retransmission
	// filter helper for User Agents is enabled (default false). When
	// true, retransmissions of ACKs and 2XX responses to an INVITE are
	// handled by the SipProvider instead of the application.
	IsRetransmissionFilterActive() bool
} | sip/SipStack.go | 0.723114 | 0.502197 | SipStack.go | starcoder |
package i18n
import (
"sort"
"strings"
)
// Country represents a country information. ISO 3166-1.
type Country struct {
	Alpha2Code  string      // ISO alpha-2 country code
	Alpha3Code  string      // ISO alpha-3 country code
	NumericCode string      // ISO numeric country code
	Name        String      // display name; looked up per language via Name.Get
	Aliases     StringArray // alternative names; looked up per language via Aliases.Get
}
// Equal reports whether two countries are the same.
// It compares the alpha-2 codes case-insensitively.
func (x *Country) Equal(y *Country) bool {
	return strings.EqualFold(x.Alpha2Code, y.Alpha2Code)
}
// Countries represents a collection of Country.
// It implements sort.Interface, ordering by alpha-2 code.
type Countries []*Country

// Len implements sort.Interface.
func (cs Countries) Len() int {
	return len(cs)
}

// Less implements sort.Interface; countries are ordered by alpha-2 code.
func (cs Countries) Less(i, j int) bool {
	return cs[i].Alpha2Code < cs[j].Alpha2Code
}

// Swap implements sort.Interface.
func (cs Countries) Swap(i, j int) {
	cs[i], cs[j] = cs[j], cs[i]
}

// Sort sorts the collection in place by alpha-2 code.
func (cs Countries) Sort() {
	sort.Sort(cs)
}
// Package-level indexes built once in init from listOfCountryCodes
// (declared elsewhere in this package).
var (
	mapOfAlpha2CodeToCountry  map[string]*Country // alpha-2 code -> country
	mapOfAlpha3CodeToCountry  map[string]*Country // alpha-3 code -> country
	mapOfNumericCodeToCountry map[string]*Country // numeric code -> country
	listOfAllCountries        Countries           // all countries, sorted by alpha-2 code
)

// init builds the lookup maps and the sorted list of all countries.
// Each entry of listOfCountryCodes is assumed to be a
// [alpha2, alpha3, numeric] triple — confirm against its declaration.
func init() {
	mapOfAlpha2CodeToCountry = make(map[string]*Country)
	mapOfAlpha3CodeToCountry = make(map[string]*Country)
	mapOfNumericCodeToCountry = make(map[string]*Country)
	listOfAllCountries = make(Countries, len(listOfCountryCodes))
	for i, codes := range listOfCountryCodes {
		alpha2Code := codes[0]
		alpha3Code := codes[1]
		numericCode := codes[2]
		country := &Country{
			Alpha2Code:  alpha2Code,
			Alpha3Code:  alpha3Code,
			NumericCode: numericCode,
			Name:        make(String),
			Aliases:     make(StringArray),
		}
		mapOfAlpha2CodeToCountry[alpha2Code] = country
		mapOfAlpha3CodeToCountry[alpha3Code] = country
		mapOfNumericCodeToCountry[numericCode] = country
		listOfAllCountries[i] = country
	}
	listOfAllCountries.Sort()
}
// AllCountries returns the list of all countries, sorted by alpha-2 code.
// The returned slice is the package's internal list; callers must not mutate it.
func AllCountries() Countries {
	return listOfAllCountries
}
// LookupCountry returns the country matching the given code and true on
// success. The code may be an ISO alpha-2, alpha-3, or numeric code, in any
// letter case. It returns (nil, false) when no country matches.
func LookupCountry(code string) (*Country, bool) {
	c := strings.ToUpper(code)
	switch len(c) {
	case 2: // alpha-2 code
		if v, ok := mapOfAlpha2CodeToCountry[c]; ok {
			return v, true
		}
	case 3: // alpha-3 or numeric code
		if v, ok := mapOfAlpha3CodeToCountry[c]; ok {
			return v, true
		}
		if v, ok := mapOfNumericCodeToCountry[c]; ok {
			return v, true
		}
	}
	return nil, false
}
// SearchCountries returns the countries whose name or aliases (in the given
// language) contain the keyword, case-insensitively. The result order is
// unspecified because matches are deduplicated through a map.
func SearchCountries(lang *Language, keyword string) Countries {
	kw := strings.ToLower(keyword)
	// Deduplicate by alpha-2 code: a country may match on both name and alias.
	found := make(map[string]*Country)
	for _, country := range listOfAllCountries {
		// compare name
		if name, ok := country.Name.Get(lang); ok {
			if strings.Contains(strings.ToLower(name), kw) {
				found[country.Alpha2Code] = country
			}
		}
		// compare aliases (joined so one Contains call covers them all)
		if aliases, ok := country.Aliases.Get(lang); ok {
			str := strings.Join(aliases, StringArrayValueSeparator)
			if strings.Contains(strings.ToLower(str), kw) {
				found[country.Alpha2Code] = country
			}
		}
	}
	countries := make(Countries, len(found))
	index := 0
	for _, country := range found {
		countries[index] = country
		index++
	}
	return countries
} | i18n/country.go | 0.739046 | 0.477006 | country.go | starcoder |
package parcom
// position is a saved cursor location used to enforce layout rules.
type position struct {
	lineIndex, columnIndex int // -1 indicates invalid position.
}

// PositionalState is a position-aware parser state.
type PositionalState struct {
	State
	position position // most recently saved position; {-1,-1} when none
}

// NewPositionalState creates a parser state over the given input
// with no saved position.
func NewPositionalState(s string) *PositionalState {
	return &PositionalState{*NewState(s), position{-1, -1}}
}
// WithPosition creates a parser that saves the current cursor position
// before running p, and restores the previously saved position afterwards
// (so nested WithPosition calls stack correctly).
func (s *PositionalState) WithPosition(p Parser) Parser {
	return func() (interface{}, error) {
		pp := s.position // remember the outer saved position
		s.position = position{s.lineIndex, s.columnIndex}
		defer func() { s.position = pp }() // restore even if p fails
		return p()
	}
}
// Block parses a block of a given parser: zero or more occurrences of p,
// each starting in the same column as the first.
func (s *PositionalState) Block(p Parser) Parser {
	return s.WithPosition(s.Many(s.SameColumn(p)))
}

// Block1 is the same as Block but blocks must have at least one element.
func (s *PositionalState) Block1(p Parser) Parser {
	return s.WithPosition(s.Many1(s.SameColumn(p)))
}

// WithBlock creates a parser which parses a block of the second parser
// prefixed by the first parser. The block may be empty.
func (s *PositionalState) WithBlock(p, pp Parser) Parser {
	return s.withBlock(s.Block, p, pp)
}

// WithBlock1 is like WithBlock but the block must have at least one element.
func (s *PositionalState) WithBlock1(p, pp Parser) Parser {
	return s.withBlock(s.Block1, p, pp)
}

// withBlock runs p, then a block (built by b from pp) that must start on the
// same line as p or be indented relative to it.
func (s *PositionalState) withBlock(b func(Parser) Parser, p, pp Parser) Parser {
	return s.WithPosition(s.And(p, s.SameLineOrIndent(b(pp))))
}

// HeteroBlock creates a parser that runs the given parsers in sequence,
// requiring each to start in the same column.
func (s *PositionalState) HeteroBlock(ps ...Parser) Parser {
	qs := make([]Parser, 0, len(ps))
	for _, p := range ps {
		qs = append(qs, s.SameColumn(p))
	}
	return s.WithPosition(s.And(qs...))
}

// ExhaustiveBlock parses a block of a given parser exhaustively.
func (s *PositionalState) ExhaustiveBlock(p Parser) Parser {
	return s.WithPosition(s.ExhaustiveMany(s.SameColumn(p)))
}
// Indent creates a parser which requires the cursor to be indented past the
// saved position's column before running p. It is equivalent to p (no indent
// required) when no position has been saved beforehand.
func (s *PositionalState) Indent(p Parser) Parser {
	return func() (interface{}, error) {
		if s.position.columnIndex >= 0 && s.columnIndex <= s.position.columnIndex {
			return nil, NewError("invalid indent", &s.State)
		}
		return p()
	}
}

// SameLine creates a parser which requires the cursor to be on the same line
// as the saved position before running p. It is equivalent to p when no
// position has been saved.
func (s *PositionalState) SameLine(p Parser) Parser {
	return func() (interface{}, error) {
		if s.position.columnIndex >= 0 && s.lineIndex != s.position.lineIndex {
			return nil, NewError("should be in the same line", &s.State)
		}
		return p()
	}
}

// SameLineOrIndent creates a parser which parses something in the same line or indented.
func (s *PositionalState) SameLineOrIndent(p Parser) Parser {
	return s.Or(s.SameLine(p), s.Indent(p))
}

// SameColumn creates a parser which requires the cursor column to equal the
// saved position's column before running p.
// NOTE(review): the error text says "invalid indent" although the check is
// same-column — possibly intentional reuse of the message; confirm.
func (s *PositionalState) SameColumn(p Parser) Parser {
	return func() (interface{}, error) {
		if s.columnIndex != s.position.columnIndex {
			return nil, NewError("invalid indent", &s.State)
		}
		return p()
	}
} | positional_state.go | 0.824285 | 0.639497 | positional_state.go | starcoder |
package typ
import "sort"
// NewAlt creates a new alternative type from the given types.
// Nested alternatives are flattened and duplicates are dropped by addAlts.
func NewAlt(alts ...Type) (res Type) {
	res = Type{KindAlt, &Info{Params: make([]Param, 0, len(alts)*2)}}
	return addAlts(res, alts)
}

// Alt returns a new type alternative for a list of types. Other alternatives are flattened.
// If the first type is already an alternative, the following types are added.
func Alt(alts ...Type) (res Type) {
	if len(alts) == 0 {
		return Void
	}
	if fst := alts[0]; isAlt(fst) {
		return addAlts(fst, alts[1:])
	}
	return NewAlt(alts...)
}

// Choose returns type t with all type alternatives reduced to its most specific representation.
func Choose(t Type) (_ Type, err error) {
	return choose(t, nil)
}
// choose is the recursive worker behind Choose. hist holds the Info pointers
// currently being processed, guarding against infinite recursion on
// self-referential types.
func choose(t Type, hist []*Info) (_ Type, err error) {
	if t.Kind&MaskRef != KindAlt {
		// Not an alternative: recurse into parameters, if any.
		if !t.HasParams() {
			return t, nil
		}
		// Cycle guard: if this Info is already on the stack, stop here.
		for i := 0; i < len(hist); i++ {
			h := hist[len(hist)-1-i]
			if t.Info == h {
				return t, nil
			}
		}
		hist = append(hist, t.Info)
		// Rebuild the parameter list with each param's type reduced.
		// ps is allocated lazily on the first iteration.
		var ps []Param
		for i, p := range t.Params {
			p.Type, err = choose(p.Type, hist)
			if err != nil {
				return Void, err
			}
			if ps == nil {
				ps = make([]Param, 0, len(t.Params))
				ps = append(ps, t.Params[:i]...)
			}
			ps = append(ps, p)
		}
		// Copy Info so the original type value is left untouched.
		if ps != nil {
			nfo := *t.Info
			nfo.Params = ps
			t.Info = &nfo
		}
		return t, nil
	}
	// t is an alternative: fold its variants into the most specific
	// common type.
	if !t.HasParams() {
		return Void, nil
	}
	var a, b, tmp Type
	for i, p := range t.Params {
		if i == 0 {
			a = p.Type
			continue
		}
		a, tmp, err = Common(a, p.Type)
		if err != nil {
			return Void, err
		}
		// Track the most specific (lowest-kind) concrete candidate seen.
		if b == Void || b.Kind > tmp.Kind {
			b = tmp
		} else {
			b = a
		}
	}
	if b != Void {
		return b, nil
	}
	return a, nil
}
// fixAny maps "any"-kinds to 0 so they compare before all concrete kinds
// when ordering alternative parameters.
func fixAny(t Type) Kind {
	if t.Kind&KindAny == KindAny {
		return 0
	}
	return t.Kind
}

// hasAlt reports whether alternative type t already contains alt.
// t.Params is kept sorted by fixAny kind, so a binary search suffices.
func hasAlt(t, alt Type) bool {
	ps := t.Params
	i := sort.Search(len(ps), func(i int) bool {
		return fixAny(ps[i].Type) >= fixAny(alt)
	})
	return i < len(ps) && ps[i].Type == alt
}

// addAlt inserts a into alternative t, keeping Params sorted by fixAny kind
// and free of duplicates. If a is itself an alternative, its variants are
// flattened into t.
func addAlt(t, a Type) Type {
	if a.Kind != KindAlt {
		ps := t.Params
		// Find the insertion point for a in the sorted param list.
		i := sort.Search(len(ps), func(i int) bool {
			return fixAny(ps[i].Type) >= fixAny(a)
		})
		if i >= len(ps) {
			ps = append(ps, Param{Type: a})
		} else if ps[i].Type != a { // skip exact duplicates
			ps = append(ps[:i+1], ps[i:]...)
			ps[i] = Param{Type: a}
		}
		t.Params = ps
	} else if a.ParamLen() > 0 {
		// Flatten a nested alternative variant by variant.
		for _, p := range a.Params {
			t = addAlt(t, p.Type)
		}
	}
	return t
}

// addAlts inserts every type in alts into alternative t.
func addAlts(t Type, alts []Type) Type {
	for _, a := range alts {
		t = addAlt(t, a)
	}
	return t
} | typ/alts.go | 0.617167 | 0.404096 | alts.go | starcoder |
package bsoncore
import (
"errors"
"io"
"go.mongodb.org/mongo-driver/bson/bsontype"
)
// DocumentSequenceStyle is used to represent how a document sequence is laid out in a slice of
// bytes.
type DocumentSequenceStyle uint32

// These constants are the valid styles for a DocumentSequence.
const (
	_             DocumentSequenceStyle = iota
	SequenceStyle                       // documents laid out back to back
	ArrayStyle                          // documents wrapped in a single BSON array
)

// DocumentSequence represents a sequence of documents. The Style field indicates how the documents
// are laid out inside of the Data field.
type DocumentSequence struct {
	Style DocumentSequenceStyle
	Data  []byte
	Pos   int // read offset into Data, advanced by Next
}

// ErrCorruptedDocument is returned when a full document couldn't be read from the sequence.
var ErrCorruptedDocument = errors.New("invalid DocumentSequence: corrupted document")

// ErrNonDocument is returned when a DocumentSequence contains a non-document BSON value.
var ErrNonDocument = errors.New("invalid DocumentSequence: a non-document value was found in sequence")

// ErrInvalidDocumentSequenceStyle is returned when an unknown DocumentSequenceStyle is set on a
// DocumentSequence.
var ErrInvalidDocumentSequenceStyle = errors.New("invalid DocumentSequenceStyle")
// DocumentCount returns the number of documents in the sequence.
// It returns 0 for a nil sequence, an unknown style, or corrupted data.
func (ds *DocumentSequence) DocumentCount() int {
	if ds == nil {
		return 0
	}
	switch ds.Style {
	case SequenceStyle:
		// Documents are laid out back to back; walk them one by one.
		var count int
		var ok bool
		rem := ds.Data
		for len(rem) > 0 {
			_, rem, ok = ReadDocument(rem)
			if !ok {
				return 0
			}
			count++
		}
		return count
	case ArrayStyle:
		// Data is a single BSON array document: skip the 4-byte length,
		// then count elements; the final byte is the array terminator.
		_, rem, ok := ReadLength(ds.Data)
		if !ok {
			return 0
		}
		var count int
		for len(rem) > 1 {
			_, rem, ok = ReadElement(rem)
			if !ok {
				return 0
			}
			count++
		}
		return count
	default:
		return 0
	}
}
// Empty returns true if the sequence is empty. It always returns true for unknown sequence styles.
func (ds *DocumentSequence) Empty() bool {
	if ds == nil {
		return true
	}
	switch ds.Style {
	case SequenceStyle:
		return len(ds.Data) == 0
	case ArrayStyle:
		// An empty BSON array document is exactly 5 bytes:
		// a 4-byte length plus the terminating null byte.
		return len(ds.Data) <= 5
	}
	return true
}
// ResetIterator resets the iteration point for the Next method to the
// beginning of the document sequence. It is a no-op on a nil receiver.
func (ds *DocumentSequence) ResetIterator() {
	if ds == nil {
		return
	}
	ds.Pos = 0
}
// Documents returns a slice of the documents. If nil either the Data field is also nil or could not
// be properly read.
func (ds *DocumentSequence) Documents() ([]Document, error) {
	if ds == nil {
		return nil, nil
	}
	switch ds.Style {
	case SequenceStyle:
		// Walk the back-to-back documents until the data is exhausted.
		rem := ds.Data
		var docs []Document
		var doc Document
		var ok bool
		for {
			doc, rem, ok = ReadDocument(rem)
			if !ok {
				if len(rem) == 0 {
					break // clean end of data
				}
				return nil, ErrCorruptedDocument // trailing partial bytes
			}
			docs = append(docs, doc)
		}
		return docs, nil
	case ArrayStyle:
		if len(ds.Data) == 0 {
			return nil, nil
		}
		// Data is one BSON array document; each element must itself be
		// an embedded document.
		vals, err := Document(ds.Data).Values()
		if err != nil {
			return nil, ErrCorruptedDocument
		}
		docs := make([]Document, 0, len(vals))
		for _, v := range vals {
			if v.Type != bsontype.EmbeddedDocument {
				return nil, ErrNonDocument
			}
			docs = append(docs, v.Data)
		}
		return docs, nil
	default:
		return nil, ErrInvalidDocumentSequenceStyle
	}
}
// Next retrieves the next document from this sequence and returns it. This method will return
// io.EOF when it has reached the end of the sequence.
func (ds *DocumentSequence) Next() (Document, error) {
	if ds == nil || ds.Pos >= len(ds.Data) {
		return nil, io.EOF
	}
	switch ds.Style {
	case SequenceStyle:
		// Documents are back to back; read one and advance Pos past it.
		doc, _, ok := ReadDocument(ds.Data[ds.Pos:])
		if !ok {
			return nil, ErrCorruptedDocument
		}
		ds.Pos += len(doc)
		return doc, nil
	case ArrayStyle:
		if ds.Pos < 4 {
			if len(ds.Data) < 4 {
				return nil, ErrCorruptedDocument
			}
			ds.Pos = 4 // Skip the length of the document
		}
		if len(ds.Data[ds.Pos:]) == 1 && ds.Data[ds.Pos] == 0x00 {
			return nil, io.EOF // At the end of the document
		}
		// Each array element must be an embedded document.
		elem, _, ok := ReadElement(ds.Data[ds.Pos:])
		if !ok {
			return nil, ErrCorruptedDocument
		}
		ds.Pos += len(elem)
		val := elem.Value()
		if val.Type != bsontype.EmbeddedDocument {
			return nil, ErrNonDocument
		}
		return val.Data, nil
	default:
		return nil, ErrInvalidDocumentSequenceStyle
	}
} | vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go | 0.623033 | 0.438064 | document_sequence.go | starcoder |
package mtest
import (
"github.com/MaxBreida/mongo-go-driver/bson"
)
// BatchIdentifier specifies the keyword to identify the batch in a cursor response.
type BatchIdentifier string

// These constants specify valid values for BatchIdentifier.
const (
	FirstBatch BatchIdentifier = "firstBatch" // initial batch of a cursor
	NextBatch  BatchIdentifier = "nextBatch"  // subsequent getMore batches
)

// CommandError is a representation of a command error from the server.
type CommandError struct {
	Code    int32
	Message string
	Name    string
	Labels  []string
}

// WriteError is a representation of a write error from the server.
type WriteError struct {
	Index   int
	Code    int
	Message string
}

// WriteConcernError is a representation of a write concern error from the server.
type WriteConcernError struct {
	Name    string   `bson:"codeName"`
	Code    int      `bson:"code"`
	Message string   `bson:"errmsg"`
	Details bson.Raw `bson:"errInfo"`
}
// CreateCursorResponse creates a mock server response for a cursor command
// with the given cursor id, namespace, batch keyword (firstBatch/nextBatch),
// and batch documents.
func CreateCursorResponse(cursorID int64, ns string, identifier BatchIdentifier, batch ...bson.D) bson.D {
	// Pre-size the array; the final length is known up front.
	batchArr := make(bson.A, len(batch))
	for i, doc := range batch {
		batchArr[i] = doc
	}
	return bson.D{
		{"ok", 1},
		{"cursor", bson.D{
			{"id", cursorID},
			{"ns", ns},
			{string(identifier), batchArr},
		}},
	}
}
// CreateCommandErrorResponse creates a mock server response carrying the
// given command error; error labels are appended only when present.
func CreateCommandErrorResponse(ce CommandError) bson.D {
	res := bson.D{
		{"ok", 0},
		{"code", ce.Code},
		{"errmsg", ce.Message},
		{"codeName", ce.Name},
	}
	if len(ce.Labels) > 0 {
		var labelsArr bson.A
		for _, label := range ce.Labels {
			labelsArr = append(labelsArr, label)
		}
		res = append(res, bson.E{Key: "labels", Value: labelsArr})
	}
	return res
}
// CreateWriteErrorsResponse creates a mock server response with one or more
// write errors. (Fixes the misspelled parameter name "writeErrorrs"; Go
// parameter names are not part of the call-site interface.)
func CreateWriteErrorsResponse(writeErrors ...WriteError) bson.D {
	arr := make(bson.A, len(writeErrors))
	for idx, we := range writeErrors {
		arr[idx] = bson.D{
			{"index", we.Index},
			{"code", we.Code},
			{"errmsg", we.Message},
		}
	}
	return bson.D{
		{"ok", 1},
		{"writeErrors", arr},
	}
}
// CreateWriteConcernErrorResponse creates a mock server response with a
// write concern error; errInfo is included only when details are present.
func CreateWriteConcernErrorResponse(wce WriteConcernError) bson.D {
	wceDoc := bson.D{
		{"code", wce.Code},
		{"codeName", wce.Name},
		{"errmsg", wce.Message},
	}
	if len(wce.Details) > 0 {
		wceDoc = append(wceDoc, bson.E{Key: "errInfo", Value: wce.Details})
	}
	return bson.D{
		{"ok", 1},
		{"writeConcernError", wceDoc},
	}
}

// CreateSuccessResponse creates a response for a successful operation with the given elements.
func CreateSuccessResponse(elems ...bson.E) bson.D {
	res := bson.D{
		{"ok", 1},
	}
	return append(res, elems...)
} | mongo/integration/mtest/deployment_helpers.go | 0.693369 | 0.441011 | deployment_helpers.go | starcoder |
package block
import (
"fmt"
"github.com/df-mc/dragonfly/server/item"
)
// CoralType represents a type of coral of a block. CoralType, coral fans, and coral blocks carry one of these types.
type CoralType struct {
	coral
}

// TubeCoral returns the tube coral variant.
func TubeCoral() CoralType {
	return CoralType{0}
}

// BrainCoral returns the brain coral variant.
func BrainCoral() CoralType {
	return CoralType{1}
}

// BubbleCoral returns the bubble coral variant.
func BubbleCoral() CoralType {
	return CoralType{2}
}

// FireCoral returns the fire coral variant.
func FireCoral() CoralType {
	return CoralType{3}
}

// HornCoral returns the horn coral variant.
func HornCoral() CoralType {
	return CoralType{4}
}

// CoralTypes returns all coral types.
func CoralTypes() []CoralType {
	return []CoralType{TubeCoral(), BrainCoral(), BubbleCoral(), FireCoral(), HornCoral()}
}

// coral is the underlying numeric variant (0=tube … 4=horn) embedded in CoralType.
type coral uint8

// Uint8 returns the coral as a uint8.
func (c coral) Uint8() uint8 {
	return uint8(c)
}
// Colour returns the colour of the CoralType.
// It panics for values outside the five known variants.
func (c coral) Colour() item.Colour {
	switch c {
	case 0:
		return item.ColourBlue()
	case 1:
		return item.ColourPink()
	case 2:
		return item.ColourPurple()
	case 3:
		return item.ColourRed()
	case 4:
		return item.ColourYellow()
	}
	panic("unknown coral type")
}
// Name returns the display name of the coral variant.
// It panics for values outside the five known variants.
func (c coral) Name() string {
	names := [...]string{"Tube Coral", "Brain Coral", "Bubble Coral", "Fire Coral", "Horn Coral"}
	if int(c) < len(names) {
		return names[c]
	}
	panic("unknown coral type")
}
// FromString parses a lowercase variant name into a CoralType. An error is
// returned for any string other than the five known variants.
func (c coral) FromString(s string) (interface{}, error) {
	variants := map[string]coral{
		"tube":   0,
		"brain":  1,
		"bubble": 2,
		"fire":   3,
		"horn":   4,
	}
	if v, ok := variants[s]; ok {
		return CoralType{v}, nil
	}
	return nil, fmt.Errorf("unexpected coral type '%v', expecting one of 'tube', 'brain', 'bubble', 'fire', or 'horn'", s)
}
// String ...
func (c coral) String() string {
switch c {
case 0:
return "tube"
case 1:
return "brain"
case 2:
return "bubble"
case 3:
return "fire"
case 4:
return "horn"
}
panic("unknown coral type")
} | server/block/coral_type.go | 0.740831 | 0.44565 | coral_type.go | starcoder |
package stringnorm
import (
"errors"
)
// ErrNormalizeComplete is a sentinel value returned by a normalizer to
// request that no other normalizers be run. Compare with == (or errors.Is).
var ErrNormalizeComplete = errors.New("ErrNormalizeComplete")
// A Normalizer normalizes a string value.
type Normalizer interface {
	// Normalize copies the given text and normalizes and returns the copy.
	// If the normalizer does not recognize the text, it must return the
	// original text.
	// If the normalizer wishes to declare its result final, it must return
	// the new text and ErrNormalizeComplete.
	// If the normalizer wishes to reject the text as invalid, it may return
	// any other error.
	Normalize(text string) (string, error)
}
// A List of Normalizers, which applies each Normalizer in order.
type List []Normalizer
// Normalize runs text through each normalizer in order and returns the final
// result. A normalizer returning ErrNormalizeComplete stops the chain and is
// treated as success; any other error stops the chain and is returned along
// with the text produced so far.
func (n List) Normalize(text string) (string, error) {
	for _, norm := range n {
		next, err := norm.Normalize(text)
		text = next
		if err == ErrNormalizeComplete {
			return text, nil
		}
		if err != nil {
			return text, err
		}
	}
	return text, nil
}
// Normalize applies the list of string normalizers to text.
// It is a convenience wrapper around List.Normalize.
func Normalize(normalizers []Normalizer, text string) (string, error) {
	return List(normalizers).Normalize(text)
}
// Combine collapses the given normalizers into a single Normalizer that
// applies them in order (as a List does). Nil entries are dropped; when
// exactly one normalizer remains, it is returned directly without a
// List wrapper.
func Combine(normalizers ...Normalizer) Normalizer {
	kept := make(List, 0, len(normalizers))
	for _, norm := range normalizers {
		if norm == nil {
			continue
		}
		kept = append(kept, norm)
	}
	if len(kept) == 1 {
		return kept[0]
	}
	return kept
}
// NormalizeNoErr applies normalizer to text; errors are silently ignored,
// and the original text is returned on error.
func NormalizeNoErr(normalizer Normalizer, text string) string {
res, err := normalizer.Normalize(text)
if err != nil {
return text
}
return res
} | stringnorm/stringnorm.go | 0.551332 | 0.446495 | stringnorm.go | starcoder |
package classics
/*
Alice is taking a cryptography class and finding anagrams to be very useful.
We consider two strings to be anagrams of each other if the first string's
letters can be rearranged to form the second string. In other words, both
strings must contain the same exact letters in the same exact frequency
For example, bacdc and dcbac are anagrams, but bacdc and dcbad are not.
Alice decides on an encryption scheme involving two large strings where encryption
is dependent on the minimum number of character deletions required to make the two
strings anagrams. Can you help her find this number?
Given two strings, a and b, that may or may not be of the same length, determine
the minimum number of character deletions required to make a and b anagrams.
Any characters can be deleted from either of the strings.
For example, if a = "cde" and b = "abc", we can delete "d" and "e" from string a
and "a" and "b" from string b so that both remaining strings are "c" and "c",
which are anagrams.
Function Description
Complete the makeAnagram function in the editor below.
It must return an integer representing the minimum total
characters that must be deleted to make the strings anagrams.
makeAnagram has the following parameter(s):
a: a string
b: a string
*/
/* Solution:
Build a signed frequency table: each rune of a increments its entry, each rune
of b decrements it. After both passes, a positive entry means surplus
characters in a, a negative entry means surplus characters in b; either way
those characters must be deleted. The answer is therefore the sum of absolute
values of the table — the minimum deletions to give both strings identical
frequency tables (i.e. make them anagrams).
*/
// MakeAnagram returns the minimum number of character deletions (from either
// string) required to make a and b anagrams of each other.
func MakeAnagram(a string, b string) int32 {
	diff := make(map[rune]int32)
	for _, r := range a {
		diff[r]++
	}
	for _, r := range b {
		diff[r]--
	}
	var deletions int32
	for _, d := range diff {
		if d < 0 {
			d = -d
		}
		deletions += d
	}
	return deletions
}
package tracer
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"sort"
"strings"
"github.com/Jeffail/benthos/v3/lib/util/config"
yaml "gopkg.in/yaml.v3"
)
//------------------------------------------------------------------------------
// Errors for the tracer package.
var (
	// ErrInvalidTracerType is returned by New when the configured type has
	// no registered constructor.
	ErrInvalidTracerType = errors.New("invalid tracer type")
)
//------------------------------------------------------------------------------
// TypeSpec is a constructor and a usage description for each tracer type.
type TypeSpec struct {
	// constructor builds a tracer from its config section.
	constructor func(conf Config, opts ...func(Type)) (Type, error)
	// description is markdown documentation shown by Descriptions.
	description string
	// sanitiseConfigFunc, when set, overrides the generic config sanitiser.
	sanitiseConfigFunc func(conf Config) (interface{}, error)
}
// Constructors is a map of all tracer types with their specs.
var Constructors = map[string]TypeSpec{}
//------------------------------------------------------------------------------
// String constants representing each tracer type.
const (
	TypeJaeger = "jaeger"
	TypeNone = "none"
)
//------------------------------------------------------------------------------
// Type is an interface implemented by all tracer types.
type Type interface {
	// Close stops and cleans up the tracers resources.
	Close() error
}
//------------------------------------------------------------------------------
// Config is the all encompassing configuration struct for all tracer types.
// Type selects which of the per-tracer sections is active.
type Config struct {
	Type string `json:"type" yaml:"type"`
	Jaeger JaegerConfig `json:"jaeger" yaml:"jaeger"`
	None struct{} `json:"none" yaml:"none"`
}
// NewConfig returns a configuration struct fully populated with default
// values; the default tracer type is "none".
func NewConfig() Config {
	return Config{
		Type: TypeNone,
		Jaeger: NewJaegerConfig(),
		None: struct{}{},
	}
}
// SanitiseConfig returns a sanitised version of the Config, meaning sections
// that aren't relevant to behaviour are removed. The JSON round-trip turns
// the typed Config into a generic map so that only the "type" field and the
// active type's section survive in the output.
func SanitiseConfig(conf Config) (interface{}, error) {
	cBytes, err := json.Marshal(conf)
	if err != nil {
		return nil, err
	}
	hashMap := map[string]interface{}{}
	if err = json.Unmarshal(cBytes, &hashMap); err != nil {
		return nil, err
	}
	outputMap := config.Sanitised{}
	t := conf.Type
	outputMap["type"] = t
	// Prefer a type-specific sanitiser when one is registered; otherwise
	// copy the raw section straight from the round-tripped map.
	if sfunc := Constructors[t].sanitiseConfigFunc; sfunc != nil {
		if outputMap[t], err = sfunc(conf); err != nil {
			return nil, err
		}
	} else {
		outputMap[t] = hashMap[t]
	}
	return outputMap, nil
}
//------------------------------------------------------------------------------
// UnmarshalYAML ensures that when parsing configs that are in a map or slice
// the default values are still applied.
func (c *Config) UnmarshalYAML(value *yaml.Node) error {
	// The alias type drops Config's methods (including this one) so the
	// Decode below doesn't recurse back into UnmarshalYAML.
	type confAlias Config
	aliased := confAlias(NewConfig())
	if err := value.Decode(&aliased); err != nil {
		return fmt.Errorf("line %v: %v", value.Line, err)
	}
	// Decode a second time into a generic value for type inference.
	var raw interface{}
	if err := value.Decode(&raw); err != nil {
		return fmt.Errorf("line %v: %v", value.Line, err)
	}
	// When the explicit "type" field is absent, infer it from which tracer
	// sections are present; exactly one known candidate must match.
	if typeCandidates := config.GetInferenceCandidates(raw); len(typeCandidates) > 0 {
		var inferredType string
		for _, tc := range typeCandidates {
			if _, exists := Constructors[tc]; exists {
				if len(inferredType) > 0 {
					return fmt.Errorf("line %v: unable to infer type, multiple candidates '%v' and '%v'", value.Line, inferredType, tc)
				}
				inferredType = tc
			}
		}
		if len(inferredType) == 0 {
			return fmt.Errorf("line %v: unable to infer type, candidates were: %v", value.Line, typeCandidates)
		}
		aliased.Type = inferredType
	}
	*c = Config(aliased)
	return nil
}
//------------------------------------------------------------------------------
var header = "This document was generated with `benthos --list-tracers`" + `
A tracer type represents a destination for Benthos to send opentracing events to
such as [Jaeger](https://www.jaegertracing.io/).
When a tracer is configured all messages will be allocated a root span during
ingestion that represents their journey through a Benthos pipeline. Many Benthos
processors create spans, and so opentracing is a great way to analyse the
pathways of individual messages as they progress through a Benthos instance.
Some inputs, such as ` + "`http_server` and `http_client`" + `, are capable of
extracting a root span from the source of the message (HTTP headers). This is
a work in progress and should eventually expand so that all inputs have a way of
doing so.
A tracer config section looks like this:
` + "``` yaml" + `
tracer:
type: foo
foo:
bar: baz
` + "```" + `
WARNING: Although the configuration spec of this component is stable the format
of spans, tags and logs created by Benthos is subject to change as it is tuned
for improvement.`
// Descriptions returns a formatted string of collated descriptions of each
// type. Output is markdown: the package header followed by one "##" section
// per registered tracer (alphabetical), each with its sanitised example
// config in a yaml fence.
func Descriptions() string {
	// Order our types alphabetically
	names := []string{}
	for name := range Constructors {
		names = append(names, name)
	}
	sort.Strings(names)
	buf := bytes.Buffer{}
	buf.WriteString("Tracer Types\n")
	buf.WriteString(strings.Repeat("=", 12))
	buf.WriteString("\n\n")
	buf.WriteString(header)
	buf.WriteString("\n\n")
	// Append each description
	for i, name := range names {
		var confBytes []byte
		// Sanitisation/marshalling failures are deliberately non-fatal:
		// the config fence is simply omitted for that tracer.
		conf := NewConfig()
		conf.Type = name
		if confSanit, err := SanitiseConfig(conf); err == nil {
			confBytes, _ = config.MarshalYAML(confSanit)
		}
		buf.WriteString("## ")
		buf.WriteString("`" + name + "`")
		buf.WriteString("\n")
		if confBytes != nil {
			buf.WriteString("\n``` yaml\n")
			buf.Write(confBytes)
			buf.WriteString("```\n")
		}
		buf.WriteString(Constructors[name].description)
		// No trailing blank lines after the final section.
		if i != (len(names) - 1) {
			buf.WriteString("\n\n")
		}
	}
	return buf.String()
}
// New creates a tracer type based on a configuration. ErrInvalidTracerType is
// returned when conf.Type has no registered constructor.
func New(conf Config, opts ...func(Type)) (Type, error) {
	spec, ok := Constructors[conf.Type]
	if !ok {
		return nil, ErrInvalidTracerType
	}
	return spec.constructor(conf, opts...)
}
//------------------------------------------------------------------------------ | lib/tracer/constructor.go | 0.794505 | 0.411939 | constructor.go | starcoder |
package hash
import (
"fmt"
"math/big"
"math/rand"
"reflect"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
)
const (
	// HashLength is the expected length of the hash in bytes.
	HashLength = 32
)
var (
	// Zero is an empty hash.
	Zero = Hash{}
	// hashT caches the reflect.Type of Hash for JSON unmarshalling.
	hashT = reflect.TypeOf(Hash{})
)
// Hash represents the 32 byte hash of arbitrary data.
type Hash [HashLength]byte
// Hashes is an ordered slice of hashes.
type Hashes []Hash
// HashesSet is an unordered set of hashes.
type HashesSet map[Hash]struct{}
// BytesToHash sets b to hash.
// If b is larger than len(h), b will be cropped from the left.
func BytesToHash(b []byte) Hash {
	var h Hash
	h.SetBytes(b)
	return h
}
// BigToHash sets byte representation of b to hash.
// If b is larger than len(h), b will be cropped from the left.
func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) }
// HexToHash sets byte representation of s to hash.
// If s is larger than len(h), it will be cropped from the left.
// Panics when s is not valid "0x"-prefixed hex (hexutil.MustDecode).
func HexToHash(s string) Hash { return BytesToHash(hexutil.MustDecode(s)) }
// Bytes gets the byte representation of the underlying hash.
func (h Hash) Bytes() []byte { return h[:] }
// Big converts a hash to a big integer.
func (h Hash) Big() *big.Int { return new(big.Int).SetBytes(h[:]) }
// Hex converts a hash to a hex string.
func (h Hash) Hex() string { return hexutil.Encode(h[:]) }
// TerminalString implements log.TerminalStringer, formatting a string for console
// output during logging: the first three and last three bytes separated by "…".
func (h Hash) TerminalString() string {
	return fmt.Sprintf("%x…%x", h[:3], h[29:])
}
// String implements the stringer interface and is used also by the logger when
// doing full logging into a file.
func (h Hash) String() string {
	return h.Hex()
}
// Format implements fmt.Formatter, forcing the byte slice to be formatted as is,
// without going through the stringer interface used for logging.
func (h Hash) Format(s fmt.State, c rune) {
	fmt.Fprintf(s, "%"+string(c), h[:])
}
// UnmarshalText parses a hash in hex syntax; the input must decode to
// exactly HashLength bytes.
func (h *Hash) UnmarshalText(input []byte) error {
	return hexutil.UnmarshalFixedText("Hash", input, h[:])
}
// UnmarshalJSON parses a hash in hex syntax.
func (h *Hash) UnmarshalJSON(input []byte) error {
	return hexutil.UnmarshalFixedJSON(hashT, input, h[:])
}
// MarshalText returns the hex representation of h.
func (h Hash) MarshalText() ([]byte, error) {
	return hexutil.Bytes(h[:]).MarshalText()
}
// SetBytes sets the hash to the value of b.
// If b is larger than len(h), b will be cropped from the left; if it is
// shorter, it is right-aligned and the leading bytes are left as-is.
func (h *Hash) SetBytes(b []byte) {
	if len(b) > len(h) {
		b = b[len(b)-HashLength:]
	}
	copy(h[HashLength-len(b):], b)
}
// FakeHash generates a random fake hash for testing purposes. When a seed is
// supplied the result is deterministic for that seed; otherwise the shared
// math/rand source is used.
func FakeHash(seed ...int64) (h common.Hash) {
	read := rand.Read
	if len(seed) > 0 {
		read = rand.New(rand.NewSource(seed[0])).Read
	}
	if _, err := read(h[:]); err != nil {
		panic(err)
	}
	return h
}
/*
 * HashesSet methods:
 */
// NewHashesSet makes hash index.
func NewHashesSet(h ...Hash) HashesSet {
	hh := HashesSet{}
	hh.Add(h...)
	return hh
}
// Copy copies hashes to a new structure.
func (hh HashesSet) Copy() HashesSet {
	ee := make(HashesSet, len(hh))
	for k, v := range hh {
		ee[k] = v
	}
	return ee
}
// String returns human readable string representation.
// Order is nondeterministic (map iteration).
func (hh HashesSet) String() string {
	ss := make([]string, 0, len(hh))
	for h := range hh {
		ss = append(ss, h.String())
	}
	return "[" + strings.Join(ss, ", ") + "]"
}
// Slice returns whole index as slice.
// Order is nondeterministic (map iteration).
func (hh HashesSet) Slice() Hashes {
	arr := make(Hashes, len(hh))
	i := 0
	for h := range hh {
		arr[i] = h
		i++
	}
	return arr
}
// Add inserts the given hashes into the set. Adding a hash that is already
// present is a no-op.
// Fix: dropped the redundant bare `return` at the end of the void function
// (flagged by staticcheck S1023).
func (hh HashesSet) Add(hash ...Hash) {
	for _, h := range hash {
		hh[h] = struct{}{}
	}
}
// Erase removes the given hashes from the set. Erasing a hash that is not
// present is a no-op.
// Fix: dropped the redundant bare `return` at the end of the void function
// (flagged by staticcheck S1023).
func (hh HashesSet) Erase(hash ...Hash) {
	for _, h := range hash {
		delete(hh, h)
	}
}
// Contains returns true if hash is in the set.
func (hh HashesSet) Contains(hash Hash) bool {
	_, ok := hh[hash]
	return ok
}
/*
 * Hashes methods:
 */
// NewHashes makes hash slice.
func NewHashes(h ...Hash) Hashes {
	hh := Hashes{}
	hh.Add(h...)
	return hh
}
// Copy copies hashes to a new structure, preserving order.
func (hh Hashes) Copy() Hashes {
	ee := make(Hashes, len(hh))
	for k, v := range hh {
		ee[k] = v
	}
	return ee
}
// String returns human readable string representation, preserving order.
func (hh Hashes) String() string {
	ss := make([]string, 0, len(hh))
	for _, h := range hh {
		ss = append(ss, h.String())
	}
	return "[" + strings.Join(ss, ", ") + "]"
}
// Set returns whole index as a HashesSet; duplicates collapse.
func (hh Hashes) Set() HashesSet {
	set := make(HashesSet, len(hh))
	for _, h := range hh {
		set[h] = struct{}{}
	}
	return set
}
// Add appends hash to the slice. Requires a pointer receiver because
// append may reallocate the backing array.
func (hh *Hashes) Add(hash ...Hash) {
	*hh = append(*hh, hash...)
}
package chart
import (
"fmt"
"math"
util "github.com/t-mw/go-chart/util"
)
// AnnotationSeries is a series of labels on the chart.
type AnnotationSeries struct {
	Name string
	Style Style
	YAxis YAxisType
	// Annotations holds the labelled (x, y) points to draw.
	Annotations []Value2
}
// GetName returns the name of the time series.
func (as AnnotationSeries) GetName() string {
	return as.Name
}
// GetStyle returns the line style.
func (as AnnotationSeries) GetStyle() Style {
	return as.Style
}
// GetYAxis returns which YAxis the series draws on.
func (as AnnotationSeries) GetYAxis() YAxisType {
	return as.YAxis
}
// annotationStyleDefaults builds the fallback style for annotations, taking
// font, stroke colour and stroke width from the chart-level defaults and
// filling the rest with package-level annotation constants.
func (as AnnotationSeries) annotationStyleDefaults(defaults Style) Style {
	return Style{
		FontColor: DefaultTextColor,
		Font: defaults.Font,
		FillColor: DefaultAnnotationFillColor,
		FontSize: DefaultAnnotationFontSize,
		StrokeColor: defaults.StrokeColor,
		StrokeWidth: defaults.StrokeWidth,
		Padding: DefaultAnnotationPadding,
	}
}
// Measure returns a bounds box of the series: the union of every
// annotation's measured box. The box starts inverted (Top/Left at MaxInt32,
// Right/Bottom at 0) so the first annotation establishes the initial bounds;
// if the series is hidden the inverted box is returned unchanged.
func (as AnnotationSeries) Measure(r Renderer, canvasBox Box, xrange, yrange Range, defaults Style) Box {
	box := Box{
		Top: math.MaxInt32,
		Left: math.MaxInt32,
		Right: 0,
		Bottom: 0,
	}
	// A zero Style counts as "shown" — only an explicit Show=false hides.
	if as.Style.IsZero() || as.Style.Show {
		seriesStyle := as.Style.InheritFrom(as.annotationStyleDefaults(defaults))
		for _, a := range as.Annotations {
			style := a.Style.InheritFrom(seriesStyle)
			// Translate data coordinates into canvas pixels.
			lx := canvasBox.Left + xrange.Translate(a.XValue)
			ly := canvasBox.Bottom - yrange.Translate(a.YValue)
			ab := Draw.MeasureAnnotation(r, canvasBox, style, lx, ly, a.Label)
			box.Top = util.Math.MinInt(box.Top, ab.Top)
			box.Left = util.Math.MinInt(box.Left, ab.Left)
			box.Right = util.Math.MaxInt(box.Right, ab.Right)
			box.Bottom = util.Math.MaxInt(box.Bottom, ab.Bottom)
		}
	}
	return box
}
// Render draws the series: one annotation per entry, using the same style
// inheritance and coordinate translation as Measure. Hidden series
// (Show=false on a non-zero Style) draw nothing.
func (as AnnotationSeries) Render(r Renderer, canvasBox Box, xrange, yrange Range, defaults Style) {
	if as.Style.IsZero() || as.Style.Show {
		seriesStyle := as.Style.InheritFrom(as.annotationStyleDefaults(defaults))
		for _, a := range as.Annotations {
			style := a.Style.InheritFrom(seriesStyle)
			lx := canvasBox.Left + xrange.Translate(a.XValue)
			ly := canvasBox.Bottom - yrange.Translate(a.YValue)
			Draw.Annotation(r, canvasBox, style, lx, ly, a.Label)
		}
	}
}
// Validate validates the series.
func (as AnnotationSeries) Validate() error {
if len(as.Annotations) == 0 {
return fmt.Errorf("annotation series requires annotations to be set and not empty")
}
return nil
} | annotation_series.go | 0.835013 | 0.462048 | annotation_series.go | starcoder |
package api
import (
"fmt"
"regexp"
"strings"
"github.com/mattermost/chewbacca/internal/utils"
"github.com/mattermost/chewbacca/model"
"github.com/google/go-github/v31/github"
"k8s.io/apimachinery/pkg/util/sets"
)
const (
	// ReleaseNoteLabelNeeded defines the label used when a missing release-note label is blocking the
	// merge.
	ReleaseNoteLabelNeeded = "do-not-merge/release-note-label-needed"
	releaseNote = "release-note"
	releaseNoteNone = "release-note-none"
	releaseNoteActionRequired = "release-note-action-required"
	deprecationLabel = "kind/deprecation"
	releaseNoteFormat = `Adding the "%s" label because no release-note block was detected, please follow our [release note process](https://github.com/mattermost/chewbacca#release-notes-process) to remove it.`
	releaseNoteDeprecationFormat = `Adding the "%s" label and removing any existing "%s" label because there is a "%s" label on the PR.`
	actionRequiredNote = "action required"
)
var (
	releaseNoteBody = fmt.Sprintf(releaseNoteFormat, ReleaseNoteLabelNeeded)
	releaseNoteDeprecationBody = fmt.Sprintf(releaseNoteDeprecationFormat, ReleaseNoteLabelNeeded, releaseNoteNone, deprecationLabel)
	// noteMatcherRE captures the contents of a ```release-note fenced block
	// in a PR body (dot matches newlines via (?s)).
	noteMatcherRE = regexp.MustCompile(`(?s)(?:Release note\*\*:\s*(?:<!--[^<>]*-->\s*)?` + "```(?:release-note)?|```release-note)(.+?)```")
	// noneRe matches a release-note block that says (only) "NONE", case-insensitively.
	noneRe = regexp.MustCompile(`(?i)^\W*NONE\W*$`)
	// allRNLabels is every release-note-related label this handler manages.
	allRNLabels = []string{
		releaseNoteNone,
		releaseNoteActionRequired,
		ReleaseNoteLabelNeeded,
		releaseNote,
	}
	// releaseNoteNoneRe matches a "/release-note-none" slash command on its own line.
	releaseNoteNoneRe = regexp.MustCompile(`(?mi)^/release-note-none\s*$`)
)
// handleReleaseNotesPR reconciles the release-note labels of a PR whenever
// its body is edited, it is opened, or a label is added: it determines the
// correct release-note label from the PR body, posts an explanatory comment
// when the label-needed marker is being applied, adds the chosen label, and
// removes all other release-note labels.
func handleReleaseNotesPR(c *Context, pr *github.PullRequestEvent) {
	// Only consider events that edit the PR body or add a label
	if pr.GetAction() != model.PullRequestActionOpened &&
		pr.GetAction() != model.PullRequestActionEdited &&
		pr.GetAction() != model.PullRequestActionLabeled {
		return
	}
	org := pr.GetRepo().GetOwner().GetLogin()
	repo := pr.GetRepo().GetName()
	number := pr.GetNumber()
	user := pr.GetPullRequest().GetUser().GetLogin()
	// NOTE(review): on error we log but continue with an empty label set —
	// presumably intentional best-effort behaviour; confirm.
	prInitLabels, err := c.GitHub.GetIssueLabels(org, repo, number)
	if err != nil {
		c.Logger.WithError(err).Errorf("failed to list labels on PR #%d", number)
	}
	prLabels := utils.LabelsSet(prInitLabels)
	var comments []*github.IssueComment
	labelToAdd := determineReleaseNoteLabel(pr.GetPullRequest().GetBody(), prLabels)
	if labelToAdd == ReleaseNoteLabelNeeded {
		if prLabels.Has(deprecationLabel) {
			// Deprecation PRs always need a real note; comment once.
			if !prLabels.Has(ReleaseNoteLabelNeeded) {
				comment := utils.FormatSimpleResponse(user, releaseNoteDeprecationBody)
				c.GitHub.CreateComment(org, repo, number, comment)
			}
		} else {
			// A "/release-note-none" comment can stand in for the block.
			comments, err = c.GitHub.ListIssueComments(org, repo, number)
			if err != nil {
				c.Logger.WithError(err).Errorf("failed to list comments on %s/%s#%d.", org, repo, number)
				return
			}
			if containsNoneCommand(comments) {
				labelToAdd = releaseNoteNone
			} else if !prLabels.Has(ReleaseNoteLabelNeeded) {
				comment := utils.FormatSimpleResponse(user, releaseNoteBody)
				c.GitHub.CreateComment(org, repo, number, comment)
			}
		}
	}
	// Add the label if needed
	// NOTE(review): errors from CreateComment/AddLabels are ignored here —
	// confirm this is intended best-effort behaviour.
	if !prLabels.Has(labelToAdd) {
		c.GitHub.AddLabels(org, repo, number, []string{labelToAdd})
		prLabels.Insert(labelToAdd)
	}
	err = removeOtherLabels(
		func(l string) error {
			return c.GitHub.RemoveLabel(org, repo, number, l)
		},
		labelToAdd,
		allRNLabels,
		prLabels,
	)
	if err != nil {
		// NOTE(review): WithError(err) builds a log entry but no level
		// method (.Error()/.Warn()) is called, so nothing is emitted.
		c.Logger.WithError(err)
	}
}
// handleReleaseNotesComment processes a new "/release-note-none" comment on
// a PR: it verifies the commenter is the author or an org member, rejects
// the command when the PR body already carries a real release note, applies
// the release-note-none label, and strips all other release-note labels.
func handleReleaseNotesComment(c *Context, ic *github.IssueCommentEvent) error {
	// Only consider PRs and new comments.
	if !ic.GetIssue().IsPullRequest() || ic.GetAction() != model.IssueCommentActionCreated {
		return nil
	}
	org := ic.GetRepo().GetOwner().GetLogin()
	repo := ic.GetRepo().GetName()
	number := ic.GetIssue().GetNumber()
	// Which label does the comment want us to add?
	switch {
	case releaseNoteNoneRe.MatchString(ic.GetComment().GetBody()):
		c.Logger.Info("release note none command match")
	default:
		return nil
	}
	// Only allow authors and org members to add labels.
	isMember, err := c.GitHub.IsMember(org, ic.GetComment().GetUser().GetLogin())
	if err != nil {
		c.Logger.WithError(err).Error("failed to get the membership")
		return err
	}
	isAuthor := utils.IsAuthor(ic.GetIssue().GetUser().GetLogin(), ic.GetComment().GetUser().GetLogin())
	if !isMember && !isAuthor {
		c.Logger.Info("not member or author")
		format := "you can only set the release note label to %s if you are the PR author or an org member."
		resp := fmt.Sprintf(format, releaseNoteNone)
		c.GitHub.CreateComment(org, repo, number, utils.FormatICResponse(ic.GetComment(), resp))
		return nil
	}
	// Don't allow the /release-note-none command if the release-note block contains a valid release note.
	blockNL := determineReleaseNoteLabel(ic.GetIssue().GetBody(), utils.LabelsSet(ic.GetIssue().Labels))
	if blockNL == releaseNote || blockNL == releaseNoteActionRequired {
		// NOTE(review): Info is called with printf-style args; this logger
		// call likely needs Infof to interpolate %s — confirm logger API.
		c.Logger.Info("there is a release note already or it is a blocker: %s", blockNL)
		format := "you can only set the release note label to %s if the release-note block in the PR body text is empty or \"none\"."
		resp := fmt.Sprintf(format, releaseNoteNone)
		c.GitHub.CreateComment(org, repo, number, utils.FormatICResponse(ic.GetComment(), resp))
		return nil
	}
	if !utils.HasLabel(releaseNoteNone, ic.GetIssue().Labels) {
		// NOTE(review): typo "relese" in the log message below (left as-is;
		// runtime strings are out of scope for a doc-only pass).
		c.Logger.Info("adding relese note none label")
		if err := c.GitHub.AddLabels(org, repo, number, []string{releaseNoteNone}); err != nil {
			return err
		}
	}
	labels := sets.String{}
	for _, label := range ic.Issue.Labels {
		labels.Insert(label.GetName())
	}
	// Remove all other release-note-* labels if necessary.
	return removeOtherLabels(
		func(l string) error {
			return c.GitHub.RemoveLabel(org, repo, number, l)
		},
		releaseNoteNone,
		allRNLabels,
		labels,
	)
}
// removeOtherLabels removes every label in labelSet (other than the one to
// keep) that is currently present, via the supplied remover. Each candidate
// is also deleted from currentLabels regardless of remover success; removal
// errors are collected and reported together.
func removeOtherLabels(remover func(string) error, label string, labelSet []string, currentLabels sets.String) error {
	var failures []error
	for _, candidate := range labelSet {
		if candidate == label || !currentLabels.Has(candidate) {
			continue
		}
		if err := remover(candidate); err != nil {
			failures = append(failures, err)
		}
		currentLabels.Delete(candidate)
	}
	if len(failures) > 0 {
		return fmt.Errorf("encountered %d errors setting labels: %v", len(failures), failures)
	}
	return nil
}
// containsNoneCommand reports whether any of the comments contains a
// "/release-note-none" slash command.
func containsNoneCommand(comments []*github.IssueComment) bool {
	for _, c := range comments {
		if releaseNoteNoneRe.MatchString(c.GetBody()) {
			return true
		}
	}
	return false
}
// getReleaseNote returns the release note from a PR body
// assumes that the PR body followed the PR template.
// Returns "" when no release-note block is found.
func getReleaseNote(body string) string {
	potentialMatch := noteMatcherRE.FindStringSubmatch(body)
	if potentialMatch == nil {
		return ""
	}
	return strings.TrimSpace(potentialMatch[1])
}
// determineReleaseNoteLabel returns the label to be added based on the contents of the 'release-note'
// section of a PR's body text, as well as the set of PR's labels.
func determineReleaseNoteLabel(body string, prLabels sets.String) string {
composedReleaseNote := strings.ToLower(strings.TrimSpace(getReleaseNote(body)))
hasNoneNoteInPRBody := noneRe.MatchString(composedReleaseNote)
hasDeprecationLabel := prLabels.Has(deprecationLabel)
switch {
case composedReleaseNote == "" && hasDeprecationLabel:
return ReleaseNoteLabelNeeded
case composedReleaseNote == "":
return ReleaseNoteLabelNeeded
case hasNoneNoteInPRBody && hasDeprecationLabel:
return ReleaseNoteLabelNeeded
case hasNoneNoteInPRBody:
return releaseNoteNone
case strings.Contains(composedReleaseNote, actionRequiredNote):
return releaseNoteActionRequired
default:
return releaseNote
}
} | internal/api/release_notes.go | 0.539954 | 0.512937 | release_notes.go | starcoder |
package mathutil
import "math"
// Sum returns the summation of the given float64 values; 0 for an empty slice.
func Sum(vals []float64) float64 {
	total := 0.0
	for _, v := range vals {
		total += v
	}
	return total
}
// Average returns the arithmetic mean of the given float64 values.
// Returns zero for an empty slice.
func Average(vals []float64) float64 {
	if len(vals) == 0 {
		return 0
	}
	var total float64
	for _, v := range vals {
		total += v
	}
	return total / float64(len(vals))
}
// StdDev returns the (population) standard deviation of the given values
// around the supplied average. Returns zero for an empty slice.
func StdDev(vals []float64, avg float64) float64 {
	n := len(vals)
	if n == 0 {
		return 0
	}
	var sumSquares float64
	for _, v := range vals {
		d := v - avg
		sumSquares += d * d
	}
	return math.Sqrt(sumSquares / float64(n))
}
// StdAverage returns the pooled standard deviation of several groups, given
// each group's standard deviation and sample count. Returns zero when the
// total degrees of freedom is zero.
func StdAverage(stds []float64, nums []int) float64 {
	var weighted float64
	var dof int
	for i, s := range stds {
		weighted += float64(nums[i]-1) * s * s
		dof += nums[i] - 1
	}
	if dof == 0 {
		return 0
	}
	return math.Sqrt(weighted / float64(dof))
}
// Score returns the 3-sigma score of last against the given avg and std:
// (last - avg) / (3 * std). Nearly all values (99.7%) of a normal
// distribution lie within 3 standard deviations of the mean, so |score| > 1
// marks an outlier. When std is zero, only the sign of the deviation is
// reported (-1, 0 or 1).
func Score(last float64, avg float64, std float64) float64 {
	if std != 0 {
		return (last - avg) / (3 * std) // 3-sigma
	}
	// Degenerate (zero-spread) distribution: report the sign only.
	switch {
	case last > avg:
		return 1
	case last < avg:
		return -1
	default:
		return 0
	}
}
// Min returns the minimum value of the float64 slice.
// Returns -Inf for an empty slice. NaN entries never compare below the
// running minimum and are therefore skipped.
func Min(vals []float64) float64 {
	if len(vals) == 0 {
		return math.Inf(-1)
	}
	smallest := math.Inf(1)
	for i := 0; i < len(vals); i++ {
		if vals[i] < smallest {
			smallest = vals[i]
		}
	}
	return smallest
}
// Max returns the maximum value of the float64 slice.
// Returns +Inf for an empty slice. NaN entries never compare above the
// running maximum and are therefore skipped.
func Max(vals []float64) float64 {
	if len(vals) == 0 {
		return math.Inf(1)
	}
	largest := math.Inf(-1)
	for i := 0; i < len(vals); i++ {
		if vals[i] > largest {
			largest = vals[i]
		}
	}
	return largest
}
// Saturation clamps val to the closed interval spanned by from and to
// (in either order): values above the upper bound return the upper bound,
// values below the lower bound return the lower bound, anything in between
// is returned unchanged.
func Saturation(val, from, to float64) float64 {
	hi := math.Max(from, to)
	lo := math.Min(from, to)
	switch {
	case val > hi:
		return hi
	case val < lo:
		return lo
	}
	return val
}
// AbsMin returns the element of the slice with the smallest absolute value
// (the original signed value, not its magnitude).
// Returns -Inf for an empty slice.
func AbsMin(vals []float64) float64 {
	if len(vals) == 0 {
		return math.Inf(-1)
	}
	closest := math.Inf(1)
	for i := 0; i < len(vals); i++ {
		if math.Abs(vals[i]) < math.Abs(closest) {
			closest = vals[i]
		}
	}
	return closest
}
package neuralnetwork
import "math"
// JaccardIndex returns the Jaccard index.
//
// NOTE(review): sum and len(predicted) are both ints, so the integer
// division truncates the result to 0 unless every element matches (then 1).
// A float64 return would be more informative, but changing the signature
// would break callers — flagged rather than changed.
// NOTE(review): counting positions where predicted[i] == actual[i] measures
// elementwise agreement, not the set-based Jaccard index; confirm the
// intended semantics with callers.
func JaccardIndex(predicted, actual []float64) int {
	var sum int
	for i := range predicted {
		if predicted[i] == actual[i] {
			sum++
		}
	}
	return sum / len(predicted)
}
// F1Score returns the F1 Score: 2·(precision·recall)/(precision+recall).
//
// NOTE(review): Precision and Recall return ints, so all of this arithmetic
// is integer arithmetic — intermediate truncation limits the result to 0 or
// 1, and a zero denominator causes a runtime panic (integer divide by zero).
// Consider float64 throughout (signature change, so flagged only).
func F1Score(predicted, actual []float64) int {
	return 2 * (Precision(predicted, actual) * Recall(predicted, actual)) / (Precision(predicted, actual) + Recall(predicted, actual))
}
// Sensitivity returns the sensitivity (true positive rate): TP / (TP + FN).
// NOTE(review): integer division truncates the result; see F1Score.
func Sensitivity(predicted, actual []float64) int {
	tp := TruePositivies(predicted, actual)
	fn := FalseNegatives(predicted, actual)
	return tp / (tp + fn)
}
// Specificity returns the specificity (true negative rate): TN / (TN + FP).
// Fix: the previous implementation computed FP / (FP + TN), which is the
// false positive rate (1 - specificity), not the specificity.
// NOTE(review): integer division truncates the result to 0 or 1; a float64
// return would be more useful but changes the signature.
func Specificity(predicted, actual []float64) int {
	fp := FalsePositives(predicted, actual)
	tn := TrueNegatives(predicted, actual)
	return tn / (tn + fp)
}
// Precision returns the precision: TP / (TP + FP).
// NOTE(review): integer division truncates the result to 0 or 1, and a zero
// denominator panics; see F1Score.
func Precision(predicted, actual []float64) int {
	tp := TruePositivies(predicted, actual)
	fp := FalsePositives(predicted, actual)
	return tp / (tp + fp)
}
// Recall returns the recall: TP / (TP + FN).
// NOTE(review): integer division truncates the result to 0 or 1, and a zero
// denominator panics; see F1Score.
func Recall(predicted, actual []float64) int {
	tp := TruePositivies(predicted, actual)
	fn := FalseNegatives(predicted, actual)
	return tp / (tp + fn)
}
// TruePositivies returns the number of true positives: positions where both
// the prediction and the ground truth are the positive class.
// Assumes binary labels encoded as 1 (positive) and 0 (negative) — TODO
// confirm the encoding with callers.
// Fix: the previous implementation counted every agreement between predicted
// and actual, which also included true negatives.
// (Name keeps the historical typo to preserve the exported API.)
func TruePositivies(predicted, actual []float64) int {
	var count int
	for i := range predicted {
		if predicted[i] == 1 && actual[i] == 1 {
			count++
		}
	}
	return count
}
// TrueNegatives returns the number of true negatives: positions where both
// the prediction and the ground truth are the negative class.
// Assumes binary labels encoded as 1 (positive) and 0 (negative) — TODO
// confirm the encoding with callers.
// Fix: the previous implementation shared TruePositivies' body (counting all
// agreements) instead of counting negative-negative positions.
func TrueNegatives(predicted, actual []float64) int {
	var count int
	for i := range predicted {
		if predicted[i] == 0 && actual[i] == 0 {
			count++
		}
	}
	return count
}
// FalsePositives returns the number of false positives: positions where the
// prediction is positive but the ground truth is negative.
// Assumes binary labels encoded as 1 (positive) and 0 (negative) — TODO
// confirm the encoding with callers.
// Fix: the previous implementation counted agreements (predicted == actual),
// which is the opposite of a false positive.
func FalsePositives(predicted, actual []float64) int {
	var count int
	for i := range predicted {
		if predicted[i] == 1 && actual[i] == 0 {
			count++
		}
	}
	return count
}
// FalseNegatives returns the number of false negatives: positions where the
// prediction is negative but the ground truth is positive.
// Assumes binary labels encoded as 1 (positive) and 0 (negative) — TODO
// confirm the encoding with callers.
// Fix: the previous implementation counted agreements (predicted == actual),
// which is the opposite of a false negative.
func FalseNegatives(predicted, actual []float64) int {
	var count int
	for i := range predicted {
		if predicted[i] == 0 && actual[i] == 1 {
			count++
		}
	}
	return count
}
// Mse returns the mean squared error between the prediction and truth
// arrays. Returns 0 for empty input.
// Fix: the previous implementation returned the *sum* of squared errors,
// contradicting the "mean" in the name and documentation; the total is now
// divided by the number of elements. Callers relying on the old
// sum-of-squares behaviour must multiply by len(prediction).
func Mse(prediction, truth []float64) float64 {
	if len(prediction) == 0 {
		return 0
	}
	var total float64
	for i := range prediction {
		d := truth[i] - prediction[i]
		total += d * d
	}
	return total / float64(len(prediction))
}
// Rmse returns the root mean squared error between prediction and truth
// arrays — the square root of whatever Mse returns.
func Rmse(prediction, truth []float64) float64 {
	return math.Sqrt(Mse(prediction, truth))
}
// SoftDiceLoss implements the soft dice loss with additive smoothing of 1:
//	1 - (2·Σ(vᵢ·tᵢ) + 1) / (Σvᵢ + Σtᵢ + 1)
// A perfect overlap between values and truth yields 0.
// Fix: the previous implementation subtracted the loop-invariant sum(values)
// from both the numerator and the denominator on *every* iteration, which
// does not correspond to any form of the dice coefficient. The helper call
// to sum() is no longer needed.
func SoftDiceLoss(values []float64, truth []float64) float64 {
	var intersection, total float64
	for i := range values {
		intersection += values[i] * truth[i]
		total += values[i] + truth[i]
	}
	return 1 - (2*intersection+1)/(total+1)
}
// sum returns the total of all values; 0 for an empty slice.
func sum(values []float64) float64 {
	acc := 0.0
	for i := range values {
		acc += values[i]
	}
	return acc
}
// RidgeRegression returns the sum of squared errors plus an L2 penalty term
// lambda · Σ actual[i]².
//
// NOTE(review): ridge regression conventionally penalizes the model
// *weights*, not the target values; penalizing `actual` here looks
// suspicious — confirm whether a weights slice should be passed instead.
func RidgeRegression(actual, pred []float64, lambda float64) float64 {
	var loss float64
	var l2 float64
	for i := range actual {
		loss += math.Pow(pred[i]-actual[i], 2)
		l2 += lambda * math.Pow(actual[i], 2)
	}
	return loss + l2
}
// LassoRegression returns the sum of squared errors plus an L1 penalty term
// lambda · Σ |actual[i]|.
//
// NOTE(review): as with RidgeRegression above, lasso conventionally
// penalizes the model *weights*, not the target values — confirm whether a
// weights slice should be passed instead.
func LassoRegression(actual, pred []float64, lambda float64) float64 {
	var loss float64
	var l1 float64
	for i := range actual {
		loss += math.Pow(pred[i]-actual[i], 2)
		l1 += lambda * math.Abs(actual[i])
	}
	return loss + l1
}
// CrossEntropy returns the binary cross entropy loss
//	-Σ [ yᵢ·log(pᵢ) + (1-yᵢ)·log(1-pᵢ) ]
// where pᵢ = prediction[i] is a probability in (0, 1) and yᵢ = truth[i] is
// the target label. The result is non-negative for valid probabilities;
// lower is better.
// Fix: the previous implementation took logarithms of the *truth* values
// (yielding -Inf for binary 0/1 targets) and omitted the leading negation,
// so it produced negative values for a loss.
func CrossEntropy(prediction, truth []float64) float64 {
	var loss float64
	for i := range prediction {
		loss -= truth[i]*math.Log(prediction[i]) + (1-truth[i])*math.Log(1-prediction[i])
	}
	return loss
}
package xeval
import (
"github.com/juju/errors"
"github.com/pingcap/tidb/util/types"
"github.com/pingcap/tipb/go-tipb"
)
// evalLogicOps computes LogicAnd, LogicOr, LogicXor results of two operands.
func (e *Evaluator) evalLogicOps(expr *tipb.Expr) (types.Datum, error) {
	op := expr.GetTp()
	if op == tipb.ExprType_Not {
		return e.evalNot(expr)
	}
	left, right, err := e.getTwoChildren(expr)
	if err != nil {
		return types.Datum{}, errors.Trace(err)
	}
	switch op {
	case tipb.ExprType_And:
		return e.evalAnd(left, right)
	case tipb.ExprType_Or:
		return e.evalOr(left, right)
	case tipb.ExprType_Xor:
		return e.evalXor(left, right)
	}
	return types.Datum{}, errors.Errorf("Unknown binop type: %v", op)
}
// evalBool evaluates expr and folds it to an SQL boolean: 0, 1, or
// compareResultNull when the value is NULL.
func (e *Evaluator) evalBool(expr *tipb.Expr) (int64, error) {
	d, err := e.Eval(expr)
	if err != nil {
		return 0, errors.Trace(err)
	}
	if d.IsNull() {
		return compareResultNull, nil
	}
	return d.ToBool(e.sc)
}
// evalAnd computes (X && Y) with short-circuit evaluation: a false left
// operand skips evaluating the right one entirely. NULL combined with
// TRUE yields NULL (an unset Datum).
func (e *Evaluator) evalAnd(left, right *tipb.Expr) (types.Datum, error) {
	var res types.Datum
	lv, err := e.evalBool(left)
	if err != nil {
		return res, errors.Trace(err)
	}
	if lv == 0 {
		res.SetInt64(0)
		return res, nil
	}
	rv, err := e.evalBool(right)
	if err != nil {
		return res, errors.Trace(err)
	}
	switch {
	case rv == 0:
		res.SetInt64(0)
	case lv == compareResultNull || rv == compareResultNull:
		// NULL AND TRUE is NULL: leave res unset.
	default:
		res.SetInt64(1)
	}
	return res, nil
}
// evalOr computes (X || Y) with short-circuit evaluation: a true left
// operand skips evaluating the right one entirely. NULL combined with
// FALSE yields NULL (an unset Datum).
func (e *Evaluator) evalOr(left, right *tipb.Expr) (types.Datum, error) {
	var res types.Datum
	lv, err := e.evalBool(left)
	if err != nil {
		return res, errors.Trace(err)
	}
	if lv == 1 {
		res.SetInt64(1)
		return res, nil
	}
	rv, err := e.evalBool(right)
	if err != nil {
		return res, errors.Trace(err)
	}
	switch {
	case rv == 1:
		res.SetInt64(1)
	case lv == compareResultNull || rv == compareResultNull:
		// NULL OR FALSE is NULL: leave res unset.
	default:
		res.SetInt64(0)
	}
	return res, nil
}
// evalXor computes result of (X XOR Y). Any NULL operand yields NULL
// (an unset Datum); otherwise the result is 1 when the two boolean values
// differ and 0 when they match.
func (e *Evaluator) evalXor(left, right *tipb.Expr) (types.Datum, error) {
	var d types.Datum
	leftBool, err := e.evalBool(left)
	if err != nil {
		return d, errors.Trace(err)
	}
	if leftBool == compareResultNull {
		return d, nil
	}
	// BUG FIX: the original called e.evalBool(left) a second time here,
	// so the right operand was never evaluated and XOR always compared
	// left with itself.
	rightBool, err := e.evalBool(right)
	if err != nil {
		return d, errors.Trace(err)
	}
	if rightBool == compareResultNull {
		return d, nil
	}
	if leftBool == rightBool {
		d.SetInt64(0)
		return d, nil
	}
	d.SetInt64(1)
	return d, nil
}
// evalNot computes result of (!X).
func (e *Evaluator) evalNot(expr *tipb.Expr) (types.Datum, error) {
if len(expr.Children) != 1 {
return types.Datum{}, ErrInvalid.Gen("NOT need 1 operand, got %d", len(expr.Children))
}
d, err := e.Eval(expr.Children[0])
if err != nil {
return types.Datum{}, errors.Trace(err)
}
if d.IsNull() {
return d, nil
}
boolVal, err := d.ToBool(e.sc)
if err != nil {
return types.Datum{}, errors.Trace(err)
}
if boolVal == 1 {
return types.NewIntDatum(0), nil
}
return types.NewIntDatum(1), nil
} | distsql/xeval/eval_logic_ops.go | 0.571767 | 0.422922 | eval_logic_ops.go | starcoder |
package shapes
import (
"fmt"
"image"
"image/color"
"image/draw"
"image/jpeg"
"image/png"
"log"
"math"
"os"
"path/filepath"
"runtime"
"strings"
)
// Package-level clamping helpers shared by all shape constructors; they
// are assigned in init() below.
var saneLength, saneRadius, saneSides func(int) int

func init() {
	saneLength = makeBoundedIntFunc(1, 4096) // image dimensions, in pixels
	saneRadius = makeBoundedIntFunc(1, 1024) // circle/polygon radius
	saneSides = makeBoundedIntFunc(3, 60)    // regular-polygon side count
}
// makeBoundedIntFunc returns a closure that clamps its argument into
// [minimum, maximum], logging the calling function's name whenever it has
// to adjust an out-of-range value.
func makeBoundedIntFunc(minimum, maximum int) func(int) int {
	return func(x int) int {
		clamped := x
		if clamped < minimum {
			clamped = minimum
		} else if clamped > maximum {
			clamped = maximum
		}
		if clamped != x {
			log.Printf("%s(): replaced %d with %d\n", caller(1), x, clamped)
		}
		return clamped
	}
}
// Shaper is the minimal interface every drawable shape satisfies: a fill
// colour accessor/mutator plus the ability to render itself onto an image
// at a given point.
type Shaper interface {
	Fill() color.Color
	SetFill(fill color.Color)
	Draw(img draw.Image, x, y int) error
}
// CircularShaper extends Shaper with radius access for round shapes.
type CircularShaper interface {
	Shaper // Fill(); SetFill(); Draw()
	Radius() int
	SetRadius(radius int)
}
// RegularPolygonalShaper extends CircularShaper with side-count access
// for regular polygons (which are drawn inscribed in a circle).
type RegularPolygonalShaper interface {
	CircularShaper // Fill(); SetFill(); Draw(); Radius(); SetRadius()
	Sides() int
	SetSides(sides int)
}
/*
shape is the common base of every drawable shape: just a fill colour.

Both shape and newShape() are unexported so that shapes can only be
created through this package's constructors (e.g. NewCircle()), which
guarantee valid field values — the zero values are not acceptable here.
Of course the privacy can only be enforced outside the shapes package.
*/
type shape struct{ fill color.Color }
// newShape builds the shared shape base, silently substituting black for
// a nil fill colour.
func newShape(fill color.Color) shape {
	if fill != nil {
		return shape{fill}
	}
	return shape{color.Black}
}
// Fill returns the shape's current fill colour.
func (shape shape) Fill() color.Color { return shape.fill }
// SetFill updates the fill colour; a nil colour is silently replaced by
// black, mirroring newShape.
func (shape *shape) SetFill(fill color.Color) {
	if fill != nil {
		shape.fill = fill
		return
	}
	shape.fill = color.Black
}
// Circle is a drawable circle. The zero value is invalid! Use NewCircle()
// to create a valid Circle.
type Circle struct {
	shape      // embedded fill colour plus Fill/SetFill
	radius int // always kept within saneRadius bounds
}
// NewCircle builds a valid Circle. By calling newShape() we pass on any
// checking (nil fill → black) to newShape() without having to know what,
// if any, is required; saneRadius clamps the radius into bounds.
func NewCircle(fill color.Color, radius int) *Circle {
	return &Circle{newShape(fill), saneRadius(radius)}
}
// Radius returns the circle's radius in pixels.
func (circle *Circle) Radius() int {
	return circle.radius
}
// SetRadius updates the radius, clamping it into saneRadius bounds.
func (circle *Circle) SetRadius(radius int) {
	circle.radius = saneRadius(radius)
}
// Draw renders the circle outline onto img centred at (x, y) using the
// integer midpoint circle algorithm:
// http://en.wikipedia.org/wiki/Midpoint_circle_algorithm
// The radius is already guaranteed in-bounds by NewCircle()/SetRadius(),
// but (x, y) might lie outside the image, so that is checked here.
func (circle *Circle) Draw(img draw.Image, x, y int) error {
	if err := checkBounds(img, x, y); err != nil {
		return err
	}
	fill, radius := circle.fill, circle.radius
	x0, y0 := x, y
	// f is the midpoint decision variable; ddF_x/ddF_y are its increments.
	f := 1 - radius
	ddF_x, ddF_y := 1, -2*radius
	x, y = 0, radius
	// Plot the four axis-aligned cardinal points first.
	img.Set(x0, y0+radius, fill)
	img.Set(x0, y0-radius, fill)
	img.Set(x0+radius, y0, fill)
	img.Set(x0-radius, y0, fill)
	for x < y {
		if f >= 0 {
			y--
			ddF_y += 2
			f += ddF_y
		}
		x++
		ddF_x += 2
		f += ddF_x
		// Mirror the computed point into all eight octants.
		img.Set(x0+x, y0+y, fill)
		img.Set(x0-x, y0+y, fill)
		img.Set(x0+x, y0-y, fill)
		img.Set(x0-x, y0-y, fill)
		img.Set(x0+y, y0+x, fill)
		img.Set(x0-y, y0+x, fill)
		img.Set(x0+y, y0-x, fill)
		img.Set(x0-y, y0-x, fill)
	}
	return nil
}
// String implements fmt.Stringer with a readable circle description.
func (circle *Circle) String() string {
	return fmt.Sprintf("circle(fill=%v, radius=%d)", circle.fill,
		circle.radius)
}
// checkBounds returns an error when (x, y) lies outside img's bounds.
func checkBounds(img image.Image, x, y int) error {
	if !image.Rect(x, y, x, y).In(img.Bounds()) {
		// Dropped the trailing "\n" from the error string: Go error
		// strings must not end with punctuation or a newline (go vet
		// flags fmt.Errorf calls ending in \n), as callers may wrap them.
		return fmt.Errorf("%s(): point (%d, %d) is outside the image",
			caller(1), x, y)
	}
	return nil
}
// caller returns the base name of the function `steps` frames above the
// function that invoked caller, or "?" when the stack cannot be inspected.
func caller(steps int) string {
	pc, _, _, ok := runtime.Caller(steps + 1)
	if !ok {
		return "?"
	}
	return filepath.Base(runtime.FuncForPC(pc).Name())
}
// RegularPolygon is a drawable regular polygon inscribed in a Circle.
// The zero value is invalid! Use NewRegularPolygon() to create a valid
// RegularPolygon.
type RegularPolygon struct {
	*Circle     // centre, radius and fill of the circumscribing circle
	sides   int // always kept within saneSides bounds
}
// NewRegularPolygon builds a valid RegularPolygon. Fill and radius
// validation is delegated to NewCircle; saneSides clamps the side count.
func NewRegularPolygon(fill color.Color, radius,
	sides int) *RegularPolygon {
	circle := NewCircle(fill, radius)
	return &RegularPolygon{Circle: circle, sides: saneSides(sides)}
}
// Sides returns the polygon's side count.
func (polygon *RegularPolygon) Sides() int {
	return polygon.sides
}
// SetSides updates the side count, clamping it into saneSides bounds.
func (polygon *RegularPolygon) SetSides(sides int) {
	polygon.sides = saneSides(sides)
}
// Draw renders the polygon outline onto img centred at (x, y) by joining
// consecutive apexes with straight lines. Radius and sides are already
// validated by the constructor; only (x, y) needs a bounds check here.
func (polygon *RegularPolygon) Draw(img draw.Image, x, y int) error {
	if err := checkBounds(img, x, y); err != nil {
		return err
	}
	// getPoints returns sides+1 points with the first repeated at the
	// end, so consecutive pairs trace a closed outline.
	apexes := getPoints(x, y, polygon.sides, float64(polygon.Radius()))
	for i := 1; i < len(apexes); i++ {
		drawLine(img, apexes[i-1], apexes[i], polygon.Fill())
	}
	return nil
}
func getPoints(x, y, sides int, radius float64) []image.Point {
points := make([]image.Point, sides+1)
// Compute the shape's apexes (thanks to <NAME>)
fullCircle := 2 * math.Pi
x0, y0 := float64(x), float64(y)
for i := 0; i < sides; i++ {
θ := float64(float64(i) * fullCircle / float64(sides))
x1 := x0 + (radius * math.Sin(θ))
y1 := y0 + (radius * math.Cos(θ))
points[i] = image.Pt(int(x1), int(y1))
}
points[sides] = points[0] // close the shape
return points
}
// drawLine draws a straight line from start to end in the given fill
// colour using a Bresenham-style error accumulator.
// (Based on the author's Perl Image::Base.pm line() method.)
func drawLine(img draw.Image, start, end image.Point,
	fill color.Color) {
	x0, x1 := start.X, end.X
	y0, y1 := start.Y, end.Y
	Δx := math.Abs(float64(x1 - x0))
	Δy := math.Abs(float64(y1 - y0))
	if Δx >= Δy { // shallow slope: step along x, occasionally stepping y
		if x0 > x1 { // normalise so we always walk left to right
			x0, y0, x1, y1 = x1, y1, x0, y0
		}
		y := y0
		yStep := 1
		if y0 > y1 {
			yStep = -1
		}
		// Start the error at -Δx/2 so y steps midway between columns.
		remainder := float64(int(Δx/2)) - Δx
		for x := x0; x <= x1; x++ {
			img.Set(x, y, fill)
			remainder += Δy
			if remainder >= 0.0 {
				remainder -= Δx
				y += yStep
			}
		}
	} else { // steep slope: step along y, occasionally stepping x
		if y0 > y1 { // normalise so we always walk top to bottom
			x0, y0, x1, y1 = x1, y1, x0, y0
		}
		x := x0
		xStep := 1
		if x0 > x1 {
			xStep = -1
		}
		remainder := float64(int(Δy/2)) - Δy
		for y := y0; y <= y1; y++ {
			img.Set(x, y, fill)
			remainder += Δx
			if remainder >= 0.0 {
				remainder -= Δy
				x += xStep
			}
		}
	}
}
// String implements fmt.Stringer with a readable polygon description.
func (polygon *RegularPolygon) String() string {
	return fmt.Sprintf("polygon(fill=%v, radius=%d, sides=%d)",
		polygon.Fill(), polygon.Radius(), polygon.sides)
}
// Option carries the optional construction parameters accepted by New.
type Option struct {
	Fill   color.Color // nil is treated as black by the constructors
	Radius int         // clamped by saneRadius
}
// New is the shape factory: it maps a shape name ("circle", "triangle",
// "square", …, "decagon") to a constructed Shaper.
// The returned Shaper can only call Shaper methods unless a type
// assertion is used; the general interface is returned deliberately so
// non-circular shapes can be added later without changing callers.
func New(shape string, option Option) (Shaper, error) {
	sidesForShape := map[string]int{"triangle": 3, "square": 4,
		"pentagon": 5, "hexagon": 6, "heptagon": 7, "octagon": 8,
		"enneagon": 9, "nonagon": 9, "decagon": 10}
	if sides, ok := sidesForShape[shape]; ok {
		return NewRegularPolygon(option.Fill, option.Radius, sides), nil
	}
	if shape == "circle" {
		return NewCircle(option.Fill, option.Radius), nil
	}
	return nil, fmt.Errorf("shapes.New(): invalid shape '%s'", shape)
}
// FilledImage returns a new RGBA image of the given (clamped) dimensions,
// uniformly painted with fill; a nil fill is treated as black.
func FilledImage(width, height int, fill color.Color) draw.Image {
	if fill == nil { // silently treat a nil colour as black
		fill = color.Black
	}
	bounds := image.Rect(0, 0, saneLength(width), saneLength(height))
	img := image.NewRGBA(bounds)
	draw.Draw(img, img.Bounds(), &image.Uniform{fill}, image.ZP, draw.Src)
	return img
}
// DrawShapes renders every shape at (x, y), re-drawing each one shifted
// one pixel right and one pixel down so lines appear thicker (useful for
// screenshots). The first drawing error aborts the whole operation.
func DrawShapes(img draw.Image, x, y int, shapes ...Shaper) error {
	offsets := []image.Point{{0, 0}, {1, 0}, {0, 1}}
	for _, shape := range shapes {
		for _, off := range offsets {
			if err := shape.Draw(img, x+off.X, y+off.Y); err != nil {
				return err
			}
		}
	}
	return nil
}
func SaveImage(img image.Image, filename string) error {
file, err := os.Create(filename)
if err != nil {
return err
}
defer file.Close()
switch strings.ToLower(filepath.Ext(filename)) {
case ".jpg", ".jpeg":
return jpeg.Encode(file, img, nil)
case ".png":
return png.Encode(file, img)
}
return fmt.Errorf("shapes.SaveImage(): '%s' has an unrecognized "+
"suffix", filename)
} | src/shaper1/shapes/shapes.go | 0.815085 | 0.462109 | shapes.go | starcoder |
package mp4
import (
"encoding/binary"
)
// SliceWriter - write numbers to a []byte slice. The caller must supply a
// buffer large enough for all writes: the buffer is never grown, and
// writing past its end panics.
type SliceWriter struct {
	buf []byte // destination buffer, fixed size
	pos int    // next write offset into buf
}
// NewSliceWriter - create a writer around data, positioned at its start.
func NewSliceWriter(data []byte) *SliceWriter {
	return &SliceWriter{buf: data, pos: 0}
}
// WriteUint8 - write a single byte and advance the position by 1.
func (b *SliceWriter) WriteUint8(n byte) {
	b.buf[b.pos] = n
	b.pos++
}
// WriteUint16 - write a uint16 in big-endian order and advance by 2.
func (b *SliceWriter) WriteUint16(n uint16) {
	binary.BigEndian.PutUint16(b.buf[b.pos:], n)
	b.pos += 2
}
// WriteInt16 - write an int16 (two's complement) big-endian, advance by 2.
func (b *SliceWriter) WriteInt16(n int16) {
	binary.BigEndian.PutUint16(b.buf[b.pos:], uint16(n))
	b.pos += 2
}
// WriteUint32 - write a uint32 in big-endian order and advance by 4.
func (b *SliceWriter) WriteUint32(n uint32) {
	binary.BigEndian.PutUint32(b.buf[b.pos:], n)
	b.pos += 4
}
// WriteInt32 - write an int32 (two's complement) big-endian, advance by 4.
func (b *SliceWriter) WriteInt32(n int32) {
	binary.BigEndian.PutUint32(b.buf[b.pos:], uint32(n))
	b.pos += 4
}
// WriteUint64 - write a uint64 in big-endian order and advance by 8.
func (b *SliceWriter) WriteUint64(n uint64) {
	binary.BigEndian.PutUint64(b.buf[b.pos:], n)
	b.pos += 8
}
// WriteInt64 - write an int64 (two's complement) big-endian, advance by 8.
func (b *SliceWriter) WriteInt64(n int64) {
	binary.BigEndian.PutUint64(b.buf[b.pos:], uint64(n))
	b.pos += 8
}
// WriteString - copy s into the buffer, optionally appending a single
// zero byte as a C-style terminator.
// NOTE(review): ranging over a string yields runes, and byte(r) truncates
// any non-ASCII code point to its low byte rather than writing its UTF-8
// bytes — confirm callers only pass ASCII strings.
func (b *SliceWriter) WriteString(s string, addZeroEnd bool) {
	for _, r := range s {
		b.buf[b.pos] = byte(r)
		b.pos++
	}
	if !addZeroEnd {
		return
	}
	b.buf[b.pos] = 0
	b.pos++
}
// WriteZeroBytes - write n zero bytes, advancing the position by n.
func (b *SliceWriter) WriteZeroBytes(n int) {
	for i := n; i > 0; i-- {
		b.buf[b.pos] = 0
		b.pos++
	}
}
// WriteBytes - copy byteSlice into the buffer at the current position.
func (b *SliceWriter) WriteBytes(byteSlice []byte) {
	for i := range byteSlice {
		b.buf[b.pos] = byteSlice[i]
		b.pos++
	}
}
// WriteUnityMatrix - write a unity matrix for mvhd or tkhd
func (b *SliceWriter) WriteUnityMatrix() {
b.WriteUint32(0x00010000) // = 1 fixed 16.16
b.WriteUint32(0)
b.WriteUint32(0)
b.WriteUint32(0)
b.WriteUint32(0x00010000) // = 1 fixed 16.16
b.WriteUint32(0)
b.WriteUint32(0)
b.WriteUint32(0)
b.WriteUint32(0x40000000) // = 1 fixed 2.30
} | mp4/slicewriter.go | 0.598782 | 0.432303 | slicewriter.go | starcoder |
package challenges
import (
"errors"
"github.com/offchainlabs/arbitrum/packages/arb-util/machine"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/valprotocol"
)
// AssertionDefender bundles what is needed to defend an execution
// assertion in a challenge: the precondition it starts from, how many VM
// steps it covers, and a machine snapshot at its starting state.
type AssertionDefender struct {
	precondition *valprotocol.Precondition
	numSteps     uint64
	initState    machine.Machine
}
// NewAssertionDefender builds a defender, cloning initState so later
// execution cannot mutate the caller's machine.
func NewAssertionDefender(precondition *valprotocol.Precondition, numSteps uint64, initState machine.Machine) AssertionDefender {
	return AssertionDefender{
		precondition: precondition,
		numSteps:     numSteps,
		initState:    initState.Clone(),
	}
}
// NumSteps returns the number of VM steps this assertion covers.
func (ad AssertionDefender) NumSteps() uint64 {
	return ad.numSteps
}
// GetPrecondition returns the precondition this assertion starts from.
func (ad AssertionDefender) GetPrecondition() *valprotocol.Precondition {
	return ad.precondition
}
// GetMachineState returns the machine snapshot at the assertion's start.
// Note: the snapshot itself is returned, not a clone.
func (ad AssertionDefender) GetMachineState() machine.Machine {
	return ad.initState
}
// NBisect splits this defender's assertion into up to `slices` contiguous
// segments (fewer when there are fewer steps than requested slices). For
// each segment it executes the machine forward and returns both a new
// defender rooted at the segment's start state and the stub of the
// assertion the segment produced. Preconditions are chained: each
// segment's postcondition becomes the next segment's precondition.
func (ad AssertionDefender) NBisect(slices uint64) ([]AssertionDefender, []*valprotocol.ExecutionAssertionStub) {
	nsteps := ad.NumSteps()
	if nsteps < slices {
		slices = nsteps
	}
	defenders := make([]AssertionDefender, 0, slices)
	assertions := make([]*valprotocol.ExecutionAssertionStub, 0, slices)
	m := ad.initState.Clone()
	pre := ad.precondition
	for i := uint64(0); i < slices; i++ {
		steps := valprotocol.CalculateBisectionStepCount(i, slices, nsteps)
		// Snapshot before executing so the defender for this segment
		// starts from the segment's initial state, not its final one.
		initState := m.Clone()
		assertion, numSteps := m.ExecuteAssertion(
			steps,
			pre.BeforeInbox,
			0,
		)
		defenders = append(defenders, NewAssertionDefender(
			pre,
			numSteps,
			initState,
		))
		stub := valprotocol.NewExecutionAssertionStubFromAssertion(assertion)
		assertions = append(assertions, stub)
		pre = pre.GeneratePostcondition(stub)
	}
	return defenders, assertions
}
// SolidityOneStepProof marshals the start-state machine into the proof
// format consumed by the on-chain one-step-proof verifier.
func (ad AssertionDefender) SolidityOneStepProof() ([]byte, error) {
	return ad.initState.MarshalForProof()
}
func ChooseAssertionToChallenge(
m machine.Machine,
pre *valprotocol.Precondition,
assertions []*valprotocol.ExecutionAssertionStub,
totalSteps uint64,
) (uint16, machine.Machine, error) {
assertionCount := uint64(len(assertions))
for i := range assertions {
steps := valprotocol.CalculateBisectionStepCount(uint64(i), assertionCount, totalSteps)
initState := m.Clone()
generatedAssertion, numSteps := m.ExecuteAssertion(
steps,
pre.BeforeInbox,
0,
)
stub := valprotocol.NewExecutionAssertionStubFromAssertion(generatedAssertion)
if uint64(numSteps) != steps || !stub.Equals(assertions[i]) {
return uint16(i), initState, nil
}
pre = pre.GeneratePostcondition(stub)
}
return 0, nil, errors.New("all segments in false ExecutionAssertion are valid")
} | packages/arb-validator/challenges/defender.go | 0.57344 | 0.424472 | defender.go | starcoder |
package utils
import (
"bytes"
"encoding/binary"
"fmt"
"math/big"
"strconv"
"github.com/pkg/errors"
"github.com/ethereum/go-ethereum/common"
"github.com/tidwall/gjson"
)
// Output format specifiers accepted by EVMTranscodeJSONWithFormat.
const (
	// FormatBytes encodes the output as bytes
	FormatBytes = "bytes"
	// FormatUint256 encodes the output as bytes containing a uint256
	FormatUint256 = "uint256"
	// FormatInt256 encodes the output as bytes containing an int256
	FormatInt256 = "int256"
	// FormatBool encodes the output as bytes containing a bool
	FormatBool = "bool"
)
// ConcatBytes appends a bunch of byte arrays into a single byte array
func ConcatBytes(bufs ...[]byte) []byte {
return bytes.Join(bufs, []byte{})
}
// EVMTranscodeBytes converts a json input to an EVM bytes array: strings
// are encoded verbatim, booleans as a 0/1 word, and numbers as a signed
// int256 word (the float is truncated toward zero via big.Float).
func EVMTranscodeBytes(value gjson.Result) ([]byte, error) {
	switch value.Type {
	case gjson.String:
		return EVMEncodeBytes([]byte(value.Str)), nil

	case gjson.False:
		return EVMEncodeBytes(EVMWordUint64(0)), nil

	case gjson.True:
		return EVMEncodeBytes(EVMWordUint64(1)), nil

	case gjson.Number:
		// Truncate the JSON number to an integer before encoding.
		v := big.NewFloat(value.Num)
		vInt, _ := v.Int(nil)
		word, err := EVMWordSignedBigInt(vInt)
		if err != nil {
			return nil, errors.Wrap(err, "while converting float to int256")
		}
		return EVMEncodeBytes(word), nil

	default:
		return []byte{}, fmt.Errorf("unsupported encoding for value: %s", value.Type)
	}
}
// roundToEVMWordBorder returns how many padding bytes are needed to bring
// length up to the next EVMWordByteLen boundary; zero when already aligned.
func roundToEVMWordBorder(length int) int {
	if rem := length % EVMWordByteLen; rem != 0 {
		return EVMWordByteLen - rem
	}
	return 0
}
// EVMEncodeBytes encodes arbitrary bytes as bytes expected by the EVM
func EVMEncodeBytes(input []byte) []byte {
length := len(input)
return ConcatBytes(
EVMWordUint64(uint64(length)),
input,
make([]byte, roundToEVMWordBorder(length)))
}
// EVMTranscodeBool converts a json input to an EVM bool word: any
// "truthy" JSON value (non-zero number, non-empty string, true, or a
// non-empty object/array) becomes word 1; false, null, zero and empties
// become word 0.
func EVMTranscodeBool(value gjson.Result) ([]byte, error) {
	var output uint64

	switch value.Type {
	case gjson.Number:
		if value.Num != 0 {
			output = 1
		}

	case gjson.String:
		if len(value.Str) > 0 {
			output = 1
		}

	case gjson.True:
		output = 1

	case gjson.JSON:
		// ForEach runs at least once only for non-empty objects/arrays,
		// so an empty container stays falsy.
		value.ForEach(func(key, value gjson.Result) bool {
			output = 1
			return false
		})

	case gjson.False, gjson.Null:
		// Explicitly falsy: leave output at 0.

	default:
		panic(fmt.Errorf("unreachable/unsupported encoding for value: %s", value.Type))
	}

	return EVMWordUint64(output), nil
}
// parseDecimalString interprets input as a (possibly fractional) decimal
// number and returns it rounded to the nearest integer as a big.Int.
func parseDecimalString(input string) (*big.Int, error) {
	f, err := strconv.ParseFloat(input, 64)
	if err != nil {
		return nil, err
	}
	rounded := fmt.Sprintf("%.f", f)
	out, ok := new(big.Int).SetString(rounded, 10)
	if !ok {
		return nil, fmt.Errorf("error parsing decimal %s", input)
	}
	return out, nil
}
// parseNumericString parses input as hex (with 0x prefix), then as plain
// decimal, and finally as a fractional decimal rounded to an integer.
func parseNumericString(input string) (*big.Int, error) {
	if HasHexPrefix(input) {
		out, ok := new(big.Int).SetString(RemoveHexPrefix(input), 16)
		if !ok {
			return nil, fmt.Errorf("error parsing hex %s", input)
		}
		return out, nil
	}
	if out, ok := new(big.Int).SetString(input, 10); ok {
		return out, nil
	}
	return parseDecimalString(input)
}
// parseJSONAsEVMWord converts a JSON scalar into a big.Int: strings go
// through hex/decimal parsing, numbers are truncated to int64, and null
// yields zero. Other JSON types are rejected.
func parseJSONAsEVMWord(value gjson.Result) (*big.Int, error) {
	output := new(big.Int)
	switch value.Type {
	case gjson.String:
		var err error
		output, err = parseNumericString(value.Str)
		if err != nil {
			return nil, err
		}
	case gjson.Number:
		// NOTE(review): int64(value.Num) truncates fractional values and
		// loses precision above 2^53 — confirm callers only pass integers.
		output.SetInt64(int64(value.Num))
	case gjson.Null:
		// null maps to zero (the big.Int zero value).
	default:
		return nil, fmt.Errorf("unsupported encoding for value: %s", value.Type)
	}
	return output, nil
}
// EVMTranscodeUint256 converts a json input to an EVM uint256 word,
// rejecting negative values.
func EVMTranscodeUint256(value gjson.Result) ([]byte, error) {
	parsed, err := parseJSONAsEVMWord(value)
	if err != nil {
		return nil, err
	}
	if parsed.Sign() < 0 {
		return nil, fmt.Errorf("%v cannot be represented as uint256", parsed)
	}
	return EVMWordBigInt(parsed)
}
// EVMTranscodeInt256 converts a json input to an EVM int256 word using
// two's-complement encoding for negatives.
func EVMTranscodeInt256(value gjson.Result) ([]byte, error) {
	parsed, err := parseJSONAsEVMWord(value)
	if err != nil {
		return nil, err
	}
	return EVMWordSignedBigInt(parsed)
}
// EVMTranscodeJSONWithFormat encodes a JSON value for the EVM according
// to the given format specifier (one of the Format* constants). All
// non-bytes formats are wrapped in the EVM bytes layout afterwards.
func EVMTranscodeJSONWithFormat(value gjson.Result, format string) ([]byte, error) {
	if format == FormatBytes {
		return EVMTranscodeBytes(value)
	}
	var transcode func(gjson.Result) ([]byte, error)
	switch format {
	case FormatUint256:
		transcode = EVMTranscodeUint256
	case FormatInt256:
		transcode = EVMTranscodeInt256
	case FormatBool:
		transcode = EVMTranscodeBool
	default:
		return []byte{}, fmt.Errorf("unsupported format: %s", format)
	}
	data, err := transcode(value)
	if err != nil {
		return []byte{}, err
	}
	return EVMEncodeBytes(data), nil
}
// EVMWordUint64 returns a uint64 left-padded into a full EVM word
// (big-endian byte array of EVMWordByteLen bytes).
func EVMWordUint64(val uint64) []byte {
	word := make([]byte, EVMWordByteLen)
	binary.BigEndian.PutUint64(word[len(word)-8:], val)
	return word
}
// EVMWordSignedBigInt returns a big.Int as an EVM word byte array using
// two's-complement encoding for negative values. Returns an error when
// the magnitude needs more than 255 bits (one bit is reserved for sign).
func EVMWordSignedBigInt(val *big.Int) ([]byte, error) {
	bytes := val.Bytes()
	if val.BitLen() > (8*EVMWordByteLen - 1) {
		return nil, fmt.Errorf("Overflow saving signed big.Int to EVM word: %v", val)
	}
	if val.Sign() == -1 {
		// Two's complement: 2^256 + val, computed as val + (2^256-1) + 1.
		twosComplement := new(big.Int).Add(val, MaxUint256)
		bytes = new(big.Int).Add(twosComplement, big.NewInt(1)).Bytes()
	}
	return common.LeftPadBytes(bytes, EVMWordByteLen), nil
}
// EVMWordBigInt returns a non-negative big.Int left-padded into a full
// EVM word. Returns an error for negative values or values wider than
// one word.
func EVMWordBigInt(val *big.Int) ([]byte, error) {
	if val.Sign() == -1 {
		return nil, errors.New("Uint256 cannot be negative")
	}
	bytes := val.Bytes()
	if len(bytes) > EVMWordByteLen {
		return nil, fmt.Errorf("Overflow saving big.Int to EVM word: %v", val)
	}
	return common.LeftPadBytes(bytes, EVMWordByteLen), nil
}
// "Constants" used by EVM words
var (
maxUint257 = &big.Int{}
// MaxUint256 represents the largest number represented by an EVM word
MaxUint256 = &big.Int{}
// MaxInt256 represents the largest number represented by an EVM word using
// signed encoding.
MaxInt256 = &big.Int{}
// MinInt256 represents the smallest number represented by an EVM word using
// signed encoding.
MinInt256 = &big.Int{}
)
func init() {
maxUint257 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)
MaxUint256 = new(big.Int).Sub(maxUint257, big.NewInt(1))
MaxInt256 = new(big.Int).Div(MaxUint256, big.NewInt(2))
MinInt256 = new(big.Int).Neg(MaxInt256)
} | core/utils/ethabi.go | 0.736021 | 0.42668 | ethabi.go | starcoder |
package integration
import (
"fmt"
"testing"
"github.com/m3db/m3/src/dbnode/client"
"github.com/m3db/m3/src/dbnode/integration/generate"
"github.com/m3db/m3/src/x/ident"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// verifyQueryMetadataResultsOptions configures verifyQueryMetadataResults:
// the namespace every result must belong to, the expected exhaustive
// flag, and the exact set of series expected back.
type verifyQueryMetadataResultsOptions struct {
	namespace  ident.ID
	exhaustive bool
	expected   []generate.Series
}
// verifyQueryMetadataResult pairs an expected series with a flag that
// records whether the iterator has returned it yet.
type verifyQueryMetadataResult struct {
	series  generate.Series
	matched bool
}
// verifyQueryMetadataResults drains a TaggedIDsIterator and asserts that
// exactly the series in opts.expected are returned, each with the right
// namespace and tags, and that the iterator's exhaustive flag matches
// opts.exhaustive.
func verifyQueryMetadataResults(
	t *testing.T,
	iter client.TaggedIDsIterator,
	exhaustive bool,
	opts verifyQueryMetadataResultsOptions,
) {
	assert.Equal(t, opts.exhaustive, exhaustive)

	// Index expected series by ID string so results can be matched off.
	expected := make(map[string]*verifyQueryMetadataResult, len(opts.expected))
	for _, series := range opts.expected {
		expected[series.ID.String()] = &verifyQueryMetadataResult{
			series:  series,
			matched: false,
		}
	}

	compared := 0
	for iter.Next() {
		compared++

		ns, id, tags := iter.Current()
		assert.True(t, opts.namespace.Equal(ns))

		idStr := id.String()
		result, ok := expected[idStr]
		require.True(t, ok,
			fmt.Sprintf("not expecting ID: %s", idStr))

		// Tags must match the expected series' tags exactly.
		expectedTagsIter := ident.NewTagsIterator(result.series.Tags)
		matcher := ident.NewTagIterMatcher(expectedTagsIter)
		assert.True(t, matcher.Matches(tags),
			fmt.Sprintf("tags not matching for ID: %s", idStr))

		result.matched = true
	}
	require.NoError(t, iter.Err())

	// Build a readable failure message listing which expected IDs the
	// iterator never returned.
	var matched, notMatched []string
	for _, elem := range expected {
		if elem.matched {
			matched = append(matched, elem.series.ID.String())
			continue
		}
		notMatched = append(notMatched, elem.series.ID.String())
	}
	assert.Equal(t, len(expected), compared,
		fmt.Sprintf("matched: %v, not matched: %v", matched, notMatched))
}
// Helper types for aggregate-query verification: the expected observed
// tag values, grouped per tag name.
type tagValue string
type tagName string
type aggregateTagValues map[tagValue]struct{}
type aggregateTags map[tagName]aggregateTagValues
type tagValueSeen bool

// verifyQueryAggregateMetadataResultsOptions configures
// verifyQueryAggregateMetadataResults with the expected tag name/value
// pairs and the expected exhaustive flag.
type verifyQueryAggregateMetadataResultsOptions struct {
	exhaustive bool
	expected   aggregateTags
}
// verifyQueryAggregateMetadataResults drains an AggregatedTagsIterator
// and asserts that exactly the tag-name/tag-value pairs in opts.expected
// are returned (each at most once) and that the exhaustive flag matches.
func verifyQueryAggregateMetadataResults(
	t *testing.T,
	iter client.AggregatedTagsIterator,
	exhaustive bool,
	opts verifyQueryAggregateMetadataResultsOptions,
) {
	assert.Equal(t, opts.exhaustive, exhaustive)

	// Copy the expectations into a mutable "seen" map.
	expected := make(map[tagName]map[tagValue]tagValueSeen, len(opts.expected))
	for name, values := range opts.expected {
		expected[name] = map[tagValue]tagValueSeen{}
		for value := range values {
			expected[name][value] = tagValueSeen(false)
		}
	}

	for iter.Next() {
		name, values := iter.Current()

		result, ok := expected[tagName(name.String())]
		require.True(t, ok,
			fmt.Sprintf("not expecting tag: %s", name.String()))

		for values.Next() {
			value := values.Current()

			entry, ok := result[tagValue(value.String())]
			require.True(t, ok,
				fmt.Sprintf("not expecting tag value: name=%s, value=%s",
					name.String(), value.String()))

			// Each name/value pair may be returned at most once.
			require.False(t, bool(entry))
			result[tagValue(value.String())] = tagValueSeen(true)
		}
		require.NoError(t, values.Err())
	}
	require.NoError(t, iter.Err())

	// Fail with a readable message when any expected pair was missing.
	var matched, notMatched []string
	for name, values := range expected {
		for value, valueMatched := range values {
			elem := fmt.Sprintf("(tagName=%s, tagValue=%s)", name, value)
			if valueMatched {
				matched = append(matched, elem)
				continue
			}
			notMatched = append(notMatched, elem)
		}
	}
	assert.Equal(t, 0, len(notMatched),
		fmt.Sprintf("matched: %v, not matched: %v", matched, notMatched))
}
package types
import (
"math"
"gonum.org/v1/gonum/floats"
)
// Float64Slice is a []float64 with stack-style and elementwise math helpers.
type Float64Slice []float64
// Push appends v to the slice in place.
func (s *Float64Slice) Push(v float64) {
	*s = append(*s, v)
}
// Pop removes and returns the element at index i. The index is not
// bounds-checked: an out-of-range i panics via the slice expression.
func (s *Float64Slice) Pop(i int64) (v float64) {
	v = (*s)[i]
	*s = append((*s)[:i], (*s)[i+1:]...)
	return v
}
// Max returns the largest element (delegates to gonum floats.Max, which
// panics on an empty slice).
func (s Float64Slice) Max() float64 {
	return floats.Max(s)
}
// Min returns the smallest element (delegates to gonum floats.Min, which
// panics on an empty slice).
func (s Float64Slice) Min() float64 {
	return floats.Min(s)
}
// Sum returns the total of all elements (delegates to gonum floats.Sum).
func (s Float64Slice) Sum() (sum float64) {
	return floats.Sum(s)
}
// Mean returns the arithmetic mean; it panics on an empty slice.
func (s Float64Slice) Mean() (mean float64) {
	n := len(s)
	if n == 0 {
		panic("zero length slice")
	}
	return s.Sum() / float64(n)
}
// Tail returns a copy of the last `size` elements (or of the whole slice
// when it is shorter than size).
func (s Float64Slice) Tail(size int) Float64Slice {
	start := len(s) - size
	if start < 0 {
		start = 0
	}
	win := make(Float64Slice, len(s)-start)
	copy(win, s[start:])
	return win
}
// Diff returns the successive differences of s; the first element is
// always 0, so the result has the same length as s.
func (s Float64Slice) Diff() (values Float64Slice) {
	prev := 0.0
	for i, v := range s {
		if i == 0 {
			values.Push(0)
		} else {
			values.Push(v - prev)
		}
		prev = v
	}
	return values
}
// PositiveValuesOrZero returns a copy with every negative value clamped
// to zero (NaN propagates, per math.Max).
func (s Float64Slice) PositiveValuesOrZero() (values Float64Slice) {
	if len(s) == 0 {
		return nil
	}
	values = make(Float64Slice, len(s))
	for i, v := range s {
		values[i] = math.Max(v, 0)
	}
	return values
}
// NegativeValuesOrZero returns a copy with every positive value clamped
// to zero (NaN propagates, per math.Min).
func (s Float64Slice) NegativeValuesOrZero() (values Float64Slice) {
	if len(s) == 0 {
		return nil
	}
	values = make(Float64Slice, len(s))
	for i, v := range s {
		values[i] = math.Min(v, 0)
	}
	return values
}
// Abs returns a copy with every element replaced by its absolute value.
func (s Float64Slice) Abs() (values Float64Slice) {
	if len(s) == 0 {
		return nil
	}
	values = make(Float64Slice, len(s))
	for i, v := range s {
		values[i] = math.Abs(v)
	}
	return values
}
// MulScalar returns a copy with every element multiplied by x.
func (s Float64Slice) MulScalar(x float64) (values Float64Slice) {
	if len(s) == 0 {
		return nil
	}
	values = make(Float64Slice, len(s))
	for i, v := range s {
		values[i] = v * x
	}
	return values
}
// DivScalar returns a copy with every element divided by x (division by
// zero yields ±Inf/NaN per IEEE-754).
func (s Float64Slice) DivScalar(x float64) (values Float64Slice) {
	if len(s) == 0 {
		return nil
	}
	values = make(Float64Slice, len(s))
	for i, v := range s {
		values[i] = v / x
	}
	return values
}
// Mul returns the element-wise product with other; it panics when the
// slice lengths differ.
func (s Float64Slice) Mul(other Float64Slice) (values Float64Slice) {
	if len(s) != len(other) {
		panic("slice lengths do not match")
	}
	for i, v := range s {
		values.Push(v * other[i])
	}
	return values
}
// Dot returns the dot product with other (delegates to gonum floats.Dot).
func (s Float64Slice) Dot(other Float64Slice) float64 {
	return floats.Dot(s, other)
}
// Normalize returns a copy scaled so its elements sum to 1. A zero sum
// produces Inf/NaN values (division by zero).
func (s Float64Slice) Normalize() Float64Slice {
	return s.DivScalar(s.Sum())
}
// Last returns the final element, or 0.0 when the slice is empty.
func (a *Float64Slice) Last() float64 {
	if n := len(*a); n > 0 {
		return (*a)[n-1]
	}
	return 0.0
}
// Index returns the i-th element counting back from the end (Index(0) is
// the last element); an out-of-range i yields 0.0.
func (a *Float64Slice) Index(i int) float64 {
	n := len(*a)
	if i < 0 || i >= n {
		return 0.0
	}
	return (*a)[n-i-1]
}
// Length reports the number of elements in the slice.
func (a *Float64Slice) Length() int {
	return len(*a)
}
// Addr returns a pointer to this value-receiver copy of the slice, which
// is useful for satisfying pointer-receiver interfaces such as Series.
func (a Float64Slice) Addr() *Float64Slice {
	return &a
}
// Compile-time check that *Float64Slice satisfies the Series interface.
var _ Series = Float64Slice([]float64{}).Addr()
package logic
import (
"github.com/tajtiattila/joyster/block"
"math"
)
// init registers the package's scalar axis-transform blocks. Each
// registration receives a block.Param for configuration and returns a
// float64 -> float64 transfer function; stateful transforms (dampen,
// smooth, incremental) capture their state in the returned closure.
func init() {
	// add value to input
	block.RegisterScalarFunc("offset", func(p block.Param) (func(float64) float64, error) {
		ofs := p.Arg("Value")
		return func(v float64) float64 {
			return v + ofs
		}, nil
	})

	// zero input under abs. value, reduce bigger
	block.RegisterScalarFunc("deadzone", func(p block.Param) (func(float64) float64, error) {
		dz := p.Arg("Threshold")
		return func(v float64) float64 {
			var s float64
			// Split into magnitude and sign, shrink the magnitude by
			// the dead zone, then restore the sign.
			if v < 0 {
				v, s = -v, -1
			} else {
				s = 1
			}
			v -= dz
			if v < 0 {
				return 0
			}
			return v * s
		}, nil
	})

	// multiply input by factor
	block.RegisterScalarFunc("multiply", func(p block.Param) (func(float64) float64, error) {
		f := p.Arg("Factor")
		return func(v float64) float64 {
			return v * f
		}, nil
	})

	// axis sensitivity curve (factor: 0 - linear, positive: nonlinear)
	block.RegisterScalarFunc("curvature", func(p block.Param) (func(float64) float64, error) {
		// Exponent 2^Factor: Factor 0 is linear, each +1 squares the
		// response curve again; the input's sign is preserved.
		pow := math.Pow(2, p.Arg("Factor"))
		return func(v float64) float64 {
			s := float64(1)
			if v < 0 {
				s, v = -1, -v
			}
			return s * math.Pow(v, pow)
		}, nil
	})

	// truncate input above abs. value
	block.RegisterScalarFunc("truncate", func(p block.Param) (func(float64) float64, error) {
		t := p.Arg("Value")
		return func(v float64) float64 {
			switch {
			case v < -t:
				return -t
			case t < v:
				return t
			}
			return v
		}, nil
	})

	// set maximum input change to value/second
	block.RegisterScalarFunc("dampen", func(p block.Param) (func(float64) float64, error) {
		value := p.Arg("Value")
		// A (near-)zero time constant disables damping entirely.
		if value < 1e-6 {
			return func(v float64) float64 {
				return v
			}, nil
		}
		speed := p.TickTime() / value
		var pos float64 // current (lagged) output position
		return func(v float64) float64 {
			switch {
			case pos+speed < v:
				pos += speed
			case v < pos-speed:
				pos -= speed
			default:
				pos = v
			}
			return pos
		}, nil
	})

	// smooth inputs over time (seconds)
	block.RegisterScalarFunc("smooth", func(p block.Param) (func(float64) float64, error) {
		nsamples := math.Floor(p.Arg("Time") * p.TickFreq())
		if nsamples < 2 {
			return func(v float64) float64 {
				return v
			}, nil
		}
		// Moving average over an int64 ring buffer: m0 scales inputs to
		// fixed point (sized so the running sum cannot overflow), m1
		// converts the sum back to the averaged float.
		m0 := math.Pow(2, 63) / (nsamples * 100)
		m1 := 1 / (m0 * nsamples)
		posv := make([]int64, int(nsamples))
		n := 0
		var sum int64
		return func(v float64) float64 {
			iv := int64(v * m0)
			sum -= posv[n]
			posv[n], n = iv, (n+1)%len(posv)
			sum += iv
			return float64(sum) * m1
		}, nil
	})

	// use input as delta, change values by speed/second
	block.RegisterScalarFunc("incremental", func(p block.Param) (func(float64) float64, error) {
		speed := p.Arg("Speed")
		rebound := p.OptArg("Rebound", 0)
		quickcenter := 0 != p.OptArg("QuickCenter", 0)
		// Convert per-second rates to per-tick increments.
		speed *= p.TickTime()
		rebound *= p.TickTime()
		var pos float64 // accumulated position in [-1, 1]
		return func(v float64) float64 {
			if math.Abs(v) < 1e-3 {
				// No input: drift back toward centre at rebound speed.
				switch {
				case pos < -rebound:
					pos += rebound
				case rebound < pos:
					pos -= rebound
				default:
					pos = 0
				}
			} else {
				// QuickCenter snaps to zero when pushing against the
				// current position's sign.
				if quickcenter && pos*v < 0 {
					pos = 0
				} else {
					pos += v * speed
					switch {
					case pos < -1:
						pos = -1
					case 1 < pos:
						pos = 1
					}
				}
			}
			return pos
		}, nil
	})
}
package resize
import (
"math"
)
// nearest is the box kernel for nearest-neighbour sampling: 1 inside
// [-0.5, 0.5), 0 elsewhere.
func nearest(in float64) float64 {
	if -0.5 <= in && in < 0.5 {
		return 1
	}
	return 0
}
// linear is the triangle (bilinear) kernel: 1-|x| on [-1, 1], 0 outside.
func linear(in float64) float64 {
	d := math.Abs(in)
	if d > 1 {
		return 0
	}
	return 1 - d
}
func cubic(in float64) float64 {
in = math.Abs(in)
if in <= 1 {
return in*in*(1.5*in-2.5) + 1.0
}
if in <= 2 {
return in*(in*(2.5-0.5*in)-4.0) + 2.0
}
return 0
}
// mitchellnetravali is a Mitchell–Netravali-family cubic kernel with
// support [-2, 2]. The literal coefficients are the polynomial terms
// pre-divided by 6 (0.1666… = 1/6, 5.333… = 32/6, etc.); presumably the
// standard B = C = 1/3 parameters — verify against the reference formula.
func mitchellnetravali(in float64) float64 {
	in = math.Abs(in)
	if in <= 1 {
		return (7.0*in*in*in - 12.0*in*in + 5.33333333333) * 0.16666666666
	}
	if in <= 2 {
		return (-2.33333333333*in*in*in + 12.0*in*in - 20.0*in + 10.6666666667) * 0.16666666666
	}
	return 0
}
// sinc evaluates the normalized sinc function sin(pi*x)/(pi*x); once the
// scaled argument drops below ~1.22e-4 the limit value 1 is returned to
// avoid the 0/0 region.
func sinc(x float64) float64 {
	px := math.Abs(x) * math.Pi
	if px >= 1.220703e-4 {
		return math.Sin(px) / px
	}
	return 1
}

// lanczos2 is the 2-lobe Lanczos window: sinc(x)*sinc(x/2) on (-2, 2),
// zero elsewhere.
func lanczos2(in float64) float64 {
	if -2 < in && in < 2 {
		return sinc(in) * sinc(in*0.5)
	}
	return 0
}

// lanczos3 is the 3-lobe Lanczos window: sinc(x)*sinc(x/3) on (-3, 3),
// zero elsewhere.
func lanczos3(in float64) float64 {
	if -3 < in && in < 3 {
		return sinc(in) * sinc(in*0.3333333333333333)
	}
	return 0
}
// range [-256,256]
// createWeights8 precomputes, for each destination sample y, a window of
// fixed-point kernel coefficients (scaled by 256 into int16) plus the
// first source index the window applies to. It returns the coefficient
// table, the per-row start indices, and the effective filter length.
func createWeights8(dy, filterLength int, blur, scale float64, kernel func(float64) float64) ([]int16, []int, int) {
	filterLength *= int(math.Max(math.Ceil(blur*scale), 1))
	filterFactor := math.Min(1./(blur*scale), 1)

	coeffs := make([]int16, dy*filterLength)
	start := make([]int, dy)
	for y := range start {
		center := scale * (float64(y) + 0.5)
		start[y] = int(center) - filterLength/2 + 1
		offset := center - float64(start[y])
		row := y * filterLength
		for i := 0; i < filterLength; i++ {
			coeffs[row+i] = int16(kernel((offset-float64(i))*filterFactor) * 256)
		}
	}
	return coeffs, start, filterLength
}
// range [-65536,65536]
// createWeights16 is the 16-bit analogue of createWeights8: kernel values
// are scaled by 65536 into int32 coefficients.
func createWeights16(dy, filterLength int, blur, scale float64, kernel func(float64) float64) ([]int32, []int, int) {
	filterLength *= int(math.Max(math.Ceil(blur*scale), 1))
	filterFactor := math.Min(1./(blur*scale), 1)

	coeffs := make([]int32, dy*filterLength)
	start := make([]int, dy)
	for y := range start {
		center := scale * (float64(y) + 0.5)
		start[y] = int(center) - filterLength/2 + 1
		offset := center - float64(start[y])
		row := y * filterLength
		for i := 0; i < filterLength; i++ {
			coeffs[row+i] = int32(kernel((offset-float64(i))*filterFactor) * 65536)
		}
	}
	return coeffs, start, filterLength
}
// createWeightsNearest precomputes boolean nearest-neighbor weights: for
// each destination sample y, coeffs marks which source index inside the
// window is the nearest one, and start[y] is the first source index of
// that window.
func createWeightsNearest(dy, filterLength int, blur, scale float64) ([]bool, []int, int) {
	filterLength = filterLength * int(math.Max(math.Ceil(blur*scale), 1))
	filterFactor := math.Min(1./(blur*scale), 1)

	coeffs := make([]bool, dy*filterLength)
	start := make([]int, dy)
	for y := 0; y < dy; y++ {
		interpX := scale * (float64(y) + 0.5)
		start[y] = int(interpX) - filterLength/2 + 1
		interpX -= float64(start[y])
		for i := 0; i < filterLength; i++ {
			in := (interpX - float64(i)) * filterFactor
			// Assign the box-kernel membership test directly; the old
			// explicit `else { ... = false }` was dead code because make()
			// already zeroes the slice.
			coeffs[y*filterLength+i] = in >= -0.5 && in < 0.5
		}
	}
	return coeffs, start, filterLength
}
package geom
import (
"errors"
"math"
)
// ErrNilPoint is thrown when a point is null but shouldn't be
var ErrNilPoint = errors.New("geom: nil Point")

// nan is computed once at package init so EmptyPoint can reuse it.
var nan = math.NaN()

// EmptyPoint describes an empty 2D point object.
// Both coordinates are NaN; since NaN != NaN, test with math.IsNaN
// rather than comparing against EmptyPoint with ==.
var EmptyPoint = Point{nan, nan}

// Point describes a simple 2D point
type Point [2]float64
// XY returns an array of 2D coordinates
func (p Point) XY() [2]float64 {
	return p
}

// SetXY sets a pair of coordinates.
// It returns ErrNilPoint when called on a nil receiver; otherwise both
// coordinates are overwritten in place.
func (p *Point) SetXY(xy [2]float64) (err error) {
	if p == nil {
		return ErrNilPoint
	}
	p[0] = xy[0]
	p[1] = xy[1]
	return
}

// X is the x coordinate of a point in the projection
func (p Point) X() float64 { return p[0] }

// Y is the y coordinate of a point in the projection
func (p Point) Y() float64 { return p[1] }

// MaxX is the same as X (a point is its own degenerate bounding box)
func (p Point) MaxX() float64 { return p[0] }

// MinX is the same as X
func (p Point) MinX() float64 { return p[0] }

// MaxY is the same as y
func (p Point) MaxY() float64 { return p[1] }

// MinY is the same as y
func (p Point) MinY() float64 { return p[1] }

// Area of a point is always 0
func (p Point) Area() float64 { return 0 }
// Subtract will return a new point holding the component-wise
// difference p - pt.
func (p Point) Subtract(pt Point) Point {
	var diff Point
	diff[0] = p[0] - pt[0]
	diff[1] = p[1] - pt[1]
	return diff
}
// Multiply will return a new point holding the component-wise
// product of pt and p.
func (p Point) Multiply(pt Point) Point {
	var prod Point
	prod[0] = p[0] * pt[0]
	prod[1] = p[1] * pt[1]
	return prod
}
// CrossProduct returns the 2D cross product of p and pt (the z-component
// of the 3D cross product of the two vectors). The former float64(...)
// wrapper was redundant — the operands are already float64.
func (p Point) CrossProduct(pt Point) float64 {
	return p[0]*pt[1] - p[1]*pt[0]
}
// Magnitude of the point is its Euclidean distance from the origin.
func (p Point) Magnitude() float64 {
	x, y := p[0], p[1]
	return math.Sqrt(x*x + y*y)
}
// WithinCircle indicates whether the point p is contained in
// the circle defined by a,b,c
// REF: See Guibas and Stolfi (1985) p.107
//
// This is the classic incircle determinant, expanded via the signed
// areas of the four sub-triangles. The sign convention presumably
// assumes a,b,c are given in counterclockwise order — TODO confirm
// against Triangle.Area's orientation.
func (p Point) WithinCircle(a, b, c Point) bool {
	bcp := Triangle{[2]float64(b), [2]float64(c), [2]float64(p)}
	acp := Triangle{[2]float64(a), [2]float64(c), [2]float64(p)}
	abp := Triangle{[2]float64(a), [2]float64(b), [2]float64(p)}
	abc := Triangle{[2]float64(a), [2]float64(b), [2]float64(c)}
	return (a[0]*a[0]+a[1]*a[1])*bcp.Area()-
		(b[0]*b[0]+b[1]*b[1])*acp.Area()+
		(c[0]*c[0]+c[1]*c[1])*abp.Area()-
		(p[0]*p[0]+p[1]*p[1])*abc.Area() > 0
}
package monday
import "strings"
// findInString reports whether `what` occurs in `where`, ignoring case.
// On success it stores the match position in *foundIndex and the number
// of bytes following the match in *trimRight; on failure the outputs are
// left untouched.
func findInString(where string, what string, foundIndex *int, trimRight *int) (found bool) {
	idx := strings.Index(strings.ToLower(where), strings.ToLower(what))
	if idx < 0 {
		return false
	}
	*foundIndex = idx
	*trimRight = len(where) - idx - len(what)
	return true
}
// commonFormatFunc is used for languages which don't have changed forms of month names dependent
// on their position (after day or standalone)
//
// It splits both the value and the layout into items and walks them in
// lockstep; whenever a layout item contains one of Go's reference tokens
// (Monday/Mon/January/Jan/PM/pm), the matching fragment of the value item
// is translated through the corresponding lookup table. Items without a
// known token — and lookups that miss — are copied through unchanged.
func commonFormatFunc(value, format string,
	knownDaysShort, knownDaysLong, knownMonthsShort, knownMonthsLong, knownPeriods map[string]string) (res string) {
	l := stringToLayoutItems(value)
	f := stringToLayoutItems(format)
	if len(l) != len(f) {
		return value // layouts does not matches
	}
	for i, v := range l {
		var knw map[string]string
		// number of symbols before replaced term
		foundIndex := 0
		trimRight := 0
		lowerCase := false
		// Case order matters: "Monday" must be tested before "Mon" and
		// "January" before "Jan", because the short token is a prefix of
		// the long one.
		switch {
		case findInString(f[i].item, "Monday", &foundIndex, &trimRight):
			knw = knownDaysLong
		case findInString(f[i].item, "Mon", &foundIndex, &trimRight):
			knw = knownDaysShort
		case findInString(f[i].item, "January", &foundIndex, &trimRight):
			knw = knownMonthsLong
		case findInString(f[i].item, "Jan", &foundIndex, &trimRight):
			knw = knownMonthsShort
		case findInString(f[i].item, "PM", &foundIndex, &trimRight):
			knw = knownPeriods
		case findInString(f[i].item, "pm", &foundIndex, &trimRight):
			lowerCase = true
			knw = knownPeriods
		}
		knw = mapToLowerCase(knw)
		if knw != nil {
			// Strip the prefix/suffix around the token, translate the core,
			// then stitch the original surroundings back on.
			trimmedItem := strings.ToLower(v.item[foundIndex : len(v.item)-trimRight])
			tr, ok := knw[trimmedItem]
			if lowerCase == true {
				tr = strings.ToLower(tr)
			}
			if ok {
				res = res + v.item[:foundIndex] + tr + v.item[len(v.item)-trimRight:]
			} else {
				res = res + v.item
			}
		} else {
			res = res + v.item
		}
	}
	return res
}
// hasDigitBefore reports whether the item two positions before `position`
// is a short (at most two characters) digit run — i.e. whether the month
// name is preceded by a day number and therefore needs a genitive form.
func hasDigitBefore(l []dateStringLayoutItem, position int) bool {
	if position < 2 {
		return false
	}
	prev := l[position-2]
	return prev.isDigit && len(prev.item) <= 2
}
// commonGenitiveFormatFunc is used for languages with genitive forms of names, like Russian.
//
// It behaves like commonFormatFunc but matches whole layout items (not
// substrings) and picks the genitive month table when the month is
// preceded by a day number (see hasDigitBefore), the nominative table
// otherwise.
func commonGenitiveFormatFunc(value, format string,
	knownDaysShort, knownDaysLong, knownMonthsShort, knownMonthsLong,
	knownMonthsGenShort, knownMonthsGenLong, knownPeriods map[string]string) (res string) {
	l := stringToLayoutItems(value)
	f := stringToLayoutItems(format)
	if len(l) != len(f) {
		return value // layouts does not matches
	}
	for i, v := range l {
		lowerCase := false
		var knw map[string]string
		switch f[i].item {
		case "Mon":
			knw = knownDaysShort
		case "Monday":
			knw = knownDaysLong
		case "Jan":
			// Genitive form only when a day number precedes the month.
			if hasDigitBefore(l, i) {
				knw = knownMonthsGenShort
			} else {
				knw = knownMonthsShort
			}
		case "January":
			if hasDigitBefore(l, i) {
				knw = knownMonthsGenLong
			} else {
				knw = knownMonthsLong
			}
		case "PM":
			knw = knownPeriods
		case "pm":
			lowerCase = true
			knw = knownPeriods
		}
		knw = mapToLowerCase(knw)
		if knw != nil {
			tr, ok := knw[strings.ToLower(v.item)]
			if !ok {
				// Unknown word: pass it through untranslated.
				res = res + v.item
				continue
			}
			if lowerCase == true {
				tr = strings.ToLower(tr)
			}
			res = res + tr
		} else {
			res = res + v.item
		}
	}
	return res
}
// createCommonFormatFunc builds a formatting closure for locales whose
// month names do not change with grammatical context, binding the
// locale's lookup tables to commonFormatFunc.
func createCommonFormatFunc(locale Locale) internalFormatFunc {
	return func(value, layout string) (res string) {
		return commonFormatFunc(value, layout,
			knownDaysShort[locale], knownDaysLong[locale], knownMonthsShort[locale], knownMonthsLong[locale], knownPeriods[locale])
	}
}

// createCommonFormatFuncWithGenitive is the variant for locales with
// genitive month forms (e.g. Russian).
func createCommonFormatFuncWithGenitive(locale Locale) internalFormatFunc {
	return func(value, layout string) (res string) {
		return commonGenitiveFormatFunc(value, layout,
			knownDaysShort[locale], knownDaysLong[locale], knownMonthsShort[locale], knownMonthsLong[locale],
			knownMonthsGenitiveShort[locale], knownMonthsGenitiveLong[locale], knownPeriods[locale])
	}
}

// createCommonParseFunc builds the inverse (parsing) closure by binding
// the reverse lookup tables; note the layout/value argument order is
// swapped relative to the format functions.
func createCommonParseFunc(locale Locale) internalParseFunc {
	return func(layout, value string) string {
		return commonFormatFunc(value, layout,
			knownDaysShortReverse[locale], knownDaysLongReverse[locale],
			knownMonthsShortReverse[locale], knownMonthsLongReverse[locale], knownPeriodsReverse[locale])
	}
}

// createCommonParsetFuncWithGenitive mirrors createCommonParseFunc for
// genitive locales. (The "Parset" spelling is a historical typo, kept so
// existing callers inside the package keep compiling.)
func createCommonParsetFuncWithGenitive(locale Locale) internalParseFunc {
	return func(layout, value string) (res string) {
		return commonGenitiveFormatFunc(value, layout,
			knownDaysShortReverse[locale], knownDaysLongReverse[locale],
			knownMonthsShortReverse[locale], knownMonthsLongReverse[locale],
			knownMonthsGenitiveShortReverse[locale], knownMonthsGenitiveLongReverse[locale], knownPeriodsReverse[locale])
	}
}
// mapToLowerCase returns a copy of source whose keys are lower-cased.
// A nil input yields nil, preserving the callers' `knw != nil` checks:
// the previous implementation allocated a non-nil empty map even for nil
// input, which made those checks dead (the observable output was the
// same, since lookups in the empty map simply missed).
func mapToLowerCase(source map[string]string) map[string]string {
	if source == nil {
		return nil
	}
	result := make(map[string]string, len(source))
	for k, v := range source {
		result[strings.ToLower(k)] = v
	}
	return result
}
package common
// Annotations
const (
	// AnnotationResumeTestrun is the annotation name to trigger resume on the testrun
	AnnotationResumeTestrun = "testmachinery.sapcloud.io/resume"
	// AnnotationCollectTestrun is the annotation to trigger collection and persistence of testrun results
	AnnotationCollectTestrun = "testmachinery.garden.cloud/collect"
	// AnnotationSystemStep is the testflow step annotation to specify that the step is a testmachinery system step.
	// It indicates that it should not be considered as a test and therefore should not count for a test to be failed.
	AnnotationSystemStep = "testmachinery.sapcloud.io/system-step"
	// AnnotationTestDefName is the name of origin TestDefinition.
	AnnotationTestDefName = "testmachinery.sapcloud.io/TestDefinition"
	// AnnotationTestDefID is the unique name of origin TestDefinition in a specific flow and step.
	AnnotationTestDefID = "testmachinery.sapcloud.io/ID"
	// LabelTMDashboardIngress is the label (not an annotation, despite the
	// grouping) to identify TestMachinery ingress objects.
	LabelTMDashboardIngress = "testmachinery.garden.cloud/tm-dashboard"
)

// Metadata Annotations
const (
	// AnnotationTestrunPurpose is the annotation name to specify a purpose of the testrun
	AnnotationTestrunPurpose = "testmachinery.sapcloud.io/purpose"
	// AnnotationTemplateIDTestrun is the annotation to specify the name of the template the testrun is rendered from
	AnnotationTemplateIDTestrun = "testrunner.testmachinery.gardener.cloud/templateID"
	// AnnotationRetries is the annotation to specify the retry count of the current testrun
	AnnotationRetries = "testrunner.testmachinery.gardener.cloud/retries"
	// AnnotationLandscape is the annotation to specify the landscape this testrun is testing
	AnnotationLandscape = "metadata.testmachinery.gardener.cloud/landscape"
	// AnnotationK8sVersion is the annotation to specify the k8s version the testrun is testing
	AnnotationK8sVersion = "metadata.testmachinery.gardener.cloud/k8sVersion"
	// AnnotationCloudProvider is the annotation to specify the cloudprovider the testrun is testing
	AnnotationCloudProvider = "metadata.testmachinery.gardener.cloud/cloudprovider"
	// AnnotationOperatingSystem is the annotation to specify the operating system of the shoot nodes the testrun is testing
	AnnotationOperatingSystem = "metadata.testmachinery.gardener.cloud/operating-system"
	// AnnotationRegion is the annotation to specify the region of the shoot the testrun is testing
	AnnotationRegion = "metadata.testmachinery.gardener.cloud/region"
	// AnnotationZone is the annotation to specify the zone of the shoot the testrun is testing
	AnnotationZone = "metadata.testmachinery.gardener.cloud/zone"
	// AnnotationFlavorDescription is the annotation to describe the test flavor of the current run testrun
	AnnotationFlavorDescription = "metadata.testmachinery.gardener.cloud/flavor-description"
	// AnnotationDimension is the annotation to specify the dimension the testrun is testing
	AnnotationDimension = "metadata.testmachinery.gardener.cloud/dimension"
	// AnnotationGroupPurpose is the annotation to describe a run group with an arbitrary string
	AnnotationGroupPurpose = "metadata.testmachinery.gardener.cloud/group-purpose"
	// LabelTestrunExecutionGroup is the label to specify the unique name of the run (multiple testruns) this test belongs to.
	// A run represents all tests that are running from one testrunner.
	LabelTestrunExecutionGroup = "testrunner.testmachinery.gardener.cloud/execution-group"
)

// Testrunner Annotations
// NOTE: this block is a grab bag — besides the testrunner label it also
// holds image names, git repositories, and TM dashboard HTTP parameters.
const (
	// LabelUploadedToGithub is the label to specify whether the testrun result was uploaded to github
	LabelUploadedToGithub = "testrunner.testmachinery.sapcloud.io/uploaded-to-github"
	// images
	DockerImageGardenerApiServer = "eu.gcr.io/gardener-project/gardener/apiserver"
	// Repositories
	TestInfraRepo  = "https://github.com/gardener/test-infra.git"
	GardenSetupRepo = "https://github.com/gardener/garden-setup.git"
	GardenerRepo   = "https://github.com/gardener/gardener.git"
	PatternLatest  = "latest"
	// TM Dashboard
	DashboardExecutionGroupParameter = "runID"
	// DashboardPaginationFrom is the name of the http parameter for the pagination from index.
	DashboardPaginationFrom = "from"
	// DashboardPaginationTo is the name of the http parameter for the pagination from index.
	DashboardPaginationTo = "to"
)

var (
	// DefaultPauseTimeout is the default time (in seconds — 14400s = 4h)
	// to wait before resuming the testrun.
	DefaultPauseTimeout = 14400
)
package vclock
import (
"bytes"
"encoding/gob"
"fmt"
"log"
"sort"
)
// Condition constants define how to compare a vector clock against another,
// and may be ORed together when being provided to the Compare method.
type Condition int

// Constants define comparison conditions between pairs of vector
// clocks; they are distinct bit flags so they can be combined.
const (
	Equal Condition = 1 << iota
	Ancestor
	Descendant
	Concurrent
)
//Vector clocks are maps of string to uint64 where the string is the
//id of the process, and the uint64 is the clock value
type VClock map[string]uint64

//FindTicks returns the clock value for a given id; the boolean reports
//whether the id was present in the clock.
func (vc VClock) FindTicks(id string) (uint64, bool) {
	ticks, ok := vc[id]
	return ticks, ok
}

//New returns a new, empty vector clock
func New() VClock {
	return VClock{}
}

//Copy returns an independent copy of the clock
func (vc VClock) Copy() VClock {
	cp := make(VClock, len(vc))
	for key, value := range vc {
		cp[key] = value
	}
	return cp
}

//CopyFromMap copies a map into a new vector clock.
//BUGFIX: the previous implementation returned the argument itself, so
//mutating the "copy" wrote through to the caller's map; it now performs
//a real copy, as the name promises.
func (vc VClock) CopyFromMap(otherMap map[string]uint64) VClock {
	cp := make(VClock, len(otherMap))
	for key, value := range otherMap {
		cp[key] = value
	}
	return cp
}

//GetMap returns the map typed vector clock
func (vc VClock) GetMap() map[string]uint64 {
	return map[string]uint64(vc)
}
//Set assigns a clock value to a clock index
func (vc VClock) Set(id string, ticks uint64) {
	vc[id] = ticks
}

//Tick increments the clock entry for id by one (replaces the old Update)
func (vc VClock) Tick(id string) {
	vc[id]++
}

//LastUpdate returns the largest tick value held by any process
func (vc VClock) LastUpdate() (last uint64) {
	for _, ticks := range vc {
		if ticks > last {
			last = ticks
		}
	}
	return last
}
//Merge sets each entry of the callee to the element-wise maximum of
//itself and other
func (vc VClock) Merge(other VClock) {
	for id, ticks := range other {
		if ticks > vc[id] {
			vc[id] = ticks
		}
	}
}
//Bytes returns an encoded vector clock
//
//Encoding uses encoding/gob. NOTE(review): an encode failure is fatal —
//log.Fatal terminates the whole process instead of returning an error.
func (vc VClock) Bytes() []byte {
	b := new(bytes.Buffer)
	enc := gob.NewEncoder(b)
	err := enc.Encode(vc)
	if err != nil {
		log.Fatal("Vector Clock Encode:", err)
	}
	return b.Bytes()
}

//FromBytes decodes a vector clock previously produced by Bytes.
//Unlike Bytes, decode errors are returned to the caller.
func FromBytes(data []byte) (vc VClock, err error) {
	b := new(bytes.Buffer)
	b.Write(data)
	clock := New()
	dec := gob.NewDecoder(b)
	err = dec.Decode(&clock)
	return clock, err
}
//PrintVC prints the callees vector clock to stdout
func (vc VClock) PrintVC() {
	fmt.Println(vc.ReturnVCString())
}

//ReturnVCString returns a deterministic string encoding of the clock:
//entries sorted by id, rendered as {"id":ticks, "id":ticks, ...}
func (vc VClock) ReturnVCString() string {
	// Sort the ids so the output is stable across runs.
	ids := make([]string, 0, len(vc))
	for id := range vc {
		ids = append(ids, id)
	}
	sort.Strings(ids)

	var buf bytes.Buffer
	buf.WriteString("{")
	for i, id := range ids {
		if i > 0 {
			buf.WriteString(", ")
		}
		fmt.Fprintf(&buf, "\"%s\":%d", id, vc[id])
	}
	buf.WriteString("}")
	return buf.String()
}
//Compare takes another clock and determines if it is Equal, an
//Ancestor, Descendant, or Concurrent with the callees clock.
//
//cond may be a single Condition or an OR of several; Compare reports
//whether the actual relationship matches any of them. "other is
//Ancestor" means other happened-before vc; "Descendant" the reverse.
func (vc VClock) Compare(other VClock, cond Condition) bool {
	var otherIs Condition
	// Preliminary qualification based on length
	if len(vc) > len(other) {
		if cond&(Ancestor|Concurrent) == 0 {
			return false
		}
		otherIs = Ancestor
	} else if len(vc) < len(other) {
		if cond&(Descendant|Concurrent) == 0 {
			return false
		}
		otherIs = Descendant
	} else {
		otherIs = Equal
	}
	//Compare matching items
	for id := range other {
		if _, found := vc[id]; found {
			if other[id] > vc[id] {
				// other is ahead for this id; a contradiction with an
				// earlier "Ancestor" verdict means the clocks are concurrent.
				switch otherIs {
				case Equal:
					otherIs = Descendant
					break // redundant: Go switch cases do not fall through
				case Ancestor:
					return cond&Concurrent != 0
				}
			} else if other[id] < vc[id] {
				switch otherIs {
				case Equal:
					otherIs = Ancestor
					break // redundant, kept as-is
				case Descendant:
					return cond&Concurrent != 0
				}
			}
		} else {
			// id exists only in other.
			if otherIs == Equal {
				return cond&Concurrent != 0
			} else if (len(other) - len(vc) - 1) < 0 {
				// NOTE(review): i.e. len(other) <= len(vc); the intent of
				// the -1 offset is unclear — verify against the upstream
				// algorithm before touching this.
				return cond&Concurrent != 0
			}
		}
	}
	//Equal clocks are concurrent
	if otherIs == Equal && cond == Concurrent {
		cond = Equal
	}
	return cond&otherIs != 0
}
package engine
// Boundries in horizontal direction for a piece.
//
// Board squares appear to be numbered 1-64 file by file: square 1 is the
// bottom-left corner, +1 moves one rank up, +8 moves one file right (this
// follows from the edge-square case lists in the setters below) — TODO
// confirm against the board construction code.
type XBoundry struct {
	left int
	right int
}

// Boundries in vertical direction for a piece
type YBoundry struct {
	top int
	bottom int
}

// Diagonal boundries for a piece
type DiagonalBoundry struct {
	topLeft int
	topRight int
	bottomLeft int
	bottomRight int
}
// Calculate left boundry based off current position of a piece.
// Steps one file left at a time (pos -= 8) until a square in the leftmost
// file (1-8) is reached; off-board positions leave the field unset.
func (p *Piece) setLeftBoundry() {
	pos := p.CurPos
	for i := 0; i < 8; i++ { // at most 7 steps are needed from any square
		if pos < 1 {
			return
		}
		if pos >= 1 {
			switch pos {
			case 1, 2, 3, 4, 5, 6, 7, 8: // Left boundry numeric positions on board
				p.xBoundry.left = pos
				return
			default:
				pos -= 8 // one file toward the left edge
			}
		}
	}
}

// Calculate right boundry based off current position of a piece.
// Mirror of setLeftBoundry: steps right (pos += 8) until the rightmost
// file (57-64) is hit.
func (p *Piece) setRightBoundry() {
	pos := p.CurPos
	for i := 0; i < 8; i++ {
		if pos > 64 {
			return
		}
		if pos <= 64 {
			switch pos {
			case 57, 58, 59, 60, 61, 62, 63, 64: // Right boundry numeric positions on board
				p.xBoundry.right = pos
				return
			default:
				pos += 8 // one file toward the right edge
			}
		}
	}
}

// Calculate top boundry based off current position of a piece.
// Steps one rank up at a time (pos++) until a top-rank square
// (multiples of 8) is reached.
func (p *Piece) setTopBoundry() {
	pos := p.CurPos
	for i := 0; i < 8; i++ {
		if pos > 64 {
			return
		}
		if pos <= 64 {
			switch pos {
			case 8, 16, 24, 32, 40, 48, 56, 64: // Top boundry numeric positions on board
				p.yBoundry.top = pos
				return
			default:
				pos++ // one rank up
			}
		}
	}
}

// Calculate bottom boundry based off current position of a piece.
// Steps one rank down at a time (pos--) until a bottom-rank square
// (1, 9, 17, ...) is reached.
func (p *Piece) setBottomBoundry() {
	pos := p.CurPos
	for i := 0; i < 8; i++ {
		if pos < 1 {
			return
		}
		if pos >= 1 {
			switch pos {
			case 1, 9, 17, 25, 33, 41, 49, 57: // Bottom boundry numeric positions on board
				p.yBoundry.bottom = pos
				return
			default:
				pos-- // one rank down
			}
		}
	}
}
// Calculate top-left boundry based off current position of a piece.
// Top-left is one file left and one rank up, i.e. pos - 7 per step; the
// walk stops on the top rank or the leftmost file. Pawns are limited to
// a single diagonal step (presumably their capture square — TODO confirm).
func (p *Piece) setTopLeftBoundry() {
	pos := p.CurPos
	for i := 0; i < 8; i++ {
		// fmt.Println(pos)
		if pos < 2 {
			return
		}
		if pos >= 1 {
			switch pos {
			case 8, 16, 24, 32, 40, 48, 56, 64: // Top boundry numeric positions on board
				p.diagonalBoundry.topLeft = pos
				return
			case 1, 2, 3, 4, 5, 6, 7: // Left boundry numeric positions on board
				p.diagonalBoundry.topLeft = pos
				return
			default:
				if p.isPawn {
					// Pawn is not on left boundry or top boundry
					// Calculate and set its top-left position
					p.diagonalBoundry.topLeft = pos - 7
					return
				}
				pos -= 7
			}
		}
	}
}

// Calculate top-right boundry based off current position of a piece.
// Top-right is one file right and one rank up: pos + 9 per step.
func (p *Piece) setTopRightBoundry() {
	pos := p.CurPos
	for i := 0; i < 8; i++ {
		if pos > 64 {
			return
		}
		if pos <= 64 {
			switch pos {
			case 8, 16, 24, 32, 40, 48, 56: // Top boundry numeric positions on board
				p.diagonalBoundry.topRight = pos
				return
			case 57, 58, 59, 60, 61, 62, 63, 64: // Right boundry numeric positions on board
				p.diagonalBoundry.topRight = pos
				return
			default:
				if p.isPawn {
					// Pawn is not on right boundry or top boundry
					// Calculate and set its top-right position
					p.diagonalBoundry.topRight = pos + 9
					return
				}
				pos += 9
			}
		}
	}
}

// Calculate bottom-right boundry based off current position of a piece.
// Bottom-right is one file right and one rank down: pos + 7 per step.
// Pawns never move toward their own bottom diagonals, so they skip this.
func (p *Piece) setBottomRightBoundry() {
	if p.isPawn {
		return
	}
	pos := p.CurPos
	for i := 0; i < 8; i++ {
		if pos < 2 {
			return
		}
		if pos >= 2 {
			switch pos {
			case 1, 9, 17, 25, 33, 41, 49, 57: // Bottom boundry numeric positions on board
				p.diagonalBoundry.bottomRight = pos
				return
			case 58, 59, 60, 61, 62, 63, 64: // Right boundry numeric positions on board
				p.diagonalBoundry.bottomRight = pos
				return
			default:
				pos += 7
			}
		}
	}
}

// Calculate bottom-left boundry based off current position of a piece.
// Bottom-left is one file left and one rank down: pos - 9 per step.
func (p *Piece) setBottomLeftBoundry() {
	if p.isPawn {
		return
	}
	pos := p.CurPos
	for i := 0; i < 8; i++ {
		if pos < 1 {
			return
		}
		if pos >= 1 {
			switch pos {
			case 1, 9, 17, 25, 33, 41, 49, 57: // Bottom boundry numeric positions on board
				p.diagonalBoundry.bottomLeft = pos
				return
			case 2, 3, 4, 5, 6, 7, 8: // Left boundry numeric positions on board
				p.diagonalBoundry.bottomLeft = pos
				return
			default:
				pos -= 9
			}
		}
	}
}

// Set boundries for a piece.
// Only the movement axes the piece is allowed to use (xAllow / yAllow /
// crossAllow) get their boundaries computed.
func (p *Piece) setBoundries() {
	if p.xAllow {
		p.setLeftBoundry()
		p.setRightBoundry()
	}
	if p.yAllow {
		p.setTopBoundry()
		p.setBottomBoundry()
	}
	if p.crossAllow {
		p.setTopLeftBoundry()
		p.setTopRightBoundry()
		p.setBottomRightBoundry()
		p.setBottomLeftBoundry()
	}
	// fmt.Printf("%v %8v: %v %v %v %v %v %v %v %v \n", p.Symbol, p.Kind, p.xBoundry.left, p.diagonalBoundry.topLeft, p.yBoundry.top, p.diagonalBoundry.topRight, p.xBoundry.right, p.diagonalBoundry.bottomRight, p.yBoundry.bottom, p.diagonalBoundry.bottomLeft)
}
package plaid
import (
"encoding/json"
)
// InvestmentsTransactionsOverride Specify the list of investments transactions on the account.
// Required fields are plain values; optional fields (Fees, Security) are
// pointers so "unset" can be distinguished from the zero value.
type InvestmentsTransactionsOverride struct {
	// Posting date for the transaction. Must be formatted as an [ISO 8601](https://wikipedia.org/wiki/ISO_8601) date.
	Date string `json:"date"`
	// The institution's description of the transaction.
	Name string `json:"name"`
	// The number of units of the security involved in this transaction. Must be positive if the type is a buy and negative if the type is a sell.
	Quantity float32 `json:"quantity"`
	// The price of the security at which this transaction occurred.
	Price float32 `json:"price"`
	// The combined value of all fees applied to this transaction.
	Fees *float32 `json:"fees,omitempty"`
	// The type of the investment transaction. Possible values are: `buy`: Buying an investment `sell`: Selling an investment `cash`: Activity that modifies a cash position `fee`: A fee on the account `transfer`: Activity that modifies a position, but not through buy/sell activity e.g. options exercise, portfolio transfer
	Type string `json:"type"`
	// Either a valid `iso_currency_code` or `unofficial_currency_code`
	Currency string `json:"currency"`
	Security *SecurityOverride `json:"security,omitempty"`
}
// NewInvestmentsTransactionsOverride instantiates a new InvestmentsTransactionsOverride object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewInvestmentsTransactionsOverride(date string, name string, quantity float32, price float32, type_ string, currency string) *InvestmentsTransactionsOverride {
	this := InvestmentsTransactionsOverride{}
	this.Date = date
	this.Name = name
	this.Quantity = quantity
	this.Price = price
	this.Type = type_
	this.Currency = currency
	return &this
}

// NewInvestmentsTransactionsOverrideWithDefaults instantiates a new InvestmentsTransactionsOverride object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
// (all required fields are left at their zero values).
func NewInvestmentsTransactionsOverrideWithDefaults() *InvestmentsTransactionsOverride {
	this := InvestmentsTransactionsOverride{}
	return &this
}
// GetDate returns the Date field value
func (o *InvestmentsTransactionsOverride) GetDate() string {
if o == nil {
var ret string
return ret
}
return o.Date
}
// GetDateOk returns a tuple with the Date field value
// and a boolean to check if the value has been set.
func (o *InvestmentsTransactionsOverride) GetDateOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.Date, true
}
// SetDate sets field value
func (o *InvestmentsTransactionsOverride) SetDate(v string) {
o.Date = v
}
// GetName returns the Name field value
func (o *InvestmentsTransactionsOverride) GetName() string {
if o == nil {
var ret string
return ret
}
return o.Name
}
// GetNameOk returns a tuple with the Name field value
// and a boolean to check if the value has been set.
func (o *InvestmentsTransactionsOverride) GetNameOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.Name, true
}
// SetName sets field value
func (o *InvestmentsTransactionsOverride) SetName(v string) {
o.Name = v
}
// GetQuantity returns the Quantity field value
func (o *InvestmentsTransactionsOverride) GetQuantity() float32 {
if o == nil {
var ret float32
return ret
}
return o.Quantity
}
// GetQuantityOk returns a tuple with the Quantity field value
// and a boolean to check if the value has been set.
func (o *InvestmentsTransactionsOverride) GetQuantityOk() (*float32, bool) {
if o == nil {
return nil, false
}
return &o.Quantity, true
}
// SetQuantity sets field value
func (o *InvestmentsTransactionsOverride) SetQuantity(v float32) {
o.Quantity = v
}
// GetPrice returns the Price field value
func (o *InvestmentsTransactionsOverride) GetPrice() float32 {
if o == nil {
var ret float32
return ret
}
return o.Price
}
// GetPriceOk returns a tuple with the Price field value
// and a boolean to check if the value has been set.
func (o *InvestmentsTransactionsOverride) GetPriceOk() (*float32, bool) {
if o == nil {
return nil, false
}
return &o.Price, true
}
// SetPrice sets field value
func (o *InvestmentsTransactionsOverride) SetPrice(v float32) {
o.Price = v
}
// GetFees returns the Fees field value if set, zero value otherwise.
func (o *InvestmentsTransactionsOverride) GetFees() float32 {
if o == nil || o.Fees == nil {
var ret float32
return ret
}
return *o.Fees
}
// GetFeesOk returns a tuple with the Fees field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *InvestmentsTransactionsOverride) GetFeesOk() (*float32, bool) {
if o == nil || o.Fees == nil {
return nil, false
}
return o.Fees, true
}
// HasFees reports whether the optional Fees field has been set.
func (o *InvestmentsTransactionsOverride) HasFees() bool {
	return o != nil && o.Fees != nil
}
// SetFees gets a reference to the given float32 and assigns it to the Fees field.
func (o *InvestmentsTransactionsOverride) SetFees(v float32) {
o.Fees = &v
}
// GetType returns the Type field value
func (o *InvestmentsTransactionsOverride) GetType() string {
if o == nil {
var ret string
return ret
}
return o.Type
}
// GetTypeOk returns a tuple with the Type field value
// and a boolean to check if the value has been set.
func (o *InvestmentsTransactionsOverride) GetTypeOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.Type, true
}
// SetType sets field value
func (o *InvestmentsTransactionsOverride) SetType(v string) {
o.Type = v
}
// GetCurrency returns the Currency field value
func (o *InvestmentsTransactionsOverride) GetCurrency() string {
if o == nil {
var ret string
return ret
}
return o.Currency
}
// GetCurrencyOk returns a tuple with the Currency field value
// and a boolean to check if the value has been set.
func (o *InvestmentsTransactionsOverride) GetCurrencyOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.Currency, true
}
// SetCurrency sets field value
func (o *InvestmentsTransactionsOverride) SetCurrency(v string) {
o.Currency = v
}
// GetSecurity returns the Security field value if set, zero value otherwise.
func (o *InvestmentsTransactionsOverride) GetSecurity() SecurityOverride {
if o == nil || o.Security == nil {
var ret SecurityOverride
return ret
}
return *o.Security
}
// GetSecurityOk returns a tuple with the Security field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *InvestmentsTransactionsOverride) GetSecurityOk() (*SecurityOverride, bool) {
if o == nil || o.Security == nil {
return nil, false
}
return o.Security, true
}
// HasSecurity reports whether the optional Security override has been set.
func (o *InvestmentsTransactionsOverride) HasSecurity() bool {
	return o != nil && o.Security != nil
}
// SetSecurity gets a reference to the given SecurityOverride and assigns it to the Security field.
func (o *InvestmentsTransactionsOverride) SetSecurity(v SecurityOverride) {
o.Security = &v
}
// MarshalJSON serializes the override. Required fields are always
// emitted; the optional pointer fields (fees, security) are emitted only
// when set. The generator's dead `if true { ... }` wrappers have been
// folded into a single map literal — the emitted JSON is unchanged.
func (o InvestmentsTransactionsOverride) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{
		"date":     o.Date,
		"name":     o.Name,
		"quantity": o.Quantity,
		"price":    o.Price,
		"type":     o.Type,
		"currency": o.Currency,
	}
	if o.Fees != nil {
		toSerialize["fees"] = o.Fees
	}
	if o.Security != nil {
		toSerialize["security"] = o.Security
	}
	return json.Marshal(toSerialize)
}
// NullableInvestmentsTransactionsOverride wraps a value with an explicit
// "set" flag so a JSON null can be distinguished from an absent field.
type NullableInvestmentsTransactionsOverride struct {
	value *InvestmentsTransactionsOverride
	isSet bool
}

// Get returns the wrapped value (nil when unset or explicitly null).
func (v NullableInvestmentsTransactionsOverride) Get() *InvestmentsTransactionsOverride {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableInvestmentsTransactionsOverride) Set(val *InvestmentsTransactionsOverride) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set has been called (including with nil).
func (v NullableInvestmentsTransactionsOverride) IsSet() bool {
	return v.isSet
}

// Unset clears the value and the set flag.
func (v *NullableInvestmentsTransactionsOverride) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableInvestmentsTransactionsOverride returns a wrapper already
// marked as set.
func NewNullableInvestmentsTransactionsOverride(val *InvestmentsTransactionsOverride) *NullableInvestmentsTransactionsOverride {
	return &NullableInvestmentsTransactionsOverride{value: val, isSet: true}
}

// MarshalJSON emits the wrapped value (or null when it is nil).
func (v NullableInvestmentsTransactionsOverride) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks it as set.
func (v *NullableInvestmentsTransactionsOverride) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package graphics2D
import (
"fmt"
"math"
)
// BoundingBox is an axis-aligned 2D box: XMin holds the [x, y] of the
// lower-left corner, XMax the [x, y] of the upper-right corner.
type BoundingBox struct {
	XMin [2]float32
	XMax [2]float32
}
// NewBoundingBox computes the axis-aligned bounding box of Geometry.
// It returns nil for an empty point set.
func NewBoundingBox(Geometry []Point) (Box *BoundingBox) {
	if len(Geometry) == 0 {
		return nil
	}
	// Seed both corners from the first point, then widen per coordinate.
	Box = &BoundingBox{XMin: Geometry[0].X, XMax: Geometry[0].X}
	for _, point := range Geometry {
		for i := 0; i < 2; i++ {
			v := point.X[i]
			if v < Box.XMin[i] {
				Box.XMin[i] = v
			}
			if v > Box.XMax[i] {
				Box.XMax[i] = v
			}
		}
	}
	return Box
}
// BoundingBoxFromGoogleMapsZoom builds a square view box centered on
// vCenter whose side equals the longitude span visible at the given
// Google Maps zoom level for a canvas of canvasWidth pixels.
func BoundingBoxFromGoogleMapsZoom(vCenter Point, canvasWidth int, zoomLevel float64) (viewBox *BoundingBox) {
	/*
		Calculate longitude width from Google Maps zoomLevel
		From: https://developers.google.com/maps/documentation/javascript/coordinates
	*/
	// NOTE(review): canvasWidth/256 is integer division, so partial tiles
	// are truncated before the float conversion — confirm that is intended.
	lonWidth := float64(canvasWidth/256) * 360. / math.Pow(2, math.Max(0, zoomLevel))
	// The same half-width is applied on both axes, so the box is square
	// in coordinate space (no latitude correction).
	halfDelta := 0.5 * float32(lonWidth)
	lowLeft := vCenter.Minus(&Point{X: [2]float32{
		halfDelta, halfDelta,
	}})
	upRight := vCenter.Plus(&Point{X: [2]float32{
		halfDelta, halfDelta,
	}})
	viewBox = NewBoundingBox([]Point{*lowLeft, *upRight})
	// Re-center the box exactly on vCenter (guards against rounding).
	centroid := viewBox.Centroid()
	trans := vCenter.Minus(centroid)
	return viewBox.Translate(trans.X)
}
// Centroid returns the geometric center of the box.
func (bb *BoundingBox) Centroid() (centroid *Point) {
	var c Point
	for dim := 0; dim < 2; dim++ {
		c.X[dim] = 0.5 * (bb.XMax[dim] + bb.XMin[dim])
	}
	return &c
}
// Scale returns a copy of the box scaled by the given factor about its own
// centroid; the receiver is not modified.
func (bb *BoundingBox) Scale(scale float32) (bbOut *BoundingBox) {
	bbOut = new(BoundingBox)
	for dim := 0; dim < 2; dim++ {
		span := bb.XMax[dim] - bb.XMin[dim]
		mid := bb.XMin[dim] + 0.5*span
		bbOut.XMin[dim] = scale*(bb.XMin[dim]-mid) + mid
		bbOut.XMax[dim] = scale*(bb.XMax[dim]-mid) + mid
	}
	return bbOut
}
// ScaleX returns a copy of the box with only the X extent scaled about its
// center; the Y extent is copied unchanged.
func (bb *BoundingBox) ScaleX(scale float32) (bbOut *BoundingBox) {
	bbOut = new(BoundingBox)
	*bbOut = *bb
	span := bb.XMax[0] - bb.XMin[0]
	mid := bb.XMin[0] + 0.5*span
	bbOut.XMin[0] = scale*(bb.XMin[0]-mid) + mid
	bbOut.XMax[0] = scale*(bb.XMax[0]-mid) + mid
	return bbOut
}

// ScaleY returns a copy of the box with only the Y extent scaled about its
// center; the X extent is copied unchanged.
func (bb *BoundingBox) ScaleY(scale float32) (bbOut *BoundingBox) {
	bbOut = new(BoundingBox)
	*bbOut = *bb
	span := bb.XMax[1] - bb.XMin[1]
	mid := bb.XMin[1] + 0.5*span
	bbOut.XMin[1] = scale*(bb.XMin[1]-mid) + mid
	bbOut.XMax[1] = scale*(bb.XMax[1]-mid) + mid
	return bbOut
}
// Translate returns a copy of the box shifted by panX.
func (bb *BoundingBox) Translate(panX [2]float32) (bbOut *BoundingBox) {
	bbOut = new(BoundingBox)
	for dim := 0; dim < 2; dim++ {
		bbOut.XMin[dim] = bb.XMin[dim] + panX[dim]
		bbOut.XMax[dim] = bb.XMax[dim] + panX[dim]
	}
	return bbOut
}

// MoveToOrigin returns a copy of the box with its minimum corner at (0,0)
// and its extent preserved.
func (bb *BoundingBox) MoveToOrigin() (bbOut *BoundingBox) {
	bbOut = new(BoundingBox)
	for dim := 0; dim < 2; dim++ {
		bbOut.XMin[dim] = 0
		bbOut.XMax[dim] = bb.XMax[dim] - bb.XMin[dim]
	}
	return bbOut
}

// Grow expands the receiver in place so it also encloses newBB.
func (bb *BoundingBox) Grow(newBB *BoundingBox) {
	for dim := 0; dim < 2; dim++ {
		bb.XMin[dim] = float32(math.Min(float64(bb.XMin[dim]), float64(newBB.XMin[dim])))
		bb.XMax[dim] = float32(math.Max(float64(bb.XMax[dim]), float64(newBB.XMax[dim])))
	}
}

// Divide returns the per-axis ratio of this box's extent to denom's extent.
// A zero-extent axis in denom yields ±Inf or NaN for that component.
func (bb *BoundingBox) Divide(denom *BoundingBox) (scaleX []float64) {
	scaleX = make([]float64, 2)
	for dim := 0; dim < 2; dim++ {
		num := bb.XMax[dim] - bb.XMin[dim]
		den := denom.XMax[dim] - denom.XMin[dim]
		scaleX[dim] = float64(num / den)
	}
	return scaleX
}
// Outline returns the box perimeter as a closed PolyLine, visiting the four
// corners starting from the minimum corner and repeating it to close the
// loop.
func (bb *BoundingBox) Outline() (pLine *PolyLine) {
	corners := []Point{
		{X: [2]float32{bb.XMin[0], bb.XMin[1]}},
		{X: [2]float32{bb.XMax[0], bb.XMin[1]}},
		{X: [2]float32{bb.XMax[0], bb.XMax[1]}},
		{X: [2]float32{bb.XMin[0], bb.XMax[1]}},
	}
	return NewPolyLine(append(corners, corners[0]))
}

// PointInside reports whether point lies within the box, boundary inclusive.
func (bb *BoundingBox) PointInside(point *Point) (within bool) {
	for dim := 0; dim < 2; dim++ {
		if point.X[dim] < bb.XMin[dim] || point.X[dim] > bb.XMax[dim] {
			return false
		}
	}
	return true
}
// GeometryInterface is the common contract for 2D shapes: raw vertex access
// plus derived metrics.
type GeometryInterface interface {
	GetGeometry() []Point
	SetGeometry([]Point) // Allows for external transformations
	GetBoundingBox() *BoundingBox
	Area() float64
	Centroid() *Point
}

// BaseGeometryClass supplies shared storage (vertices plus a cached bounding
// box) and default method implementations for concrete shapes.
type BaseGeometryClass struct {
	Box      *BoundingBox
	Geometry []Point
}

// GetGeometry returns the stored vertex slice (not a copy).
func (bg *BaseGeometryClass) GetGeometry() (geom []Point) {
	return bg.Geometry
}

// SetGeometry replaces the stored vertices. Note the cached Box is NOT
// recomputed here.
func (bg *BaseGeometryClass) SetGeometry(geom []Point) {
	bg.Geometry = geom
}

// GetBoundingBox returns the cached bounding box (may be nil).
func (bg *BaseGeometryClass) GetBoundingBox() (bb *BoundingBox) {
	return bg.Box
}

// Area defaults to zero; concrete shapes override it.
func (bg *BaseGeometryClass) Area() (area float64) { return 0 }

// Centroid defaults to the bounding-box center; panics when Box is nil.
func (bg *BaseGeometryClass) Centroid() (ct *Point) { return bg.Box.Centroid() }
// TransformLambertAzimuthal projects the geometry, given in degrees of
// (lon,lat), onto a plane such that computed areas are correct. The center
// point is used both as the projection origin and as the arbitrary
// "False Easting"/"False Northing" offset.
//
// From: www.epsg.org/Portals/0/373-07-2.pdf
// NOTE(review): the formulas applied below (E = R*cosPhi1*(lambda-lambda0),
// N = R*sin(phi)/cosPhi1) match the Lambert *cylindrical* equal-area
// projection rather than the azimuthal one the name suggests — confirm
// against the inverse transform and callers.
func (bg *BaseGeometryClass) TransformLambertAzimuthal(radius float64, center *Point) {
	lambda0 := float64(center.X[0]) * math.Pi / 180 // origin longitude, radians
	phi1 := float64(center.X[1]) * math.Pi / 180    // standard parallel, radians
	cosPhi1 := math.Cos(phi1)
	inverseCosPhi1 := 1 / cosPhi1
	radScale := math.Pi / 180 // degrees -> radians
	for i := range bg.Geometry {
		lambda := float64(bg.Geometry[i].X[0]) * radScale
		phi := float64(bg.Geometry[i].X[1]) * radScale
		// Project, then offset by the center coordinates (FE/FN).
		bg.Geometry[i].X[0] = float32(radius*cosPhi1*(lambda-lambda0)) + center.X[0]
		bg.Geometry[i].X[1] = float32(radius*math.Sin(phi)*inverseCosPhi1) + center.X[1]
	}
}
// InverseTransformLambertAzimuthal undoes TransformLambertAzimuthal, mapping
// projected (E,N) coordinates back to degrees of (lon,lat). The center
// coordinate serves as both [FE,FN] and [lambda,phi]0.
func (bg *BaseGeometryClass) InverseTransformLambertAzimuthal(radius float64, center *Point) {
	phi1 := float64(center.X[1]) * math.Pi / 180
	cosPhi1 := math.Cos(phi1)
	inverseR := 1 / radius
	inverseRCosPhi1 := 1 / (radius * cosPhi1)
	degScale := 180 / math.Pi // radians -> degrees
	for i := range bg.Geometry {
		E := float64(bg.Geometry[i].X[0])
		N := float64(bg.Geometry[i].X[1])
		// lambda = lambda0 + (E-FE)/(R*cosPhi1); latitude comes directly from
		// asin of the FN-corrected northing.
		bg.Geometry[i].X[0] = center.X[0] + float32(degScale*inverseRCosPhi1*(E-float64(center.X[0])))
		bg.Geometry[i].X[1] = float32(degScale * math.Asin(cosPhi1*inverseR*(N-float64(center.X[1]))))
	}
}
// Point is a single 2D coordinate stored as [x, y].
type Point struct {
	X [2]float32
}

// NewPoint returns a Point at (x, y).
func NewPoint(x, y float32) *Point {
	return &Point{X: [2]float32{x, y}}
}

// GetGeometry returns a one-element slice holding a copy of the point.
func (pt *Point) GetGeometry() (geom []Point) {
	return []Point{*pt}
}

// SetGeometry overwrites the point from the first element of geom; panics on
// an empty slice.
func (pt *Point) SetGeometry(geom []Point) {
	*pt = geom[0]
}
// GetBoundingBox returns nil: a point has no extent.
func (pt *Point) GetBoundingBox() (box *BoundingBox) {
	return nil
}

// Area of a point is zero by definition.
func (pt *Point) Area() (area float64) { return 0 }

// Centroid returns the receiver itself (not a copy).
func (pt *Point) Centroid() (centroid *Point) {
	return pt
}
// Minus returns the component-wise difference pt - rhs as a new Point.
func (pt *Point) Minus(rhs *Point) (res *Point) {
	res = new(Point)
	for i := 0; i < 2; i++ {
		res.X[i] = pt.X[i] - rhs.X[i]
	}
	return res
}

// Plus returns the component-wise sum pt + rhs as a new Point.
func (pt *Point) Plus(rhs *Point) (res *Point) {
	res = new(Point)
	for i := 0; i < 2; i++ {
		res.X[i] = pt.X[i] + rhs.X[i]
	}
	return res
}

// Equal reports exact component-wise equality with rhs.
func (pt *Point) Equal(rhs Point) bool {
	return pt.X == rhs.X
}

// Scale multiplies the receiver's components in place by those of scale.
func (pt *Point) Scale(scale Point) {
	for i := 0; i < 2; i++ {
		pt.X[i] *= scale.X[i]
	}
}
// Line is a segment between two points with a precomputed bounding box.
type Line struct {
	Box      *BoundingBox
	geometry [2]Point
}

// NewLine builds a Line from its two endpoints and caches the box spanning
// them.
func NewLine(pt1, pt2 Point) (line *Line) {
	x1, y1, x2, y2 :=
		float64(pt1.X[0]),
		float64(pt1.X[1]),
		float64(pt2.X[0]),
		float64(pt2.X[1])
	pl := new(Line)
	pl.Box = new(BoundingBox)
	pl.Box.XMin[0] = float32(math.Min(x1, x2))
	pl.Box.XMax[0] = float32(math.Max(x1, x2))
	pl.Box.XMin[1] = float32(math.Min(y1, y2))
	pl.Box.XMax[1] = float32(math.Max(y1, y2))
	pl.geometry = [2]Point{pt1, pt2}
	return pl
}

// GetGeometry returns copies of the two endpoints.
func (ln *Line) GetGeometry() (geom []Point) {
	return []Point{ln.geometry[0], ln.geometry[1]}
}

// SetGeometry replaces the endpoints from geom[0] and geom[1]; the cached
// Box is NOT recomputed. Panics when geom has fewer than two points.
func (ln *Line) SetGeometry(geom []Point) {
	ln.geometry[0] = geom[0]
	ln.geometry[1] = geom[1]
}

// GetBoundingBox returns the cached bounding box.
func (ln *Line) GetBoundingBox() (box *BoundingBox) {
	return ln.Box
}

// Area of a line segment is zero by definition.
func (ln *Line) Area() (area float64) { return 0 }

// Centroid returns the segment midpoint (the center of the bounding box).
func (ln *Line) Centroid() (centroid *Point) {
	return ln.Box.Centroid()
}
// Triangle indexes three vertices in an external geometry array.
type Triangle struct {
	Nodes [3]int32
}

// QuadMesh is a structured (i,j) grid of points with optional per-node
// attributes.
type QuadMesh struct {
	BaseGeometryClass
	Dimensions [2]int // [iDim, jDim]; iDim*jDim must equal len(Geometry)
	Attributes [][]float32
}

// TriMesh is an unstructured triangle mesh over shared vertices.
type TriMesh struct {
	BaseGeometryClass
	Triangles  []Triangle
	Attributes [][]float32
}
// Area returns the total signed area of the mesh: each triangle contributes
// via the cross-product formula, positive when its nodes wind
// counterclockwise.
//
// From: https://www.mathopenref.com/coordtrianglearea.html
func (tm *TriMesh) Area() (area float64) {
	for _, tri := range tm.Triangles {
		x1 := float64(tm.Geometry[tri.Nodes[0]].X[0])
		y1 := float64(tm.Geometry[tri.Nodes[0]].X[1])
		x2 := float64(tm.Geometry[tri.Nodes[1]].X[0])
		y2 := float64(tm.Geometry[tri.Nodes[1]].X[1])
		x3 := float64(tm.Geometry[tri.Nodes[2]].X[0])
		y3 := float64(tm.Geometry[tri.Nodes[2]].X[1])
		area += 0.5 * (x1*(y2-y3) + x2*(y3-y1) + x3*(y1-y2))
	}
	return area
}
// Centroid returns the area-weighted centroid of the whole mesh: each
// triangle contributes its own centroid scaled by its signed area.
// NOTE(review): divides by the summed signed area — a net-zero-area mesh
// yields Inf/NaN coordinates.
func (tm *TriMesh) Centroid() (centroid *Point) {
	var area, a, xc, yc float64
	for _, tri := range tm.Triangles {
		x1 := float64(tm.Geometry[tri.Nodes[0]].X[0])
		y1 := float64(tm.Geometry[tri.Nodes[0]].X[1])
		x2 := float64(tm.Geometry[tri.Nodes[1]].X[0])
		y2 := float64(tm.Geometry[tri.Nodes[1]].X[1])
		x3 := float64(tm.Geometry[tri.Nodes[2]].X[0])
		y3 := float64(tm.Geometry[tri.Nodes[2]].X[1])
		a = 0.5 * (x1*(y2-y3) + x2*(y3-y1) + x3*(y1-y2)) // signed triangle area
		xc += a * (x1 + x2 + x3) / 3
		yc += a * (y1 + y2 + y3) / 3
		area += a
	}
	centroid = &Point{X: [2]float32{
		float32(xc / area), float32(yc / area),
	}}
	return centroid
}
// AddQuadMesh converts a structured quad grid into triangles and adopts its
// geometry and attributes (shared, not copied). Each grid cell is split into
// two triangles. Returns an error when the declared dimensions do not match
// the vertex count.
func (tm *TriMesh) AddQuadMesh(mesh *QuadMesh) error {
	iDim, jDim := mesh.Dimensions[0], mesh.Dimensions[1]
	Len := len(mesh.Geometry)
	if iDim*jDim != Len {
		return fmt.Errorf("dimension mismatch %d x %d is not equal to %d",
			iDim, jDim, Len)
	}
	tm.Geometry = mesh.Geometry
	tm.Attributes = mesh.Attributes
	/*
		Create the triangle mesh
	*/
	// NodeNum maps (i,j) grid coordinates to a flat row-major vertex index.
	NodeNum := func(i, j, iDim int) (nodeNum int32) {
		return int32(i + j*iDim)
	}
	for j := 0; j < jDim-1; j++ {
		for i := 0; i < iDim-1; i++ {
			nn1 := NodeNum(i, j, iDim)     // lower-left corner of the cell
			nn2 := NodeNum(i+1, j, iDim)   // lower-right
			nn3 := NodeNum(i, j+1, iDim)   // upper-left
			nn4 := NodeNum(i+1, j+1, iDim) // upper-right
			tm.Triangles = append(
				tm.Triangles,
				Triangle{[3]int32{nn1, nn2, nn3}},
			)
			tm.Triangles = append(
				tm.Triangles,
				Triangle{[3]int32{nn3, nn4, nn2}},
			)
		}
	}
	return nil
}
// PolyLine is an open sequence of connected points.
type PolyLine struct {
	BaseGeometryClass
}

// NewPolyLine builds a PolyLine from geom and caches its bounding box.
func NewPolyLine(geom []Point) (pl *PolyLine) {
	pl = new(PolyLine)
	pl.Box = NewBoundingBox(geom)
	pl.Geometry = geom
	return pl
}
// Polygon is a closed ring of points; the last vertex equals the first.
type Polygon struct {
	BaseGeometryClass
}

// NewPolygon builds a Polygon from geom, appending a copy of the first point
// when the ring is not already closed.
//
// Fix: an empty geom previously panicked on the geom[len(geom)-1] closing
// check; it now yields a Polygon with no geometry (and a nil Box).
func NewPolygon(geom []Point) (poly *Polygon) {
	poly = new(Polygon)
	if len(geom) == 0 {
		return poly
	}
	/*
		Close off the polygon if needed
	*/
	if !geom[len(geom)-1].Equal(geom[0]) {
		geom = append(geom, geom[0])
	}
	poly.Box = NewBoundingBox(geom)
	poly.Geometry = geom
	return poly
}
// NewNgon returns a regular n-gon of the given circumradius centered on
// centroid, generated in counterclockwise order so the normal is positive.
func NewNgon(centroid Point, radius float64, n int) (poly *Polygon) {
	step := 2 * math.Pi / float64(n)
	geom := make([]Point, 0, n)
	for i := 0; i < n; i++ {
		theta := 2*math.Pi - float64(i)*step
		offset := Point{X: [2]float32{
			float32(math.Sin(theta) * radius),
			float32(math.Cos(theta) * radius),
		}}
		geom = append(geom, *centroid.Plus(&offset))
	}
	return NewPolygon(geom)
}
// NewNgonGivenArea returns a regular n-gon centered on centroid whose area
// is |area|; the circumradius follows from area = n*r^2*sin(2*pi/n)/2.
func NewNgonGivenArea(centroid Point, area float64, n int) (poly *Polygon) {
	area = math.Max(-area, area) // absolute value
	sides := float64(n)
	central := 2 * math.Pi / sides
	radius := math.Sqrt(2 * area / (sides * math.Sin(central)))
	return NewNgon(centroid, radius, n)
}
// Centroid returns the area-weighted centroid of the polygon.
//
// From: https://en.wikipedia.org/wiki/Centroid#Centroid_of_a_polygon
// NOTE(review): divides by the signed area — a degenerate (zero-area)
// polygon yields Inf/NaN coordinates.
func (pg *Polygon) Centroid() (centroid *Point) {
	centroid = &Point{X: [2]float32{0, 0}}
	area := pg.Area()
	ct := [2]float64{0, 0}
	for i := 0; i < len(pg.Geometry)-1; i++ {
		pt0 := pg.Geometry[i]
		pt1 := pg.Geometry[i+1]
		x0, y0 := float64(pt0.X[0]), float64(pt0.X[1])
		x1, y1 := float64(pt1.X[0]), float64(pt1.X[1])
		metric := x0*y1 - y0*x1 // cross product of consecutive vertices
		ct[0] += (x0 + x1) * metric
		ct[1] += (y0 + y1) * metric
	}
	for i := 0; i < 2; i++ {
		centroid.X[i] = float32(ct[i] / (6 * area))
	}
	return centroid
}
// Area returns the signed area of the polygon via the shoelace formula
// (Green's theorem in the plane); counterclockwise rings are positive.
func (pg *Polygon) Area() (area float64) {
	var sum float64
	n := len(pg.Geometry)
	for i := 0; i+1 < n; i++ {
		ax, ay := float64(pg.Geometry[i].X[0]), float64(pg.Geometry[i].X[1])
		bx, by := float64(pg.Geometry[i+1].X[0]), float64(pg.Geometry[i+1].X[1])
		sum += ax*by - bx*ay
	}
	return 0.5 * sum
}
// PointInside reports whether point lies inside the polygon, using the
// winding-number algorithm; a zero winding number means outside.
//
// From: http://geomalgorithms.com/a03-_inclusion.html#wn_PnPoly()
func (pg *Polygon) PointInside(point Point) (inside bool) {
	// Cheap rejection: outside the bounding box means outside the polygon.
	if !pg.Box.PointInside(&point) {
		return false
	}
	/*
		isLeft(): tests if a point is Left|On|Right of an infinite line.
		Input: three points P0, P1, and P2
		Return:
			>0 for P2 left of the line through P0 and P1
			=0 for P2 on the line
			<0 for P2 right of the line
		See: Algorithm 1 "Area of Triangles and Polygons"
	*/
	isLeft := func(P0, P1, P2 Point) float32 {
		return (P1.X[0]-P0.X[0])*(P2.X[1]-P0.X[1]) -
			(P2.X[0]-P0.X[0])*(P1.X[1]-P0.X[1])
	}
	var wn int
	for i := 0; i < len(pg.Geometry)-1; i++ {
		pt0 := pg.Geometry[i]
		pt1 := pg.Geometry[i+1]
		if pt0.X[1] <= point.X[1] {
			// Upward crossing counts +1 when the point is strictly left.
			if pt1.X[1] > point.X[1] {
				if isLeft(pt0, pt1, point) > 0 {
					wn++
				}
			}
		} else {
			// Downward crossing counts -1 when the point is strictly right.
			if pt1.X[1] <= point.X[1] {
				if isLeft(pt0, pt1, point) < 0 {
					wn--
				}
			}
		}
	}
	return wn != 0
}
func LineLineIntersection(line1, line2 *Line) *Point {
/*
Check intersection of lines in two phases - calculate denominator, then full
From: https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection
*/
//epsilon := float32(math.SmallestNonzeroFloat32 * 1000000)
x1, x2 := float64(line1.geometry[0].X[0]), float64(line1.geometry[1].X[0])
y1, y2 := float64(line1.geometry[0].X[1]), float64(line1.geometry[1].X[1])
x3, x4 := float64(line2.geometry[0].X[0]), float64(line2.geometry[1].X[0])
y3, y4 := float64(line2.geometry[0].X[1]), float64(line2.geometry[1].X[1])
denom := (x1-x2)*(y3-y4) - (y1-y2)*(x3-x4)
/*
if float32(math.Abs(float64(denom))) < epsilon {
return nil
}
*/
m1 := x1*y2 - y1*x2
m2 := x3*y4 - y3*x4
Xnum := m1*(x3-x4) - (x1-x2)*m2
Ynum := m1*(y3-y4) - (y1-y2)*m2
return &Point{X: [2]float32{
float32(Xnum / denom),
float32(Ynum / denom),
}}
} | geometry/graphics.go | 0.684264 | 0.557845 | graphics.go | starcoder |
package app
//discuss @iso13818-1.pdf, page 61
import (
"encoding/binary"
"go_srs/srs/utils"
)
// SrsTsPayloadPATProgram is one 4-byte program entry of the Program
// Association Table (ISO/IEC 13818-1, page 61).
type SrsTsPayloadPATProgram struct {
	// number is the 16-bit program_number. When 0x0000 the following PID is
	// the network PID; any other value is user defined and must be unique
	// within one version of the PAT.
	number int16 // 16bits
	// const1Value is the 3-bit reserved field; must be all ones.
	const1Value int8 // 3bits
	// pid is the 13-bit program_map_PID (or network_PID when number is
	// 0x0000, identifying the packets carrying the Network Information
	// Table).
	pid int16 // 13bits
}

// NewSrsTsPayloadPATProgram builds an entry mapping program_number to the
// given PMT (or network) PID, with the reserved bits preset to 0b111.
func NewSrsTsPayloadPATProgram(program_number int16, p int16) *SrsTsPayloadPATProgram {
	return &SrsTsPayloadPATProgram{
		number:      program_number,
		const1Value: 0x7,
		pid:         p,
	}
}
// Encode packs the entry into 4 big-endian bytes laid out as
// program_number(16) | reserved(3) | pid(13).
func (this *SrsTsPayloadPATProgram) Encode(stream *utils.SrsStream) {
	var tmpv int32 = int32(this.pid) & 0x1FFF
	tmpv |= int32((uint32(this.number) << 16) & 0xFFFF0000)
	tmpv |= (int32(this.const1Value) << 13) & 0xE000
	stream.WriteInt32(tmpv, binary.BigEndian)
}

// Size returns the fixed on-wire size of a PAT program entry (4 bytes).
func (this *SrsTsPayloadPATProgram) Size() uint32 {
	return 4
}
// SrsTsPayloadPAT is the Program Association Table payload (ISO/IEC 13818-1).
type SrsTsPayloadPAT struct {
	psiHeader *SrsTsPayloadPSI
	// transportStreamId labels this transport stream within a network; fixed
	// to 0x0001 by CreatePAT.
	transportStreamId int16
	// const1Value0 is the 2-bit reserved field; must be all ones.
	const1Value0 int8 // 2bits
	// versionNumber is the 5-bit version of the whole PAT, incremented by 1
	// modulo 32 whenever the table definition changes; fixed to 0 here.
	versionNumber int8 // 5bits
	// currentNextIndicator is 1 when this PAT is currently applicable and 0
	// when it only becomes valid as the next table.
	currentNextIndicator int8
	// sectionNumber of this section; the first section is 0x00.
	sectionNumber int8
	// lastSectionNumber is the highest section number of the complete PAT;
	// fixed to 0x00 here.
	lastSectionNumber int8
	// programs holds the 4-byte program entries.
	programs []*SrsTsPayloadPATProgram
	// crc32 of the section bytes after the pointer field and before this
	// field, per Annex A.
	crc32 int32
}

// NewSrsTsPayloadPAT creates an empty PAT bound to packet p through its PSI
// header.
func NewSrsTsPayloadPAT(p *SrsTsPacket) *SrsTsPayloadPAT {
	return &SrsTsPayloadPAT{
		psiHeader: NewSrsTsPayloadPSI(p),
	}
}
// CreatePAT builds a complete, stuffed TS packet carrying a single-program
// PAT that maps pmt_number to pmt_pid.
func CreatePAT(context *SrsTsContext, pmt_number int16, pmt_pid int16) *SrsTsPacket {
	pkt := NewSrsTsPacket()
	// Fixed TS header for a payload-only PAT packet on the PAT PID.
	pkt.tsHeader.syncByte = SRS_TS_SYNC_BYTE
	pkt.tsHeader.transportErrorIndicator = 0
	pkt.tsHeader.payloadUnitStartIndicator = 1
	pkt.tsHeader.transportPriority = 0
	pkt.tsHeader.PID = SrsTsPidPAT
	pkt.tsHeader.transportScrambingControl = SrsTsScrambledDisabled
	pkt.tsHeader.adaptationFieldControl = SrsTsAdapationControlPayloadOnly
	pkt.tsHeader.continuityCounter = 0
	pat := NewSrsTsPayloadPAT(pkt)
	pat.psiHeader.pointerField = 0
	pat.psiHeader.tableId = SrsTsPsiTableIdPas
	pat.psiHeader.sectionSyntaxIndicator = 1
	pat.psiHeader.const0Value = 0
	pat.psiHeader.const1Value0 = 0x03 // 2 reserved bits
	pat.psiHeader.sectionLength = 0   // computed below once programs are known
	pat.transportStreamId = 1
	pat.const1Value0 = 0x3 // 2 reserved bits
	pat.versionNumber = 0
	pat.currentNextIndicator = 1 // table is immediately applicable
	pat.sectionNumber = 0
	pat.lastSectionNumber = 0
	program := NewSrsTsPayloadPATProgram(pmt_number, pmt_pid)
	pat.programs = append(pat.programs, program)
	// Fill in section_length now that the program list is final.
	pat.psiHeader.sectionLength = int16(pat.Size())
	// Serialize the PAT into the packet payload.
	s := utils.NewSrsStream([]byte{})
	pat.Encode(s)
	pkt.payload = s.Data()
	return pkt
}
// Encode serializes the PAT section (PSI header, fixed fields, program
// entries, CRC32) into stream and stuffs the remainder with 0xFF bytes.
func (this *SrsTsPayloadPAT) Encode(stream *utils.SrsStream) {
	s := utils.NewSrsStream([]byte{})
	this.psiHeader.Encode(s)
	s.WriteInt16(this.transportStreamId, binary.BigEndian)
	this.const1Value0 = 0x03
	// Pack reserved(2) | version_number(5) | current_next_indicator(1).
	var b byte = 0
	b |= byte(this.currentNextIndicator & 0x01)
	b |= byte((this.versionNumber << 1) & 0x3e)
	b |= byte(this.const1Value0<<6) & 0xC0
	s.WriteByte(b)
	s.WriteByte(byte(this.sectionNumber))
	s.WriteByte(byte(this.lastSectionNumber))
	for i := 0; i < len(this.programs); i++ {
		this.programs[i].Encode(s) // 4 bytes per program entry
	}
	// CRC covers the section bytes after the pointer field (hence [1:]).
	CRC32 := utils.MpegtsCRC32(s.Data()[1:])
	s.WriteInt32(int32(CRC32), binary.BigEndian)
	stream.WriteBytes(s.Data())
	// Stuff to a full 188-byte TS packet; the 4 reserved bytes are
	// presumably for the TS header written elsewhere — TODO confirm.
	if len(stream.Data())+4 < 188 {
		i := 188 - len(stream.Data()) - 4
		for j := 0; j < i; j++ {
			stream.WriteByte(0xff)
		}
	}
}
func (this *SrsTsPayloadPAT) Decode(stream *utils.SrsStream) error {
return nil
}
func (this *SrsTsPayloadPAT) Size() uint32 {
var m uint32 = 0
for i := 0; i < len(this.programs); i++ {
m += this.programs[i].Size()
}
return 5 + m + 4
} | srs/app/srs_ts_pat.go | 0.526586 | 0.436862 | srs_ts_pat.go | starcoder |
package graph
import (
"fmt"
"math"
"github.com/moorara/algo/pkg/graphviz"
)
// FlowEdge represents a capacitated edge data type.
type FlowEdge struct {
	from, to       int
	capacity, flow float64
}

// From returns the tail vertex of the edge.
func (e *FlowEdge) From() int { return e.from }

// To returns the head vertex of the edge.
func (e *FlowEdge) To() int { return e.to }

// Other returns the endpoint opposite v, or -1 when v is not on the edge.
func (e *FlowEdge) Other(v int) int {
	if v == e.from {
		return e.to
	}
	if v == e.to {
		return e.from
	}
	return -1
}

// Capacity returns the capacity of the edge.
func (e *FlowEdge) Capacity() float64 { return e.capacity }

// Flow returns the flow on the edge.
func (e *FlowEdge) Flow() float64 { return e.flow }
// ResidualCapacityTo returns the residual capacity of the edge in the
// direction to the given vertex: the current flow toward the tail (backward
// edge), the remaining capacity toward the head (forward edge), or -1 for a
// vertex not on the edge.
func (e *FlowEdge) ResidualCapacityTo(v int) float64 {
	switch v {
	// backward edge
	case e.from:
		return e.flow
	// forward edge
	case e.to:
		return e.capacity - e.flow
	// invalid
	default:
		return -1
	}
}

// AddResidualFlowTo changes the flow on the edge in the direction to the given vertex.
// If vertex is the tail vertex (backward edge), this decreases the flow on the edge by delta.
// If vertex is the head vertex (forward edge), this increases the flow on the edge by delta.
// If delta is valid, edge will be modified and true will be returned.
// If delta is not valid, edge will not be modified and false will be returned.
func (e *FlowEdge) AddResidualFlowTo(v int, delta float64) bool {
	if delta < 0 {
		return false
	}
	var flow float64
	switch v {
	// backward edge
	case e.from:
		flow = e.flow - delta
	// forward edge
	case e.to:
		flow = e.flow + delta
	// invalid
	default:
		return false
	}
	// Snap to exactly 0 or capacity when within floating-point tolerance so
	// later saturation checks are stable.
	if math.Abs(flow) <= float64Epsilon {
		flow = 0
	}
	if math.Abs(flow-e.capacity) <= float64Epsilon {
		flow = e.capacity
	}
	// Reject updates that would leave the flow outside [0, capacity].
	if flow < 0 || flow > e.capacity {
		return false
	}
	e.flow = flow
	return true
}
// FlowNetwork represents a capacitated network graph data type.
// Each directed edge has a real number capacity and flow, and is stored in
// the adjacency lists of both of its endpoints.
type FlowNetwork struct {
	v, e int          // vertex and edge counts
	adj  [][]FlowEdge // adj[v]: edges incident to v, both directions
}

// NewFlowNetwork creates a new capacitated network graph with V vertices and
// the given initial edges; edges with out-of-range endpoints are dropped by
// AddEdge.
func NewFlowNetwork(V int, edges ...FlowEdge) *FlowNetwork {
	adj := make([][]FlowEdge, V)
	for i := range adj {
		adj[i] = make([]FlowEdge, 0)
	}
	g := &FlowNetwork{
		v:   V, // no. of vertices
		e:   0, // no. of edges
		adj: adj,
	}
	for _, e := range edges {
		g.AddEdge(e)
	}
	return g
}
// V returns the number of vertices.
func (g *FlowNetwork) V() int { return g.v }

// E returns the number of edges.
func (g *FlowNetwork) E() int { return g.e }

// isVertexValid reports whether v is a legal vertex index.
func (g *FlowNetwork) isVertexValid(v int) bool {
	return 0 <= v && v < g.v
}

// Adj returns the edges both pointing to and from a vertex, or nil for an
// invalid vertex.
func (g *FlowNetwork) Adj(v int) []FlowEdge {
	if g.isVertexValid(v) {
		return g.adj[v]
	}
	return nil
}

// AddEdge adds a new edge to the network, recording it in the adjacency
// lists of both endpoints; edges with an invalid endpoint are silently
// ignored.
func (g *FlowNetwork) AddEdge(e FlowEdge) {
	v, w := e.From(), e.To()
	if !g.isVertexValid(v) || !g.isVertexValid(w) {
		return
	}
	g.e++
	g.adj[v] = append(g.adj[v], e)
	g.adj[w] = append(g.adj[w], e)
}
// Edges returns all directed edges in the graph. Each edge is stored in both
// endpoints' adjacency lists, so only the copy seen from its tail vertex is
// emitted.
func (g *FlowNetwork) Edges() []FlowEdge {
	edges := make([]FlowEdge, 0)
	for v, adjEdges := range g.adj {
		for _, e := range adjEdges {
			if e.To() == v {
				continue // skip the head-side copy
			}
			edges = append(edges, e)
		}
	}
	return edges
}
// Graphviz returns a visualization of the graph in Graphviz format.
func (g *FlowNetwork) Graphviz() string {
graph := graphviz.NewGraph(true, true, "", "", "", graphviz.StyleSolid, graphviz.ShapeCircle)
for i := 0; i < g.v; i++ {
name := fmt.Sprintf("%d", i)
graph.AddNode(graphviz.NewNode("", "", name, "", "", "", "", ""))
}
for v := range g.adj {
for _, e := range g.adj[v] {
from := fmt.Sprintf("%d", e.From())
to := fmt.Sprintf("%d", e.To())
label := fmt.Sprintf("%f/%f", e.Flow(), e.Capacity())
graph.AddEdge(graphviz.NewEdge(from, to, graphviz.EdgeTypeDirected, "", label, "", "", ""))
}
}
return graph.DotCode()
} | graph/flow.go | 0.897063 | 0.686958 | flow.go | starcoder |
package sortedset
import (
"encoding/json"
"fmt"
"github.com/dusk-network/dusk-blockchain/pkg/util"
)
// Cluster is a sortedset that keeps track of duplicates.
type Cluster struct {
	Set
	elements map[string]int // occurrence count per key (string of key bytes)
}

// NewCluster returns a new empty Cluster.
func NewCluster() Cluster {
	return Cluster{
		Set:      New(),
		elements: make(map[string]int),
	}
}

// Equal tests for equality with another Cluster: same members in the same
// order with the same occurrence counts.
func (c Cluster) Equal(other Cluster) bool {
	// Fix: the original ranged over c.Set while indexing other.Set[i],
	// panicking when other was shorter and wrongly reporting equality when
	// other was longer. Compare lengths first.
	if len(c.Set) != len(other.Set) {
		return false
	}
	for i, k := range c.Set {
		if k.Cmp(other.Set[i]) != 0 {
			return false
		}
		if c.OccurrencesOf(k.Bytes()) != other.OccurrencesOf(k.Bytes()) {
			return false
		}
	}
	return true
}
// TotalOccurrences returns the amount of elements in the cluster, counting
// duplicates.
func (c Cluster) TotalOccurrences() int {
	total := 0
	for _, occurrences := range c.elements {
		total += occurrences
	}
	return total
}

// Unravel creates a sorted array of []byte, repeating each key as many
// times as it occurs, in ascending order.
func (c Cluster) Unravel() [][]byte {
	pks := make([][]byte, 0)
	for _, pk := range c.Set {
		occurrences := c.elements[string(pk.Bytes())]
		for i := 0; i < occurrences; i++ {
			pks = append(pks, pk.Bytes())
		}
	}
	return pks
}

// OccurrencesOf returns how many times b has been inserted into the
// cluster (zero for an absent key).
func (c Cluster) OccurrencesOf(b []byte) int {
	return c.elements[string(b)]
}
// Insert a []byte into the cluster and updates the element counts.
// Returns true if the element is new, false otherwise.
func (c *Cluster) Insert(b []byte) bool {
	k := string(b)
	// Set.Insert reports false for an already-present key, in which case
	// only the duplicate count is bumped.
	if ok := c.Set.Insert(b); !ok {
		c.elements[k] = c.elements[k] + 1
		return false
	}
	c.elements[k] = 1
	return true
}

// RemoveAll occurrences of a []byte from the cluster and updates the element
// counts. Returns the amount of occurrences that have been removed (zero
// when the key was absent).
func (c *Cluster) RemoveAll(b []byte) int {
	k := string(b)
	occurrences, ok := c.elements[k]
	if !ok {
		return 0
	}
	c.Set.Remove(b)
	delete(c.elements, k)
	return occurrences
}

// Remove a single occurrence of a []byte from the cluster and updates the
// element counts; the key leaves the underlying Set only when its last
// occurrence is removed. Returns false if the element cannot be found.
func (c *Cluster) Remove(b []byte) bool {
	k := string(b)
	occurrences, ok := c.elements[k]
	if !ok {
		return false
	}
	if occurrences == 1 {
		c.Set.Remove(b)
		delete(c.elements, k)
		return true
	}
	c.elements[k] = occurrences - 1
	return true
}
// IntersectCluster performs an intersect operation with a Cluster
// represented through a uint64 bitmap, preserving this cluster's occurrence
// counts for the surviving members.
func (c *Cluster) IntersectCluster(committeeSet uint64) Cluster {
	set := c.Intersect(committeeSet)
	elems := make(map[string]int)
	for _, elem := range set {
		elems[string(elem.Bytes())] = c.OccurrencesOf(elem.Bytes())
	}
	return Cluster{
		Set:      set,
		elements: elems,
	}
}
// Format implements the fmt.Formatter interface, printing each key with its
// occurrence count; the verb rune is ignored. (The local string variable is
// named so it no longer shadows the rune parameter.)
func (c Cluster) Format(f fmt.State, verb rune) {
	for _, elem := range c.Set {
		count := c.OccurrencesOf(elem.Bytes())
		entry := fmt.Sprintf("(blsPk: %s,count: %d)", util.StringifyBytes(elem.Bytes()), count)
		_, _ = f.Write([]byte(entry))
	}
}
// MarshalJSON ...
func (c Cluster) MarshalJSON() ([]byte, error) {
data := make([]string, 0)
for _, elem := range c.Set {
count := c.OccurrencesOf(elem.Bytes())
r := fmt.Sprintf("Key: %s, Count: %d", util.StringifyBytes(elem.Bytes()), count)
data = append(data, r)
}
return json.Marshal(data)
} | pkg/util/nativeutils/sortedset/cluster.go | 0.824674 | 0.417331 | cluster.go | starcoder |
package onshape
import (
"encoding/json"
)
// BTFSValueWithUnits1817 struct for BTFSValueWithUnits1817.
// Generated OpenAPI model: every field is an optional pointer so an absent
// JSON property stays distinguishable from a zero value.
type BTFSValueWithUnits1817 struct {
	BTFSValue1888
	BtType       *string           `json:"btType,omitempty"`
	QuantityType *string           `json:"quantityType,omitempty"`
	UnitToPower  *map[string]int32 `json:"unitToPower,omitempty"`
	Value        *float64          `json:"value,omitempty"`
	ValueObject  *float64          `json:"valueObject,omitempty"`
}

// NewBTFSValueWithUnits1817 instantiates a new BTFSValueWithUnits1817 object.
// This constructor will assign default values to properties that have it
// defined, and makes sure properties required by API are set, but the set of
// arguments will change when the set of required properties is changed.
func NewBTFSValueWithUnits1817() *BTFSValueWithUnits1817 {
	this := BTFSValueWithUnits1817{}
	return &this
}

// NewBTFSValueWithUnits1817WithDefaults instantiates a new
// BTFSValueWithUnits1817 object. This constructor will only assign default
// values to properties that have it defined, but it doesn't guarantee that
// properties required by API are set.
func NewBTFSValueWithUnits1817WithDefaults() *BTFSValueWithUnits1817 {
	this := BTFSValueWithUnits1817{}
	return &this
}
// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTFSValueWithUnits1817) GetBtType() string {
	if v, ok := o.GetBtTypeOk(); ok {
		return *v
	}
	return ""
}

// GetBtTypeOk returns a tuple with the BtType field value if set, nil
// otherwise, and a boolean to check if the value has been set.
func (o *BTFSValueWithUnits1817) GetBtTypeOk() (*string, bool) {
	if o == nil || o.BtType == nil {
		return nil, false
	}
	return o.BtType, true
}

// HasBtType returns a boolean if a field has been set.
func (o *BTFSValueWithUnits1817) HasBtType() bool {
	return o != nil && o.BtType != nil
}

// SetBtType gets a reference to the given string and assigns it to the
// BtType field.
func (o *BTFSValueWithUnits1817) SetBtType(v string) {
	o.BtType = &v
}
// GetQuantityType returns the QuantityType field value if set, zero value
// otherwise.
func (o *BTFSValueWithUnits1817) GetQuantityType() string {
	if v, ok := o.GetQuantityTypeOk(); ok {
		return *v
	}
	return ""
}

// GetQuantityTypeOk returns a tuple with the QuantityType field value if
// set, nil otherwise, and a boolean to check if the value has been set.
func (o *BTFSValueWithUnits1817) GetQuantityTypeOk() (*string, bool) {
	if o == nil || o.QuantityType == nil {
		return nil, false
	}
	return o.QuantityType, true
}

// HasQuantityType returns a boolean if a field has been set.
func (o *BTFSValueWithUnits1817) HasQuantityType() bool {
	return o != nil && o.QuantityType != nil
}

// SetQuantityType gets a reference to the given string and assigns it to the
// QuantityType field.
func (o *BTFSValueWithUnits1817) SetQuantityType(v string) {
	o.QuantityType = &v
}
// GetUnitToPower returns the UnitToPower field value if set, zero value
// (nil map) otherwise.
func (o *BTFSValueWithUnits1817) GetUnitToPower() map[string]int32 {
	if v, ok := o.GetUnitToPowerOk(); ok {
		return *v
	}
	return nil
}

// GetUnitToPowerOk returns a tuple with the UnitToPower field value if set,
// nil otherwise, and a boolean to check if the value has been set.
func (o *BTFSValueWithUnits1817) GetUnitToPowerOk() (*map[string]int32, bool) {
	if o == nil || o.UnitToPower == nil {
		return nil, false
	}
	return o.UnitToPower, true
}

// HasUnitToPower returns a boolean if a field has been set.
func (o *BTFSValueWithUnits1817) HasUnitToPower() bool {
	return o != nil && o.UnitToPower != nil
}

// SetUnitToPower gets a reference to the given map[string]int32 and assigns
// it to the UnitToPower field.
func (o *BTFSValueWithUnits1817) SetUnitToPower(v map[string]int32) {
	o.UnitToPower = &v
}
// GetValue returns the Value field value if set, zero value otherwise.
func (o *BTFSValueWithUnits1817) GetValue() float64 {
	if v, ok := o.GetValueOk(); ok {
		return *v
	}
	return 0
}

// GetValueOk returns a tuple with the Value field value if set, nil
// otherwise, and a boolean to check if the value has been set.
func (o *BTFSValueWithUnits1817) GetValueOk() (*float64, bool) {
	if o == nil || o.Value == nil {
		return nil, false
	}
	return o.Value, true
}

// HasValue returns a boolean if a field has been set.
func (o *BTFSValueWithUnits1817) HasValue() bool {
	return o != nil && o.Value != nil
}

// SetValue gets a reference to the given float64 and assigns it to the
// Value field.
func (o *BTFSValueWithUnits1817) SetValue(v float64) {
	o.Value = &v
}
// GetValueObject returns the ValueObject field value if set, zero value
// otherwise.
func (o *BTFSValueWithUnits1817) GetValueObject() float64 {
	if v, ok := o.GetValueObjectOk(); ok {
		return *v
	}
	return 0
}

// GetValueObjectOk returns a tuple with the ValueObject field value if set,
// nil otherwise, and a boolean to check if the value has been set.
func (o *BTFSValueWithUnits1817) GetValueObjectOk() (*float64, bool) {
	if o == nil || o.ValueObject == nil {
		return nil, false
	}
	return o.ValueObject, true
}

// HasValueObject returns a boolean if a field has been set.
func (o *BTFSValueWithUnits1817) HasValueObject() bool {
	return o != nil && o.ValueObject != nil
}

// SetValueObject gets a reference to the given float64 and assigns it to the
// ValueObject field.
func (o *BTFSValueWithUnits1817) SetValueObject(v float64) {
	o.ValueObject = &v
}
// MarshalJSON flattens the embedded BTFSValue1888 and this type's own set
// fields into a single JSON object.
func (o BTFSValueWithUnits1817) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	// Serialize the embedded parent first, then merge its keys into the map.
	serializedBTFSValue1888, errBTFSValue1888 := json.Marshal(o.BTFSValue1888)
	if errBTFSValue1888 != nil {
		return []byte{}, errBTFSValue1888
	}
	errBTFSValue1888 = json.Unmarshal([]byte(serializedBTFSValue1888), &toSerialize)
	if errBTFSValue1888 != nil {
		return []byte{}, errBTFSValue1888
	}
	// Only set (non-nil) fields are emitted, matching the omitempty tags.
	if o.BtType != nil {
		toSerialize["btType"] = o.BtType
	}
	if o.QuantityType != nil {
		toSerialize["quantityType"] = o.QuantityType
	}
	if o.UnitToPower != nil {
		toSerialize["unitToPower"] = o.UnitToPower
	}
	if o.Value != nil {
		toSerialize["value"] = o.Value
	}
	if o.ValueObject != nil {
		toSerialize["valueObject"] = o.ValueObject
	}
	return json.Marshal(toSerialize)
}
// NullableBTFSValueWithUnits1817 wraps a BTFSValueWithUnits1817 pointer
// together with a flag recording whether it was explicitly set, so a JSON
// null can be told apart from an absent field.
type NullableBTFSValueWithUnits1817 struct {
	value *BTFSValueWithUnits1817
	isSet bool
}

// Get returns the wrapped value (possibly nil).
func (v NullableBTFSValueWithUnits1817) Get() *BTFSValueWithUnits1817 {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableBTFSValueWithUnits1817) Set(val *BTFSValueWithUnits1817) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set (or UnmarshalJSON) has been called.
func (v NullableBTFSValueWithUnits1817) IsSet() bool {
	return v.isSet
}

// Unset clears the wrapped value and the set flag.
func (v *NullableBTFSValueWithUnits1817) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableBTFSValueWithUnits1817 returns a wrapper already marked as set.
func NewNullableBTFSValueWithUnits1817(val *BTFSValueWithUnits1817) *NullableBTFSValueWithUnits1817 {
	return &NullableBTFSValueWithUnits1817{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value; nil encodes as JSON null.
func (v NullableBTFSValueWithUnits1817) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
func (v *NullableBTFSValueWithUnits1817) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | onshape/model_btfs_value_with_units_1817.go | 0.713731 | 0.419826 | model_btfs_value_with_units_1817.go | starcoder |
package leetcode
// Trie is a compressed (radix) prefix tree over byte strings.
type Trie struct {
    root *node
}

/** Initialize your data structure here. */
func Constructor7() Trie {
    t := Trie{root: new(node)}
    return t
}
// node is a radix-trie node: val holds the compressed edge label, flag marks
// that a complete word ends exactly here, and children are the branches.
type node struct {
    val      []byte
    flag     bool
    children []*node
}

// newNode builds a node with the given edge label and word marker and an
// empty (non-nil) children slice.
func newNode(val []byte, flag bool) *node {
    n := &node{val: val, flag: flag}
    n.children = make([]*node, 0)
    return n
}
/** Inserts a word into the trie. */
func (this *Trie) Insert(word string) {
    // Delegate to the recursive radix insertion starting at the root.
    insertToTrie(this.root, []byte(word))
}
// insertToTrie inserts bs below root. root is the parent; its children hold
// compressed (radix) edges that are scanned for a shared prefix with bs.
// Exactly one of four cases applies per matching child: descend, mark an
// existing node as a word, split the edge, or append a fresh child.
func insertToTrie(root *node, bs []byte) {
    if len(bs) == 0 {
        return
    }
    // the first time insert word
    if len(root.children) == 0 {
        root.children = append(root.children, newNode(bs, true))
        return
    }
    find := false
    for _, nd := range root.children {
        i := 0
        // Advance i over the longest common prefix of bs and this edge label.
        for j := 0; j < len(nd.val); j++ {
            if i == len(bs) || bs[i] != nd.val[j] {
                break
            }
            find = true
            i++
        }
        if !find {
            continue
        }
        // at this place find the same prefix
        // nd.val is search over
        if i == len(nd.val) {
            // bs is not search over
            if i < len(bs) {
                insertToTrie(nd, bs[i:])
                return
            }
            // bs is search over
            // the word is already a node, set flag to true
            if i == len(bs) {
                nd.flag = true
                return
            }
        }
        // at the place, i < len(val), i can't > len(val)
        // find same prefix [0: i)
        // nd should break into 2 node: nf keeps the tail of the old edge
        // (and inherits its children and word flag), nd keeps the prefix.
        nf := newNode(nd.val[i:], nd.flag)
        nf.children = nd.children
        nd.children = make([]*node, 0)
        nd.children = append(nd.children, nf)
        nd.val = nd.val[:i]
        nd.flag = true
        if i < len(bs) {
            ns := newNode(bs[i:], true)
            nd.children = append(nd.children, ns)
            // set flag to false if bs isn't over
            nd.flag = false
        }
        return
    }
    // at this place, not find same prefix in this node
    root.children = append(root.children, newNode(bs, true))
}
// searchInTrie walks the radix tree below root looking for bs.
// startWith reports that bs is a prefix of some stored word; equal reports
// that bs itself was inserted as a complete word.
// len(bs) always > 0
func searchInTrie(root *node, bs []byte) (startWith, equal bool) {
    if len(root.children) == 0 {
        return
    }
    find := false
    for _, nd := range root.children {
        i := 0
        // Advance i over the longest common prefix of bs and this edge label.
        for j := 0; j < len(nd.val); j++ {
            if i == len(bs) || bs[i] != nd.val[j] {
                break
            }
            find = true
            i++
        }
        if !find {
            continue
        }
        // at this place find the same prefix
        // nd.val is search over
        if i == len(nd.val) {
            // bs is not search over: keep descending with the remainder
            if i < len(bs) {
                return searchInTrie(nd, bs[i:])
            }
            // bs is search over; whether it is a word depends on nd.flag
            if i == len(bs) {
                startWith = true
                equal = nd.flag
                return
            }
        }
        // i < len(nd.val): bs ends or diverges inside this edge label.
        if i == len(bs) {
            // bs is a proper prefix of the edge: prefix hit, not a word.
            startWith = true
            equal = false
            return
        }
        // bs diverges mid-edge: no match is possible anywhere else.
        return
    }
    return
}
/** Returns if the word is in the trie. */
func (this *Trie) Search(word string) bool {
    // Only an exact word match counts; a bare prefix hit does not.
    _, equal := searchInTrie(this.root, []byte(word))
    return equal
}

/** Returns if there is any word in the trie that starts with the given prefix. */
func (this *Trie) StartsWith(prefix string) bool {
    startWith, _ := searchInTrie(this.root, []byte(prefix))
    return startWith
} | 208_implement-trie-prefix-tree.go | 0.559531 | 0.455562 | 208_implement-trie-prefix-tree.go | starcoder |
package random
import (
    "fmt"
    "math/rand"
    "sync"
    "time"

    "github.com/LuighiV/payload-generator/generator/converter"
)
// seedOnce guarantees the global PRNG is seeded exactly once. The previous
// version reseeded from the wall clock on every call, which is deprecated
// and can return identical values for calls within the same clock tick.
var seedOnce sync.Once

// GenerateRandom returns a random value receiving the a base value and
// variation value which determines the range of variation. The result is
// uniformly distributed in [basevalue-rangevariation/2, basevalue+rangevariation/2).
func GenerateRandom(basevalue float64, rangevariation float64) float64 {
    seedOnce.Do(func() {
        rand.Seed(time.Now().UnixNano())
    })
    return basevalue + rand.Float64()*rangevariation - rangevariation/2
}
// A Base_Values to introduce base values and variation values for each
// parameter (temperature, pressure and humidity)
type Base_Values struct {
    temperature_base      float64
    humidity_base         float64
    pressure_base         float64
    temperature_variation float64
    humidity_variation    float64
    pressure_variation    float64
}

// NewBaseValues bundles the base and variation values for the three
// meteorological parameters. The error is always nil and exists only to keep
// the constructor signature uniform with the rest of the package.
func NewBaseValues(
    temperature_base float64,
    humidity_base float64,
    pressure_base float64,
    temperature_variation float64,
    humidity_variation float64,
    pressure_variation float64,
) (*Base_Values, error) {
    bv := &Base_Values{
        temperature_base:      temperature_base,
        humidity_base:         humidity_base,
        pressure_base:         pressure_base,
        temperature_variation: temperature_variation,
        humidity_variation:    humidity_variation,
        pressure_variation:    pressure_variation,
    }
    return bv, nil
}
// Data holds the information of the parameters converted to integers and the
// payload in bytes. Parameter values are stored as value*100 truncated to
// int (i.e. two implied decimal places).
type Data struct {
    temperature int
    humidity    int
    pressure    int
    payload     []byte
}

// DataOption is the kind of option to be applied to the Data structure
type DataOption func(*Data) error
// WithTemperature returns a DataOption that samples a random temperature
// around base (+/- variation/2) and stores it scaled by 100 as an int.
func WithTemperature(base float64, variation float64) DataOption {
    return func(d *Data) error {
        sample := GenerateRandom(base, variation)
        d.temperature = int(sample * 100)
        return nil
    }
}

// WithHumidity returns a DataOption that samples a random humidity
// around base (+/- variation/2) and stores it scaled by 100 as an int.
func WithHumidity(base float64, variation float64) DataOption {
    return func(d *Data) error {
        sample := GenerateRandom(base, variation)
        d.humidity = int(sample * 100)
        return nil
    }
}

// WithPressure returns a DataOption that samples a random pressure
// around base (+/- variation/2) and stores it scaled by 100 as an int.
func WithPressure(base float64, variation float64) DataOption {
    return func(d *Data) error {
        sample := GenerateRandom(base, variation)
        d.pressure = int(sample * 100)
        return nil
    }
}
// WithBaseValues returns a DataOption that applies the temperature, humidity
// and pressure options derived from base. Unlike the previous version, any
// error from a sub-option is propagated instead of being silently discarded
// (today they always return nil, but the contract should not rely on that).
func WithBaseValues(base *Base_Values) DataOption {
    return func(d *Data) error {
        opts := []DataOption{
            WithTemperature(base.temperature_base, base.temperature_variation),
            WithHumidity(base.humidity_base, base.humidity_variation),
            WithPressure(base.pressure_base, base.pressure_variation),
        }
        for _, opt := range opts {
            if err := opt(d); err != nil {
                return err
            }
        }
        return nil
    }
}
// GetPayload returns the payload bytes previously assembled by LoadPayload
// (nil if LoadPayload has not run yet).
func GetPayload(d *Data) []byte {
    payload := d.payload
    return payload
}
// LoadPayload generates the payload based on the temperature, humidity and
// pressure values: each integer is encoded via converter.ConvertIntToBytes
// and the three encodings are concatenated in that fixed order.
func LoadPayload() DataOption {
    return func(d *Data) error {
        // The previous version allocated a 12-byte slice here that was
        // immediately discarded by reassignment; start directly from the
        // first encoded value instead.
        bs := converter.ConvertIntToBytes(d.temperature)
        bs = append(bs, converter.ConvertIntToBytes(d.humidity)...)
        bs = append(bs, converter.ConvertIntToBytes(d.pressure)...)
        d.payload = bs
        return nil
    }
}
// NewData is a function to create the data structure and apply options to
// generate the parameters of temperature, pressure and humidity.
// The payload is assembled as a final step via LoadPayload.
func NewData(opts ...DataOption) (*Data, error) {
    d := &Data{}
    for _, o := range opts {
        if err := o(d); err != nil {
            return nil, err
        }
    }
    // NOTE(review): debug output left in — consider removing or routing
    // through a proper logger.
    fmt.Println(d.temperature)
    fmt.Println(d.humidity)
    fmt.Println(d.pressure)
    // NOTE(review): LoadPayload's error is discarded; it is always nil today,
    // but should be propagated if that ever changes.
    LoadPayload()(d)
    return d, nil
} | generator/random/metheorological.go | 0.889018 | 0.581689 | metheorological.go | starcoder |
package other
import (
"time"
"github.com/kasworld/h4o/_examples/app"
"github.com/kasworld/h4o/appwindow"
"github.com/kasworld/h4o/eventtype"
"github.com/kasworld/h4o/experimental/collision"
"github.com/kasworld/h4o/geometry"
"github.com/kasworld/h4o/gls"
"github.com/kasworld/h4o/graphic"
"github.com/kasworld/h4o/light"
"github.com/kasworld/h4o/log"
"github.com/kasworld/h4o/material"
"github.com/kasworld/h4o/math32"
"github.com/kasworld/h4o/util/helper"
"math/rand"
)
// init registers this demo under the "other.raycast" key so the example
// browser can discover and instantiate it.
func init() {
    app.DemoMap["other.raycast"] = &Raycast{}
}
// Raycast demonstrates picking scene objects with a mouse-driven raycaster.
type Raycast struct {
    rc *collision.Raycaster // raycaster reused across mouse clicks
}
// Start is called once at the start of the demo.
// It populates the scene with one instance of most pickable primitive kinds
// (meshes, a sprite, a line strip, line segments and points), creates the
// raycaster, and subscribes to mouse-down events for picking.
func (t *Raycast) Start(a *app.App) {
    // Create axes helper
    axes := helper.NewAxes(1)
    a.Scene().Add(axes)
    l1 := light.NewDirectional(&math32.Color{1, 1, 1}, 1.0)
    l1.SetPosition(0, 0, 5)
    a.Scene().Add(l1)
    // Plane
    geom1 := geometry.NewPlane(1.5, 1)
    mat1 := material.NewStandard(&math32.Color{0, 1, 0})
    mat1.SetSide(material.SideFront)
    mesh1 := graphic.NewMesh(geom1, mat1)
    mesh1.SetPosition(-1.2, 0, 0)
    a.Scene().Add(mesh1)
    // Box
    geom2 := geometry.NewCube(1)
    mat2 := material.NewStandard(&math32.Color{1, 0, 0})
    mat2.SetSide(material.SideFront)
    mesh2 := graphic.NewMesh(geom2, mat2)
    mesh2.SetPosition(1.2, 0, 0)
    a.Scene().Add(mesh2)
    // Sphere
    geom3 := geometry.NewSphere(0.5, 16, 16)
    mat3 := material.NewStandard(&math32.Color{0, 1, 1})
    mesh3 := graphic.NewMesh(geom3, mat3)
    mesh3.SetPosition(0, 1, -1)
    a.Scene().Add(mesh3)
    // Open ended cylinder
    geom4 := geometry.NewCylinder(0.5, 1, 16, 1, false, false)
    mat4 := material.NewStandard(&math32.Color{1, 1, 0})
    mat4.SetSide(material.SideDouble)
    mesh4 := graphic.NewMesh(geom4, mat4)
    mesh4.SetPosition(0, -1.2, -0.5)
    a.Scene().Add(mesh4)
    // Disk
    geom5 := geometry.NewDisk(0.6, 5)
    mat5 := material.NewStandard(&math32.Color{0.5, 0.5, 0.9})
    mat5.SetSide(material.SideDouble)
    mesh5 := graphic.NewMesh(geom5, mat5)
    mesh5.SetPosition(-1.2, -1.2, -0.5)
    mesh5.SetRotation(math32.Pi/4, 0, 0)
    a.Scene().Add(mesh5)
    // Torus
    geom6 := geometry.NewTorus(0.5, 0.2, 16, 16, math32.Pi)
    mat6 := material.NewStandard(&math32.Color{0, 0, 0.5})
    mat6.SetSide(material.SideDouble)
    mesh6 := graphic.NewMesh(geom6, mat6)
    mesh6.SetPosition(1.5, -1.2, -1)
    a.Scene().Add(mesh6)
    // Cone (ConeCylinder)
    geom7 := geometry.NewCone(0.5, 1, 16, 1, true)
    mat7 := material.NewStandard(&math32.Color{0.8, 0.7, 0.3})
    mat7.SetSide(material.SideFront)
    mat7.SetOpacity(0.6)
    mat7.SetTransparent(true)
    mesh7 := graphic.NewMesh(geom7, mat7)
    mesh7.SetPosition(0, 0, 0)
    a.Scene().Add(mesh7)
    // Sprite
    mat8 := material.NewStandard(&math32.Color{0, 0.3, 1})
    mesh8 := graphic.NewSprite(1, 1, mat8)
    mesh8.SetPosition(2, -2, -2)
    mesh8.SetRotationZ(math32.Pi / 4)
    mesh8.SetScale(2, 1, 1)
    a.Scene().Add(mesh8)
    // Line strip (positions are packed as x,y,z triplets)
    geom9 := geometry.NewGeometry()
    positions := math32.NewArrayF32(0, 0)
    positions.Append(
        -1, 0, -1, 1, 0, -1,
        -1, 1, -1, 1, 1, -1,
        -1, 2, -1, 1, 2, -1,
    )
    geom9.AddVBO(gls.NewVBO(positions).AddAttrib(gls.VertexPosition))
    mat9 := material.NewStandard(&math32.Color{1, 0, 0})
    mesh9 := graphic.NewLineStrip(geom9, mat9)
    mesh9.SetPosition(-1.5, 0.5, -0.4)
    a.Scene().Add(mesh9)
    // Line segments
    geom10 := geometry.NewGeometry()
    positions = math32.NewArrayF32(0, 0)
    positions.Append(
        0, 0, 0, 1, 0, 0,
        0, 0, 0, -1, 0, 0,
        0, 0, 0, 0, 1, 0,
        0, 0, 0, 0, -1, 0,
        0, 0, 0, 0, 0, -1,
        0, 0, 0, 0, 0, -1,
        0, 0, 0, 0, 0, 1,
        0.1, 0.1, 0.1, 0.5, 0.5, 0.5,
    )
    geom10.AddVBO(gls.NewVBO(positions).AddAttrib(gls.VertexPosition))
    mat10 := material.NewStandard(&math32.Color{0, 0, 1})
    mesh10 := graphic.NewLines(geom10, mat10)
    mesh10.SetScale(0.8, 0.8, 0.8)
    mesh10.SetPosition(1, 1.5, 0)
    a.Scene().Add(mesh10)
    // Points (30 random positions)
    geom11 := geometry.NewGeometry()
    positions = math32.NewArrayF32(0, 0)
    for i := 0; i < 30; i++ {
        x := rand.Float32()
        y := rand.Float32()
        z := rand.Float32()
        positions.Append(x, y, z)
    }
    geom11.AddVBO(gls.NewVBO(positions).AddAttrib(gls.VertexPosition))
    mat11 := material.NewPoint(&math32.Color{0, 0, 0})
    mat11.SetSize(50)
    mesh11 := graphic.NewPoints(geom11, mat11)
    mesh11.SetPosition(-2, -1, 0)
    a.Scene().Add(mesh11)
    // Creates the raycaster. Line/point picking requires an explicit
    // tolerance, set via the two precision fields.
    t.rc = collision.NewRaycaster(&math32.Vector3{}, &math32.Vector3{})
    t.rc.LinePrecision = 0.05
    t.rc.PointPrecision = 0.05
    // Subscribe to mouse button down events
    a.SubscribeID(eventtype.OnMouseDown, a, func(evname eventtype.EventType, ev interface{}) {
        t.onMouse(a, ev)
    })
}
// onMouse casts a ray from the camera through the clicked pixel and toggles
// the emissive color (white <-> black) of the first object hit.
func (t *Raycast) onMouse(a *app.App, ev interface{}) {
    // Convert mouse coordinates to normalized device coordinates
    mev := ev.(*appwindow.MouseEvent)
    width, height := a.GetSize()
    x := 2*(mev.Xpos/float32(width)) - 1
    y := -2*(mev.Ypos/float32(height)) + 1
    // Set the raycaster from the current camera and mouse coordinates
    t.rc.SetFromCamera(a.Camera(), x, y)
    //fmt.Printf("rc:%+v\n", t.rc.Ray)
    // Checks intersection with all objects in the scene (recursively)
    intersects := t.rc.IntersectObjects(a.Scene().Children(), true)
    //fmt.Printf("intersects:%+v\n", intersects)
    if len(intersects) == 0 {
        return
    }
    // Get first intersection (closest object)
    obj := intersects[0].Object
    // Convert INode to IGraphic
    ig, ok := obj.(graphic.GraphicI)
    if !ok {
        log.Debug("Not graphic:%T", obj)
        return
    }
    // Get graphic object
    gr := ig.GetGraphic()
    imat := gr.GetMaterial(0)
    // Local interface so any material type exposing emissive color works.
    type matI interface {
        EmissiveColor() math32.Color
        SetEmissiveColor(*math32.Color)
    }
    if v, ok := imat.(matI); ok {
        if em := v.EmissiveColor(); em.R == 1 && em.G == 1 && em.B == 1 {
            v.SetEmissiveColor(&math32.Color{0, 0, 0})
        } else {
            v.SetEmissiveColor(&math32.Color{1, 1, 1})
        }
    }
}
// Update is called every frame. This demo is event-driven, so per-frame work
// is not needed.
func (t *Raycast) Update(a *app.App, deltaTime time.Duration) {}

// Cleanup is called once at the end of the demo.
func (t *Raycast) Cleanup(a *app.App) {} | _examples/demos/other/raycast.go | 0.591015 | 0.468487 | raycast.go | starcoder |
package index
import (
"github.com/jtejido/golucene/core/util"
)
/*
IndexInput that knows how to read the byte slices written by Posting
and PostingVector. We read the bytes in each slice until we hit the
end of that slice at which point we read the forwarding address of
the next slice and then jump to it.
*/
type ByteSliceReader struct {
    *util.DataInputImpl
    pool         *util.ByteBlockPool
    bufferUpto   int    // index of the current buffer within pool.Buffers
    buffer       []byte // current buffer (pool.Buffers[bufferUpto])
    upto         int    // read position within buffer
    limit        int    // end of readable bytes in the current slice
    level        int    // current slice-size level (indexes util.LEVEL_SIZE_ARRAY)
    bufferOffset int    // absolute offset of buffer's first byte
    endIndex     int    // absolute end of the data being read
}
// newByteSliceReader allocates a reader wired to the DataInput plumbing;
// callers must invoke init before reading.
func newByteSliceReader() *ByteSliceReader {
    reader := new(ByteSliceReader)
    reader.DataInputImpl = util.NewDataInput(reader)
    return reader
}
// init positions the reader at startIndex and computes the first read limit:
// either the end of the data (single-slice case) or 4 bytes before the end
// of the first slice, where the forwarding address lives.
func (r *ByteSliceReader) init(pool *util.ByteBlockPool, startIndex, endIndex int) {
    assert(endIndex-startIndex >= 0)
    assert(startIndex >= 0)
    assert(endIndex >= 0)
    r.pool = pool
    r.endIndex = endIndex
    r.level = 0
    // Translate the absolute startIndex into (buffer, offset-within-buffer).
    r.bufferUpto = startIndex / util.BYTE_BLOCK_SIZE
    r.bufferOffset = r.bufferUpto * util.BYTE_BLOCK_SIZE
    r.buffer = pool.Buffers[r.bufferUpto]
    r.upto = startIndex & util.BYTE_BLOCK_MASK
    firstSize := util.LEVEL_SIZE_ARRAY[0]
    if startIndex+firstSize >= endIndex {
        // there is only this one slice to read
        r.limit = endIndex & util.BYTE_BLOCK_MASK
    } else {
        // reserve the trailing 4 bytes for the next-slice forwarding address
        r.limit = r.upto + firstSize - 4
    }
}
// eof reports whether the absolute read position has reached endIndex.
func (r *ByteSliceReader) eof() bool {
    pos := r.upto + r.bufferOffset
    assert(pos <= r.endIndex)
    return pos == r.endIndex
}
// ReadByte returns the next byte, transparently jumping to the next slice
// (via its forwarding address) when the current one is exhausted.
// Never returns a non-nil error; the error exists only to satisfy the
// DataInput interface.
func (r *ByteSliceReader) ReadByte() (byte, error) {
    assert(!r.eof())
    assert(r.upto <= r.limit)
    if r.upto == r.limit {
        r.nextSlice()
    }
    b := r.buffer[r.upto]
    r.upto++
    return b, nil
}
// nextSlice follows the 4-byte big-endian forwarding address stored at the
// end of the current slice, advances to the next (larger) slice level, and
// recomputes the read limit there.
func (r *ByteSliceReader) nextSlice() {
    // skip to our next slice
    nextIndex := (int(r.buffer[r.limit]) << 24) +
        (int(r.buffer[r.limit+1]) << 16) +
        (int(r.buffer[r.limit+2]) << 8) +
        int(r.buffer[r.limit+3])
    r.level = util.NEXT_LEVEL_ARRAY[r.level]
    newSize := util.LEVEL_SIZE_ARRAY[r.level]
    // Translate the absolute nextIndex into (buffer, offset-within-buffer).
    r.bufferUpto = nextIndex / util.BYTE_BLOCK_SIZE
    r.bufferOffset = r.bufferUpto * util.BYTE_BLOCK_SIZE
    r.buffer = r.pool.Buffers[r.bufferUpto]
    r.upto = nextIndex & util.BYTE_BLOCK_MASK
    if nextIndex+newSize >= r.endIndex {
        // we are advancing to the final slice
        assert(r.endIndex-nextIndex > 0)
        r.limit = r.endIndex - r.bufferOffset
    } else {
        // this is not the final slice (subtract 4 for the forwarding
        // address at the end of this new slice)
        r.limit = r.upto + newSize - 4
    }
}
// ReadBytes fills buf from the slice chain. Not yet implemented.
func (r *ByteSliceReader) ReadBytes(buf []byte) error {
    panic("not implemented yet")
} | core/index/byteSliceReader.go | 0.608478 | 0.493531 | byteSliceReader.go | starcoder |
package fractales
import (
"math"
"math/big"
"math/cmplx"
"github.com/Balise42/marzipango/params"
)
// r is the escape radius used by the smooth (continuous) coloring formulas.
const r = 1000

// MandelbrotContinuousValueLow returns the fractional number of iterations corresponding to a complex in the Mandelbrot set with low precision input
func MandelbrotContinuousValueLow(c complex128, maxiter int) (float64, bool) {
    z := complex(0, 0)
    for i := 0; i < maxiter; i++ {
        z = z*z + c
        absz := cmplx.Abs(z)
        if absz <= r {
            continue
        }
        // Smooth iteration count: i + 1 - log2(log2(|z|)).
        return float64(i) + 1 - math.Log2(math.Log2(absz)), true
    }
    // Never escaped within maxiter iterations: treated as inside the set.
    return math.MaxInt64, false
}
// MandelbrotContinuousValueComputerLow returns a ValueComputation for the mandelbrot set with low precision input
func MandelbrotContinuousValueComputerLow(p params.ImageParams) ValueComputation {
    return func(x int, y int) (float64, bool) {
        point := scale(x, y, p)
        return MandelbrotContinuousValueLow(point, p.MaxIter)
    }
}
// MandelbrotContinuousValueHigh returns the fractional escape iteration count
// for c using arbitrary-precision arithmetic, or (MaxInt64, false) when c
// never escapes within maxiter iterations.
func MandelbrotContinuousValueHigh(c *LargeComplex, maxiter int) (float64, bool) {
    z := LargeComplex{big.NewFloat(0), big.NewFloat(0)}
    for i := 0; i < maxiter; i++ {
        z = z.Square().Add(c)
        absz := z.Abs64()
        if absz <= r {
            continue
        }
        return float64(i) + 1 - math.Log2(math.Log2(absz)), true
    }
    return math.MaxInt64, false
}
// MandelbrotContinuousValueComputerHigh returns a ValueComputation for the mandelbrot set with high precision input
func MandelbrotContinuousValueComputerHigh(p params.ImageParams) ValueComputation {
    return func(x int, y int) (float64, bool) {
        point := scaleHigh(x, y, p)
        return MandelbrotContinuousValueHigh(&point, p.MaxIter)
    }
}
// MandelbrotOrbitValueLow returns the distance to the closest orbit hit by the computation of iterations corresponding to a complex in the Mandelbrot set in low precision
func MandelbrotOrbitValueLow(c complex128, maxiter int, orbits []params.Orbit) (float64, bool) {
    dist := math.MaxFloat64
    var z complex128
    i := 0
    // Note: this variant bails out at |z| < 4, unlike the continuous-value
    // functions which use the escape radius r.
    for i < maxiter && cmplx.Abs(z) < 4 {
        z = z*z + c
        // Track the minimum trap distance over the whole orbit.
        for _, orbit := range orbits {
            dist = math.Min(dist, orbit.GetOrbitValue(orbit.GetOrbitFastValue(z)))
        }
        i++
    }
    // Points that never escape report no usable distance.
    if i == maxiter {
        return math.MaxFloat64, false
    }
    return dist, true
}
// MandelbrotOrbitValueComputerLow returns a ValueComputation for the
// Mandelbrot set with orbit trapping.
func MandelbrotOrbitValueComputerLow(p params.ImageParams, orbits []params.Orbit) ValueComputation {
    return func(x int, y int) (float64, bool) {
        point := scale(x, y, p)
        return MandelbrotOrbitValueLow(point, p.MaxIter, orbits)
    }
}
// MultibrotContinuousValueLow returns the fractional escape iteration count
// for c under z -> z^power + c (Multibrot, d > 2), or (MaxInt64, false) when
// c never escapes within maxiter iterations.
func MultibrotContinuousValueLow(c complex128, maxiter int, power complex128) (float64, bool) {
    B := math.Pow(2, 1/(real(power)-1))
    z := complex(0, 0)
    for i := 0; i < maxiter; i++ {
        z = cmplx.Pow(z, power) + c
        absz := cmplx.Abs(z)
        if absz <= r {
            continue
        }
        // Smooth iteration count generalized to exponent d = real(power).
        smooth := math.Log(math.Log(absz)/math.Log(B)) / math.Log2(real(power))
        return float64(i) + 1 - smooth, true
    }
    return math.MaxInt64, false
}
// MultibrotContinuousValueComputerLow returns a ValueComputation for the Multibrot set
func MultibrotContinuousValueComputerLow(params params.ImageParams) ValueComputation {
    return func(x int, y int) (float64, bool) {
        // params.Power (a float) is promoted to a complex exponent with
        // zero imaginary part.
        return MultibrotContinuousValueLow(scale(x, y, params), params.MaxIter, complex(params.Power, 0.0))
    }
} | fractales/mandelbrot.go | 0.787155 | 0.59075 | mandelbrot.go | starcoder |
package monthlypayments
import (
"fmt"
"math"
"sort"
"time"
"github.com/jinzhu/now"
"github.com/lealoureiro/mortgage-calculator-api/model"
)
// CalculateLinearMonthlyPayments : calculate the monthly payments for a Linear Mortgage.
// The first payment is computed pro-rata on a 360-day year (from the start
// date through the end of the first full month); later months charge interest
// on the day-weighted average principal. Extra repayments reduce the
// principal on the exact day they occur and can shorten the schedule.
func CalculateLinearMonthlyPayments(r model.MonthlyPaymentsRequest) model.MonthlyPayments {
    result := make([]model.MonthPayment, 0, r.Months)
    // Choose the interest source: a loan-to-value tier table, or explicit
    // dated updates supplied by the caller.
    var interestSet InterestSet
    if r.AutomaticInterestUpdate {
        interestSet = LoanToValueInterestSet{r.MarketValue.AsFloat(), r.LoanToValueInterestTiers}
    } else {
        interestSet = InterestUpdatesSet{
            r.MarketValue.AsFloat(),
            r.InitialInterestRate.AsFloat(),
            r.InterestTierUpdates,
        }
    }
    // Linear mortgage: the repayment portion is a fixed amount per month.
    monthlyRepayment := r.InitialPrincipal / float64(r.Months)
    principal := r.InitialPrincipal
    interestPercentage := 0.0
    interestGrossAmount := 0.0
    interestNetAmount := 0.0
    totalGrossInterest := 0.0
    totalNetInterest := 0.0
    incomeTax := float64(r.IncomeTax) / 100.0
    currentTime := r.StartDate.AsTime()
    endOfMonth := now.With(currentTime).EndOfMonth()
    remainingDaysInitialMonth := daysBetweenDates(currentTime, endOfMonth)
    initialInterest, marketValue := interestSet.GetInterest(currentTime, principal)
    // Pro-rata interest for the partial month before the first full month.
    initialInterestGross := ((principal * initialInterest) / float64(360)) * float64(remainingDaysInitialMonth+1)
    firstDayNextMonth := endOfMonth.Add(time.Nanosecond * time.Duration(1))
    endOfMonth = now.With(firstDayNextMonth).EndOfMonth()
    daysFirstRepaymentMonth := daysBetweenDates(firstDayNextMonth, endOfMonth)
    // First payment covers the partial month plus the first full month.
    firstMonthInterestGross := ((principal*initialInterest)/float64(360))*float64(daysFirstRepaymentMonth) + initialInterestGross
    firstMonthInterestNet := firstMonthInterestGross - (firstMonthInterestGross * incomeTax)
    totalGrossInterest += firstMonthInterestGross
    totalNetInterest += firstMonthInterestNet
    paymentDate := firstDayNextMonth.AddDate(0, 1, 0)
    var payment = model.MonthPayment{}
    payment.Month = 1
    payment.PaymentDate = model.NewJSONTime(paymentDate)
    payment.Repayment = model.NewNumber(monthlyRepayment)
    payment.InterestGrossAmount = model.NewNumber(firstMonthInterestGross)
    payment.InterestNetAmount = model.NewNumber(firstMonthInterestNet)
    payment.Principal = model.NewNumber(principal)
    payment.InterestPercentage = model.NewNumber(initialInterest * 100)
    payment.TotalGross = model.NewNumber(monthlyRepayment + firstMonthInterestGross)
    payment.TotalNet = model.NewNumber(monthlyRepayment + firstMonthInterestNet)
    payment.LoanToValueRatio = model.NewNumber(principal / marketValue * 100)
    payment.MarketValue = model.NewNumber(marketValue)
    result = append(result, payment)
    principal -= monthlyRepayment
    currentTime = paymentDate
    repayments := r.Repayments
    monthAveragePrincipal := 0.0
    // Remaining months: loop until the principal is fully repaid (extra
    // repayments can end the schedule before r.Months).
    for i := 2; principal > 0; i++ {
        repayments, monthAveragePrincipal = processExtraRepayments(repayments, &principal, currentTime, now.With(currentTime).EndOfMonth())
        if principal < monthlyRepayment {
            // Final month: repay only what is left.
            monthlyRepayment = principal
        }
        interestPercentage, marketValue = interestSet.GetInterest(currentTime, principal)
        // Monthly interest = average outstanding principal * annual rate / 12.
        interestGrossAmount = monthAveragePrincipal * (interestPercentage / 12.0)
        interestNetAmount = interestGrossAmount - (interestGrossAmount * incomeTax)
        totalGrossInterest += interestGrossAmount
        totalNetInterest += interestNetAmount
        currentTime = currentTime.AddDate(0, 1, 0)
        var payment = model.MonthPayment{}
        payment.Month = i
        payment.PaymentDate = model.NewJSONTime(currentTime)
        payment.Repayment = model.NewNumber(monthlyRepayment)
        payment.InterestGrossAmount = model.NewNumber(interestGrossAmount)
        payment.InterestNetAmount = model.NewNumber(interestNetAmount)
        payment.Principal = model.NewNumber(principal)
        payment.InterestPercentage = model.NewNumber(interestPercentage * 100)
        payment.TotalGross = model.NewNumber(monthlyRepayment + interestGrossAmount)
        payment.TotalNet = model.NewNumber(monthlyRepayment + interestNetAmount)
        payment.LoanToValueRatio = model.NewNumber(principal / marketValue * 100)
        payment.MarketValue = model.NewNumber(marketValue)
        result = append(result, payment)
        principal -= monthlyRepayment
    }
    return model.MonthlyPayments{
        Payments:           result,
        TotalGrossInterest: model.NewNumber(totalGrossInterest),
        TotalNetInterest:   model.NewNumber(totalNetInterest)}
}
// processExtraRepayments applies every extra repayment dated within [s, e)
// to the principal p, returning the repayments that remain unapplied and the
// day-weighted average principal over that interval. With no pending
// repayments the average is simply the current principal.
func processExtraRepayments(rp []model.Repayment, p *float64, s, e time.Time) ([]model.Repayment, float64) {
    if len(rp) == 0 {
        return rp, *p
    }
    var (
        days  int
        total float64
    )
    for d := s; d.Before(e); d = d.AddDate(0, 0, 1) {
        rp = findRepaymentsForDate(rp, d, p)
        days++
        total += *p
    }
    return rp, total / float64(days)
}
// findRepaymentsForDate subtracts every repayment dated exactly d from the
// principal p and returns the repayments that did not match.
func findRepaymentsForDate(rp []model.Repayment, d time.Time, p *float64) []model.Repayment {
    if len(rp) == 0 {
        return rp
    }
    remaining := make([]model.Repayment, 0, len(rp))
    for _, item := range rp {
        if d.Equal(item.Date.AsTime()) {
            *p -= item.Amount
            continue
        }
        remaining = append(remaining, item)
    }
    return remaining
}
// ValidateInputData : validate the input data request to calculate Mortgage Monthly payments.
// Returns (false, reason) on the first violation found. NOTE: as a side
// effect it sorts r.LoanToValueInterestTiers by percentage when automatic
// interest updates are enabled.
func ValidateInputData(r model.MonthlyPaymentsRequest) (bool, string) {
    if r.AutomaticInterestUpdate && len(r.LoanToValueInterestTiers) == 0 {
        return false, "No loan to value interest tiers provided!"
    }
    if !r.AutomaticInterestUpdate && len(r.InterestTierUpdates) == 0 {
        return false, "No interest tiers month updates provided!"
    }
    if r.MarketValue == nil {
        return false, "Missing initial Market Value!"
    }
    if r.StartDate == nil {
        return false, "Missing start date!"
    }
    if r.AutomaticInterestUpdate {
        sort.Slice(r.LoanToValueInterestTiers[:], func(i, j int) bool {
            return r.LoanToValueInterestTiers[i].Percentage < r.LoanToValueInterestTiers[j].Percentage
        })
        // After sorting, the last tier is the highest; it must cover the
        // initial loan-to-value ratio.
        initialTierPercentage := r.LoanToValueInterestTiers[len(r.LoanToValueInterestTiers)-1].Percentage / 100
        initialRatio := r.InitialPrincipal / r.MarketValue.AsFloat()
        if initialRatio > initialTierPercentage {
            return false, fmt.Sprintf("No interest tier found for initial percentage of %.2f %%", initialRatio*100)
        }
    } else {
        if r.InitialInterestRate == nil {
            return false, "Missing initial interest rate!"
        }
        for _, u := range r.InterestTierUpdates {
            if u.UpdateDate.AsTime().Before(r.StartDate.AsTime()) {
                return false, fmt.Sprintf("Interest update date %d before mortgage start date!", u.UpdateDate)
            }
            if u.Interest == nil && u.MarketValue == nil {
                return false, fmt.Sprintf("Manually update for month %d should contain at least Market Value or Interest Rate!", u.UpdateDate)
            }
        }
    }
    if r.IncomeTax < 0 || r.IncomeTax > 100 {
        return false, "Income tax should be between 0% and 100%!"
    }
    return true, ""
}
// daysBetweenDates returns the whole number of days from t1 to t2, rounded
// to the nearest day (negative when t2 precedes t1).
func daysBetweenDates(t1, t2 time.Time) int32 {
    duration := t2.Sub(t1).Hours() / 24
    return int32(math.Round(duration))
} | monthlypayments/monthly_payments.go | 0.696991 | 0.550909 | monthly_payments.go | starcoder |
package bloom
import (
"github.com/iotexproject/go-pkgs/hash"
"github.com/pkg/errors"
)
type (
    // bloom2048b implements a 2048-bit bloom filter
    bloom2048b struct {
        array   [256]byte // 256 bytes = 2048 bits of filter state
        numHash uint      // number of hash function
    }
)
// newBloom2048 returns an empty 2048-bit bloom filter that probes h bit
// positions per key, where 0 < h <= 16.
func newBloom2048(h uint) (BloomFilter, error) {
    if h == 0 || h > 16 {
        return nil, errors.New("expecting 0 < number of hash functions <= 16")
    }
    filter := &bloom2048b{numHash: h}
    return filter, nil
}
// bloom2048FromBytes reconstructs a 2048-bit bloom filter from its 256-byte
// serialized form, probing h bit positions per key (0 < h <= 16).
func bloom2048FromBytes(b []byte, h uint) (BloomFilter, error) {
    if h == 0 || h > 16 {
        return nil, errors.New("expecting 0 < number of hash functions <= 16")
    }
    if len(b) != 256 {
        return nil, errors.Errorf("wrong length %d, expecting 256", len(b))
    }
    filter := &bloom2048b{numHash: h}
    copy(filter.array[:], b)
    return filter, nil
}
// Size of bloom filter in bits.
// Receiver renamed from b to f for consistency with every other method of
// bloom2048b (Add, Exist, Bytes, setBit, chkBit).
func (f *bloom2048b) Size() uint64 {
    return 2048
}

// NumHash is the number of hash functions used.
func (f *bloom2048b) NumHash() uint64 {
    return uint64(f.numHash)
}

// NumElements is the number of elements in the bloom filter.
func (f *bloom2048b) NumElements() uint64 {
    // this is new API, does not apply to 2048-bit
    return 0
}
// Add 32-byte key into bloom filter. A nil key is silently ignored.
func (f *bloom2048b) Add(key []byte) {
    if key == nil {
        return
    }
    h := hash.Hash256b(key)
    // each 2-byte pair of the 32-byte digest is used as the output of one
    // hash function: first byte selects the byte, second selects the bit
    for i := uint(0); i < f.numHash; i++ {
        f.setBit(h[2*i], h[2*i+1])
    }
}
// Exist checks if a key is in bloom filter. Standard bloom-filter semantics:
// false positives are possible, false negatives are not. A nil key is never
// present.
func (f *bloom2048b) Exist(key []byte) bool {
    if key == nil {
        return false
    }
    h := hash.Hash256b(key)
    // The key may be present only if every probed bit is set.
    for i := uint(0); i < f.numHash; i++ {
        if !f.chkBit(h[2*i], h[2*i+1]) {
            return false
        }
    }
    return true
}
// Bytes returns the bytes of bloom filter. Note: the returned slice aliases
// the filter's backing array, so mutating it mutates the filter.
func (f *bloom2048b) Bytes() []byte {
    return f.array[:]
}
// setBit sets a single bit of the 2048-bit array.
func (f *bloom2048b) setBit(bytePos, bitPos byte) {
    // bytePos indicates which byte to set
    // lower 3-bit of bitPos indicates which bit to set
    mask := 1 << (bitPos & 7)
    f.array[bytePos] |= byte(mask)
}

// chkBit reports whether the bit addressed by (bytePos, bitPos) is set.
func (f *bloom2048b) chkBit(bytePos, bitPos byte) bool {
    mask := 1 << (bitPos & 7)
    return (f.array[bytePos] & byte(mask)) != 0
} | bloom/bloom2048b.go | 0.787237 | 0.515864 | bloom2048b.go | starcoder |
package tables
// OrderedTable stores columns keyed by header name, plus the insertion order
// of those headers so output is deterministic.
type OrderedTable struct {
    data  map[string][]string
    order []string
}

// NewOrderedTable returns an empty table with no columns or rows.
func NewOrderedTable() OrderedTable {
    tbl := OrderedTable{}
    tbl.data = map[string][]string{}
    tbl.order = []string{}
    return tbl
}
// NewOrderedTableFromMatrix builds an ordered table from a two-dimensional
// slice whose first row is the header; the remaining rows become the column
// values, preserving the header order.
func NewOrderedTableFromMatrix(data [][]string) OrderedTable {
    header := data[0]
    rows := data[1:]
    tbl := OrderedTable{
        order: make([]string, len(header)),
        data:  make(map[string][]string, len(header)),
    }
    for i, name := range header {
        tbl.order[i] = name
        tbl.data[name] = extractColumn(rows, i)
    }
    return tbl
}
// AddRow appends one row to the table; values are matched to columns by
// position within the registered column order.
func (t *OrderedTable) AddRow(row []string) {
    for i, key := range t.order {
        col := t.data[key]
        t.data[key] = append(col, row[i])
    }
}
// AddColumn adds a new column to the table object; the first item in the
// slice is the header name. The header is also registered in the column
// order (if not already present) — previously it was only stored in the data
// map, so Matrix/String output, which iterates t.order, never showed it.
func (t *OrderedTable) AddColumn(column []string) {
    header := column[0]
    if _, exists := t.data[header]; !exists {
        t.order = append(t.order, header)
    }
    t.data[header] = column[1:]
}
// Matrix returns the ordered table as a two dimensional slice [][]string,
// using the table's own column order; the first row is the header.
func (t *OrderedTable) Matrix() [][]string {
    return t.MatrixWithOrder(t.order)
}
// MatrixWithOrder returns the table as a two-dimensional slice with the
// given column order as the header row, followed by one row per table row.
// (Idiom fix: `for i, _ := range` replaced by `for i := range`, and the
// repeated matrix[0] lookups use the order parameter directly.)
func (t *OrderedTable) MatrixWithOrder(order []string) [][]string {
    matrix := make([][]string, t.rowCount()+1)
    matrix[0] = order
    for i := range matrix[1:] {
        row := make([]string, len(order))
        for k, key := range order {
            row[k] = t.data[key][i]
        }
        matrix[i+1] = row
    }
    return matrix
}
// String returns the table as a pretty string using the default layout.
func (t *OrderedTable) String() string {
    return Table(t.Matrix(), true)
}

// StringWithOrder returns the table as a string with the given column order.
func (t *OrderedTable) StringWithOrder(order []string) string {
    return Table(t.MatrixWithOrder(order), true)
}

// CustomString returns the table as a string using a provided delimiter and
// frame character.
func (t *OrderedTable) CustomString(delimiter, frame string) string {
    return CustomTable(t.Matrix(), true, delimiter, frame)
}

// CustomStringWithOrder returns the table as a string using the provided
// delimiter and frame characters in the specified column order.
func (t *OrderedTable) CustomStringWithOrder(order []string, delimiter, frame string) string {
    return CustomTable(t.MatrixWithOrder(order), true, delimiter, frame)
}
// rowCount returns the number of rows by inspecting an arbitrary column;
// all columns are assumed to have equal length. Zero for an empty table.
func (t *OrderedTable) rowCount() int {
    if len(t.data) != 0 {
        for _, item := range t.data {
            return len(item)
        }
    }
    return 0
} | tables/ordered.go | 0.80765 | 0.665608 | ordered.go | starcoder |
package trianglem
import (
"errors"
"fmt"
)
// M represents a triangle matrix.
/*
Some optimizations hold where:
1 2 3 4 1=6=11=16= 🤷‍♂️
5 6 7 8 2 = -5 7 = -10
9 10 11 12 3 = -9 8 = -11
13 14 15 16 4 = -13 12 = -15
As we can represent the matrix with only half the values, the memory representation is:
data: 2 3 7 4 8 12
size: 4
This might look convoluted, but we are basically incrementing the matrix by adding a (size-1) column.
*/
type M struct {
    data []int // flattened strictly-lower-triangle values, laid out per toDiagCoordinates
    size int   // logical matrix dimension (size x size)
}

// def is the value returned for every diagonal element.
const def int = 0
// Get the value at position (x,y). Panics when the receiver is nil or a
// coordinate exceeds size. The diagonal is always def (0), and the matrix is
// antisymmetric: Get(x, y) == -Get(y, x).
func (t *M) Get(x, y int) int {
    if t == nil || x > t.size || y > t.size {
        panic("Out of bounds")
    }
    if x == y {
        return def
    }
    if x < y {
        // Antisymmetry: only the lower triangle is stored.
        return -t.Get(y, x)
    }
    return t.data[toDiagCoordinates(x, y)]
}
// Iterate returns column col as a slice of size elements (nil for a nil
// receiver).
func (t *M) Iterate(col int) []int {
    if t == nil {
        return nil
    }
    values := make([]int, 0, t.size)
    for y := 0; y < t.size; y++ {
        values = append(values, t.Get(col, y))
    }
    return values
}
var (
    // ErrOutOfBoundsMatrix represents an error when trying to set outside bounds.
    ErrOutOfBoundsMatrix = errors.New("can't set on an empty matrix")
    // ErrCantSetDiagonal represents an error when trying to set the diagonal.
    ErrCantSetDiagonal = errors.New("can't do operations on the diagonal")
)
// Set the value at position (x,y). The diagonal cannot be set; setting at
// (x,y) with x < y stores the negated value at (y,x) to keep the matrix
// antisymmetric.
func (t *M) Set(x, y, val int) error {
    if t == nil {
        return ErrOutOfBoundsMatrix
    }
    if x == y {
        return ErrCantSetDiagonal
    }
    if x < y {
        return t.Set(y, x, -val)
    }
    t.data[toDiagCoordinates(x, y)] = val
    return nil
}
// Modify replaces the value at (x,y) with mod applied to the current value.
func (t *M) Modify(x, y int, mod func(int) int) error {
    current := t.Get(x, y)
    return t.Set(x, y, mod(current))
}
// String pretty print the matrix: one "|...||" framed line per column.
// A nil receiver renders as "||".
func (t *M) String() string {
    if t == nil {
        return "||"
    }
    ret := ""
    for col := 0; col < t.size; col++ {
        ret += "|"
        for _, v := range t.Iterate(col) {
            ret += fmt.Sprintf("|%4d", v)
        }
        ret += "||\n"
    }
    return ret
}
// Incr the underlying storage by 1.
func (t *M) Incr() *M {
    return t.IncrD(1)
}

// IncrD grows the matrix by delta rows/columns, returning a new matrix that
// preserves the existing values. Safe on a nil receiver (treated as size 0).
func (t *M) IncrD(delta int) *M {
    newSize := delta
    if t != nil {
        newSize += t.size
    }
    target := make([]int, realSize(newSize))
    if t != nil {
        // Existing triangle values occupy a prefix of the new storage.
        copy(target, t.data)
    }
    return &M{data: target, size: newSize}
}
func realSize(size int) int {
// Half of ( Area of a square - Diagonal )
// (x² - x) / 2
return (size*size - size) >> 1
}
func toDiagCoordinates(x, y int) int {
return t(x-1) + y
}
func t(n int) int {
// https://oeis.org/A000217
return ((n + 1) * n) >> 1
} | internal/trianglem/matrix.go | 0.720565 | 0.428293 | matrix.go | starcoder |
package runtime
import (
"github.com/golang/protobuf/proto"
)
// StringP returns a pointer to a string whose pointee is same as the given string value.
func StringP(val string) (*string, error) {
return proto.String(val), nil
}
// BoolP parses the given string representation of a boolean value,
// and returns a pointer to a bool whose value is same as the parsed value.
func BoolP(val string) (*bool, error) {
b, err := Bool(val)
if err != nil {
return nil, err
}
return proto.Bool(b), nil
}
// Float64P parses the given string representation of a floating point number,
// and returns a pointer to a float64 whose value is same as the parsed number.
func Float64P(val string) (*float64, error) {
f, err := Float64(val)
if err != nil {
return nil, err
}
return proto.Float64(f), nil
}
// Float32P parses the given string representation of a floating point number,
// and returns a pointer to a float32 whose value is same as the parsed number.
func Float32P(val string) (*float32, error) {
f, err := Float32(val)
if err != nil {
return nil, err
}
return proto.Float32(f), nil
}
// Int64P parses the given string representation of an integer
// and returns a pointer to a int64 whose value is same as the parsed integer.
func Int64P(val string) (*int64, error) {
i, err := Int64(val)
if err != nil {
return nil, err
}
return proto.Int64(i), nil
}
// Int32P parses the given string representation of an integer
// and returns a pointer to a int32 whose value is same as the parsed integer.
func Int32P(val string) (*int32, error) {
i, err := Int32(val)
if err != nil {
return nil, err
}
return proto.Int32(i), err
}
// Uint64P parses the given string representation of an integer
// and returns a pointer to a uint64 whose value is same as the parsed integer.
func Uint64P(val string) (*uint64, error) {
i, err := Uint64(val)
if err != nil {
return nil, err
}
return proto.Uint64(i), err
}
// Uint32P parses the given string representation of an integer
// and returns a pointer to a uint32 whose value is same as the parsed integer.
func Uint32P(val string) (*uint32, error) {
i, err := Uint32(val)
if err != nil {
return nil, err
}
return proto.Uint32(i), err
} | vendor/github.com/kubernetes-incubator/service-catalog/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go | 0.76074 | 0.434461 | proto2_convert.go | starcoder |
package unordered
import ()
// A Comparable can be checked for equality against others of the same
// underlying type and follows the pattern of Item. You define what
// equality means for your own types by implementing Equal.
type Comparable interface {
	Equal(Comparable) bool
}

// An EqualSet follows the same patterns as Set but holds items that are
// comparable to each other. The Comparable constraint expands the
// capabilities of the EqualSet with Remove, Reduce, Has, Equal, and Diff.
type EqualSet []Comparable
// Add returns a new set with the item appended. Duplicates are allowed.
// NOTE(review): members must also implement Item, otherwise the type
// assertion below (and inside set()) panics - confirm against the Set API.
func (an EqualSet) Add(the Comparable) EqualSet {
	return an.set().Add(the.(Item)).equalset()
}

// Combine merges the items of the receiver with the items of the argument
// sets into a new set. Duplicates are not removed.
func (an EqualSet) Combine(with ...EqualSet) EqualSet {
	return an.set().Combine(setslice(with)...).equalset()
}
// Remove returns a copy of the set with a single matching item removed
// (the first one encountered). Use RemoveAll to drop every match.
func (an EqualSet) Remove(the Comparable) EqualSet {
	if asserting {
		if an == nil {
			panic("unordered: nil set")
		}
		if the == nil {
			panic("unordered: nil arg")
		}
	}
	result := make(EqualSet, 0, len(an))
	removed := false
	for _, member := range an {
		if !removed && member.Equal(the) {
			removed = true
			continue
		}
		result = result.Add(member)
	}
	return result
}

// RemoveAll returns a copy of the set with every matching item removed.
func (an EqualSet) RemoveAll(the Comparable) EqualSet {
	if asserting {
		if an == nil {
			panic("unordered: nil set")
		}
		if the == nil {
			panic("unordered: nil arg")
		}
	}
	result := make(EqualSet, 0, len(an))
	for _, member := range an {
		if member.Equal(the) {
			continue
		}
		result = result.Add(member)
	}
	return result
}
// Reduce returns a copy of the set with all duplicate items eliminated.
func (an EqualSet) Reduce() EqualSet {
	if asserting {
		if an == nil {
			panic("unordered: nil set")
		}
	}
	unique := make(EqualSet, 0, len(an))
	for _, member := range an {
		if !unique.Has(member) {
			unique = unique.Add(member)
		}
	}
	return unique
}
// Has reports whether the set contains an item equal to the given one.
func (an EqualSet) Has(the Comparable) bool {
	if asserting {
		if an == nil {
			panic("unordered: nil set")
		}
		if the == nil {
			panic("unordered: nil arg")
		}
	}
	for i := range an {
		if the.Equal(an[i]) {
			return true
		}
	}
	return false
}
// Equal reports whether both sets contain the same items with the same
// multiplicity. Matching uses Comparable.Equal (assumed to be an
// equivalence relation), not Go's == operator.
func (an EqualSet) Equal(to EqualSet) bool {
	if asserting {
		if an == nil {
			panic("unordered: nil set")
		}
		if to == nil {
			panic("unordered: nil arg")
		}
	}
	if len(an) != len(to) {
		return false
	}
	// With equal lengths it suffices to check that every item of an has the
	// same multiplicity in both sets. Counting via Equal groups
	// distinct-but-Equal values together, which a map keyed on the interface
	// values (the previous implementation) did not: two items that are Equal
	// but not Go-identical landed in separate buckets and produced false
	// negatives, and non-hashable dynamic types panicked on map insert.
	for _, item := range an {
		if occurrences(an, item) != occurrences(to, item) {
			return false
		}
	}
	return true
}

// occurrences counts the members of s that are Equal to item.
func occurrences(s EqualSet, item Comparable) int {
	n := 0
	for _, member := range s {
		if item.Equal(member) {
			n++
		}
	}
	return n
}
// Diff returns the symmetric difference: every item present in exactly one
// of the two sets. Duplicates are not removed.
func (an EqualSet) Diff(from EqualSet) EqualSet {
	if asserting {
		if an == nil {
			panic("unordered: nil set")
		}
		if from == nil {
			panic("unordered: nil arg")
		}
	}
	result := make(EqualSet, 0, len(an))
	for _, member := range an {
		if !from.Has(member) {
			result = result.Add(member)
		}
	}
	for _, member := range from {
		if !an.Has(member) {
			result = result.Add(member)
		}
	}
	return result
}
// set converts the EqualSet to a plain Set.
// NOTE(review): every member must also implement Item or the type
// assertion panics - confirm against the Set API.
func (an EqualSet) set() Set {
	if asserting {
		if an == nil {
			panic("unordered: nil slice arg")
		}
	}
	out := make(Set, len(an))
	for i, item := range an {
		out[i] = item.(Item)
	}
	return out
}

// setslice converts a slice of EqualSets to a slice of Sets.
func setslice(the []EqualSet) []Set {
	if asserting {
		if the == nil {
			panic("unordered: nil slice arg")
		}
	}
	out := make([]Set, len(the))
	for i, set := range the {
		out[i] = set.set()
	}
	return out
}
package query
import (
"config"
"connectordb/datastream"
"github.com/connectordb/pipescript"
"github.com/connectordb/pipescript/transforms" // Load all available transforms
"github.com/connectordb/pipescript/interpolator/interpolators" // Load all available interpolators
)
// init registers pipescript's standard library of transforms and
// interpolators so that Parse can resolve them at query time.
func init() {
	transforms.Register()
	interpolators.Register()
}
//TransformArray runs the given script over the array and returns the
//transformed result. The script is assumed to be cleared or uninitialized.
func TransformArray(script *pipescript.Script, dpa *datastream.DatapointArray) (*datastream.DatapointArray, error) {
	// Wrap the array in a range, convert it to a pipescript iterator,
	// and feed it to the script as input.
	script.SetInput(&DatapointIterator{datastream.NewDatapointArrayRange(*dpa, 0)})

	out := make(datastream.DatapointArray, 0, dpa.Length())
	for {
		next, err := script.Next()
		if err != nil {
			return nil, err
		}
		if next == nil {
			// Input exhausted - the transform is done.
			return &out, nil
		}
		out = append(out, datastream.Datapoint{Timestamp: next.Timestamp, Data: next.Data})
	}
}
//ExtendedTransformRange is an ExtendedDataRange which passes data through a transform.
type ExtendedTransformRange struct {
	Data      datastream.ExtendedDataRange // underlying data source
	Transform *pipescript.Script           // transform pipeline fed from Data
}

//Index returns the index of the next datapoint in the underlying ExtendedDataRange - it does not guarantee that the datapoint won't be filtered by the
//underlying transforms. It also does not guarantee that it is the correct datapoint, as transforms are free to peek into the data sequence.
func (t *ExtendedTransformRange) Index() int64 {
	return t.Data.Index()
}

//Close closes the underlying ExtendedDataRange.
func (t *ExtendedTransformRange) Close() {
	t.Data.Close()
}

//Next runs the transform and returns the next transformed datapoint,
//or nil when the underlying range is exhausted.
func (t *ExtendedTransformRange) Next() (*datastream.Datapoint, error) {
	dp, err := t.Transform.Next()
	if err != nil {
		return nil, err
	}
	if dp == nil {
		return nil, nil
	}
	// Convert pipescript datapoint to datastream datapoint
	return &datastream.Datapoint{Timestamp: dp.Timestamp, Data: dp.Data}, nil
}

// NextArray is here to fit into the ExtendedDataRange interface - given a batch of data from the underlying
// data store, returns the DatapointArray of transformed data. Since transforms can be filters and have no
// concept of batching (yet), we just get up to config BatchSize datapoints the standard way and pretend
// that's our batch.
// TODO: Use PipeScript batching when available
func (t *ExtendedTransformRange) NextArray() (da *datastream.DatapointArray, err error) {
	bs := config.Get().BatchSize
	resultarray := make(datastream.DatapointArray, 0, bs)
	for i := 0; i < bs; i++ {
		dp, err := t.Next()
		if err != nil {
			return nil, err
		}
		if dp == nil {
			// Fewer than bs datapoints remained - return the short batch.
			return &resultarray, nil
		}
		resultarray = append(resultarray, *dp)
	}
	return &resultarray, nil
}

//NewExtendedTransformRange generates a transform range from a transform pipeline.
func NewExtendedTransformRange(dr datastream.ExtendedDataRange, transformpipeline string) (*ExtendedTransformRange, error) {
	t, err := pipescript.Parse(transformpipeline)
	if err != nil {
		return nil, err
	}
	t.SetInput(&DatapointIterator{dr})
	return &ExtendedTransformRange{
		Data:      dr,
		Transform: t,
	}, nil
}
//TransformRange is ExtendedTransformRange's little brother - it works on DataRanges.
type TransformRange struct {
	Data      datastream.DataRange // underlying data source
	Transform *pipescript.Script   // transform pipeline fed from Data
}

//Close closes the underlying DataRange.
func (t *TransformRange) Close() {
	t.Data.Close()
}

//Next runs the transform and returns the next transformed datapoint,
//or nil when the underlying range is exhausted.
func (t *TransformRange) Next() (*datastream.Datapoint, error) {
	dp, err := t.Transform.Next()
	if err != nil {
		return nil, err
	}
	if dp == nil {
		return nil, nil
	}
	// Convert pipescript datapoint to datastream datapoint
	return &datastream.Datapoint{Timestamp: dp.Timestamp, Data: dp.Data}, nil
}

//NewTransformRange generates a transform range from a transform pipeline.
//NOTE(review): the parameter is an ExtendedDataRange even though
//TransformRange only needs DataRange semantics - confirm whether it
//could be widened for callers.
func NewTransformRange(dr datastream.ExtendedDataRange, transformpipeline string) (*TransformRange, error) {
	t, err := pipescript.Parse(transformpipeline)
	if err != nil {
		return nil, err
	}
	t.SetInput(&DatapointIterator{dr})
	return &TransformRange{
		Data:      dr,
		Transform: t,
	}, nil
}
package mock
var exampleCurrentWeather string = `
[
{
"ApparentTemperature": {
"Imperial": {
"Unit": "F",
"UnitType": 18,
"Value": 42
},
"Metric": {
"Unit": "C",
"UnitType": 17,
"Value": 5.6
}
},
"Ceiling": {
"Imperial": {
"Unit": "ft",
"UnitType": 0,
"Value": 3800
},
"Metric": {
"Unit": "m",
"UnitType": 5,
"Value": 1158
}
},
"CloudCover": 100,
"DewPoint": {
"Imperial": {
"Unit": "F",
"UnitType": 18,
"Value": 23
},
"Metric": {
"Unit": "C",
"UnitType": 17,
"Value": -4.9
}
},
"EpochTime": 1588076760,
"HasPrecipitation": false,
"IsDayTime": true,
"Link": "http://www.accuweather.com/en/fi/niittykumpu/133030/current-weather/133030?lang=en-us",
"LocalObservationDateTime": "2020-04-28T15:26:00+03:00",
"MobileLink": "http://m.accuweather.com/en/fi/niittykumpu/133030/current-weather/133030?lang=en-us",
"ObstructionsToVisibility": "",
"Past24HourTemperatureDeparture": {
"Imperial": {
"Unit": "F",
"UnitType": 18,
"Value": -4
},
"Metric": {
"Unit": "C",
"UnitType": 17,
"Value": -2.5
}
},
"Precip1hr": {
"Imperial": {
"Unit": "in",
"UnitType": 1,
"Value": 0
},
"Metric": {
"Unit": "mm",
"UnitType": 3,
"Value": 0
}
},
"PrecipitationSummary": {
"Past12Hours": {
"Imperial": {
"Unit": "in",
"UnitType": 1,
"Value": 0
},
"Metric": {
"Unit": "mm",
"UnitType": 3,
"Value": 0
}
},
"Past18Hours": {
"Imperial": {
"Unit": "in",
"UnitType": 1,
"Value": 0
},
"Metric": {
"Unit": "mm",
"UnitType": 3,
"Value": 0
}
},
"Past24Hours": {
"Imperial": {
"Unit": "in",
"UnitType": 1,
"Value": 0.01
},
"Metric": {
"Unit": "mm",
"UnitType": 3,
"Value": 0.2
}
},
"Past3Hours": {
"Imperial": {
"Unit": "in",
"UnitType": 1,
"Value": 0
},
"Metric": {
"Unit": "mm",
"UnitType": 3,
"Value": 0
}
},
"Past6Hours": {
"Imperial": {
"Unit": "in",
"UnitType": 1,
"Value": 0
},
"Metric": {
"Unit": "mm",
"UnitType": 3,
"Value": 0
}
},
"Past9Hours": {
"Imperial": {
"Unit": "in",
"UnitType": 1,
"Value": 0
},
"Metric": {
"Unit": "mm",
"UnitType": 3,
"Value": 0
}
},
"PastHour": {
"Imperial": {
"Unit": "in",
"UnitType": 1,
"Value": 0
},
"Metric": {
"Unit": "mm",
"UnitType": 3,
"Value": 0
}
},
"Precipitation": {
"Imperial": {
"Unit": "in",
"UnitType": 1,
"Value": 0
},
"Metric": {
"Unit": "mm",
"UnitType": 3,
"Value": 0
}
}
},
"PrecipitationType": null,
"Pressure": {
"Imperial": {
"Unit": "inHg",
"UnitType": 12,
"Value": 29.65
},
"Metric": {
"Unit": "mb",
"UnitType": 14,
"Value": 1004
}
},
"PressureTendency": {
"Code": "S",
"LocalizedText": "Steady"
},
"RealFeelTemperature": {
"Imperial": {
"Unit": "F",
"UnitType": 18,
"Value": 32
},
"Metric": {
"Unit": "C",
"UnitType": 17,
"Value": -0.1
}
},
"RealFeelTemperatureShade": {
"Imperial": {
"Unit": "F",
"UnitType": 18,
"Value": 32
},
"Metric": {
"Unit": "C",
"UnitType": 17,
"Value": -0.1
}
},
"RelativeHumidity": 49,
"Temperature": {
"Imperial": {
"Unit": "F",
"UnitType": 18,
"Value": 41
},
"Metric": {
"Unit": "C",
"UnitType": 17,
"Value": 4.9
}
},
"TemperatureSummary": {
"Past12HourRange": {
"Maximum": {
"Imperial": {
"Unit": "F",
"UnitType": 18,
"Value": 45
},
"Metric": {
"Unit": "C",
"UnitType": 17,
"Value": 7
}
},
"Minimum": {
"Imperial": {
"Unit": "F",
"UnitType": 18,
"Value": 36
},
"Metric": {
"Unit": "C",
"UnitType": 17,
"Value": 2
}
}
},
"Past24HourRange": {
"Maximum": {
"Imperial": {
"Unit": "F",
"UnitType": 18,
"Value": 46
},
"Metric": {
"Unit": "C",
"UnitType": 17,
"Value": 7.7
}
},
"Minimum": {
"Imperial": {
"Unit": "F",
"UnitType": 18,
"Value": 36
},
"Metric": {
"Unit": "C",
"UnitType": 17,
"Value": 2
}
}
},
"Past6HourRange": {
"Maximum": {
"Imperial": {
"Unit": "F",
"UnitType": 18,
"Value": 45
},
"Metric": {
"Unit": "C",
"UnitType": 17,
"Value": 7
}
},
"Minimum": {
"Imperial": {
"Unit": "F",
"UnitType": 18,
"Value": 41
},
"Metric": {
"Unit": "C",
"UnitType": 17,
"Value": 4.9
}
}
}
},
"UVIndex": 1,
"UVIndexText": "Low",
"Visibility": {
"Imperial": {
"Unit": "mi",
"UnitType": 2,
"Value": 10
},
"Metric": {
"Unit": "km",
"UnitType": 6,
"Value": 16.1
}
},
"WeatherIcon": 7,
"WeatherText": "Cloudy",
"WetBulbTemperature": {
"Imperial": {
"Unit": "F",
"UnitType": 18,
"Value": 34
},
"Metric": {
"Unit": "C",
"UnitType": 17,
"Value": 1.2
}
},
"Wind": {
"Direction": {
"Degrees": 158,
"English": "SSE",
"Localized": "SSE"
},
"Speed": {
"Imperial": {
"Unit": "mi/h",
"UnitType": 9,
"Value": 13.3
},
"Metric": {
"Unit": "km/h",
"UnitType": 7,
"Value": 21.4
}
}
},
"WindChillTemperature": {
"Imperial": {
"Unit": "F",
"UnitType": 18,
"Value": 34
},
"Metric": {
"Unit": "C",
"UnitType": 17,
"Value": 1.1
}
},
"WindGust": {
"Speed": {
"Imperial": {
"Unit": "mi/h",
"UnitType": 9,
"Value": 15.9
},
"Metric": {
"Unit": "km/h",
"UnitType": 7,
"Value": 25.6
}
}
}
}
]
`
package main
import (
"fmt"
"io/ioutil"
"math"
"sort"
)
// position is a 2-D grid coordinate (x = column, y = row; y grows downward).
type position struct {
	x int
	y int
}

// inputToAsteroid parses an ASCII asteroid map ('#' = asteroid, '.' = empty,
// one row per line) into a list of asteroid positions. Carriage returns in
// CRLF input are ignored.
func inputToAsteroid(data string) []position {
	asteroids := make([]position, 0)
	row := 0
	col := 0
	for i := range data {
		// Rune literals instead of the previous magic ASCII codes.
		switch data[i] {
		case '\r':
			// skip, so CRLF and LF input behave identically
		case '\n':
			row++
			col = 0
		case '#':
			asteroids = append(asteroids, position{x: col, y: row})
			col++
		case '.':
			col++
		}
	}
	return asteroids
}

// getInput reads the puzzle input from input.txt, panicking on failure.
func getInput() []byte {
	d, err := ioutil.ReadFile("input.txt")
	if err != nil {
		panic(err)
	}
	return d
}

// getDistance returns the Euclidean distance between a and b.
func getDistance(a position, b position) float64 {
	dx := float64(a.x - b.x)
	dy := float64(a.y - b.y)
	return math.Sqrt(dx*dx + dy*dy)
}

// getAngle returns the clockwise angle in radians of b as seen from a,
// normalized to [0, 2π) with 0 pointing straight up (towards smaller y).
func getAngle(a position, b position) float64 {
	angle := math.Atan2(float64(a.x-b.x), float64(a.y-b.y)) * -1
	if angle < 0 {
		// Fold counter-clockwise (negative) angles into [0, 2π).
		angle *= -1
		angle = (2 * math.Pi) - angle
	}
	return angle
}

// checkLos returns the index of the asteroid that has direct line of sight
// to the most other asteroids, together with that count. Asteroids sharing
// the same angle block each other, so each distinct angle counts once.
// Ties keep the first asteroid; (-1, 0) is returned for an empty input.
func checkLos(asteroids []position) (int, int) {
	max := 0
	maxIdx := -1
	for i := range asteroids {
		// Set of distinct angles: O(1) membership instead of the previous
		// linear scan over a slice of seen slopes.
		seen := make(map[float64]bool)
		for j := range asteroids {
			if j == i {
				continue
			}
			seen[getAngle(asteroids[i], asteroids[j])] = true
		}
		if len(seen) > max {
			max = len(seen)
			maxIdx = i
		}
	}
	return maxIdx, max
}
// sweepAndDestroy simulates a rotating laser at origin: asteroids are
// grouped by their angle from origin, each group is kept sorted by distance,
// and the laser sweeps clockwise destroying the closest remaining asteroid
// at each angle per pass. The returned slice lists the asteroids in
// destruction order (origin itself is excluded).
// NOTE(review): the loop bound len(asteroids)-1 assumes origin is one of
// the asteroids - confirm at the call site.
func sweepAndDestroy(asteroids []position, origin position) (destroyed []position) {
	targets := make(map[float64][]position)
	for _, a := range asteroids {
		if a.x == origin.x && a.y == origin.y {
			continue
		}
		// Calculate the clockwise angle wrt origin (0 = straight up),
		// as produced by getAngle.
		angle := getAngle(origin, a)
		// Create the bucket for this angle if it's not already there
		if _, ok := targets[angle]; !ok {
			targets[angle] = make([]position, 0)
		}
		// Store the target
		targets[angle] = append(targets[angle], a)
		// Sort ascending from closest to origin
		sort.Slice(targets[angle], func(i, j int) bool {
			return getDistance(origin, targets[angle][i]) < getDistance(origin, targets[angle][j])
		})
	}
	// Order the angles ascending (clockwise sweep order)
	order := make([]float64, 0)
	for d := range targets {
		order = append(order, d)
	}
	sort.Float64s(order)
	// Kill 'em all: cycle through the angles, popping the nearest target
	// at each angle until every asteroid is gone.
	i := 0
	for len(destroyed) < len(asteroids)-1 {
		d := order[i%len(order)]
		if len(targets[d]) > 0 {
			destroyed = append(destroyed, targets[d][0])
			targets[d] = targets[d][1:]
		}
		i++
	}
	return destroyed
}
// main locates the best monitoring station, vaporizes asteroids from it,
// and prints x*100 + y of the 200th asteroid destroyed.
func main() {
	data := getInput()
	asteroids := inputToAsteroid(string(data))
	idx, _ := checkLos(asteroids)
	sequence := sweepAndDestroy(asteroids, asteroids[idx])
	fmt.Println(sequence[199].x*100 + sequence[199].y)
}
package shuffle
import (
"math/rand"
"sort"
)
// Interface is a type, typically a collection, whose elements can be
// reordered by the shuffle routines in this package.
type Interface interface {
	// Len is the number of elements in the collection.
	Len() int
	// Swap swaps the elements with indexes i and j.
	Swap(i, j int)
}

// Int64Slice attaches the methods of sort.Interface to []int64,
// sorting in increasing order.
type Int64Slice []int64

func (s Int64Slice) Len() int           { return len(s) }
func (s Int64Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// SortInt64s sorts a slice of int64s in increasing order.
func SortInt64s(a []int64) { sort.Sort(Int64Slice(a)) }

// Shuffle reorders data in place using the Fisher-Yates algorithm,
// drawing randomness from the package-level source.
func Shuffle(data Interface) {
	for i := data.Len() - 1; i >= 0; i-- {
		data.Swap(i, rand.Intn(i+1))
	}
}

// Ints shuffles a slice of ints.
func Ints(a []int) { Shuffle(sort.IntSlice(a)) }

// Int64s shuffles a slice of int64s.
func Int64s(a []int64) { Shuffle(Int64Slice(a)) }

// Float64s shuffles a slice of float64s.
func Float64s(a []float64) { Shuffle(sort.Float64Slice(a)) }

// Strings shuffles a slice of strings.
func Strings(a []string) { Shuffle(sort.StringSlice(a)) }
// A Shuffler provides Shuffle
type Shuffler rand.Rand
// New returns a new Shuffler that uses random values from src
// to shuffle
func New(src rand.Source) *Shuffler { return (*Shuffler)(rand.New(src)) }
// Shuffle shuffles Data.
func (s *Shuffler) Shuffle(data Interface) {
n := data.Len()
for i := n - 1; i >= 0; i-- {
j := (*rand.Rand)(s).Intn(i + 1)
data.Swap(i, j)
}
}
// Ints shuffles a slice of ints.
func (s *Shuffler) Ints(a []int) { s.Shuffle(sort.IntSlice(a)) }
// Float64s shuffles a slice of float64s.
func (s *Shuffler) Float64s(a []float64) { s.Shuffle(sort.Float64Slice(a)) }
// Strings shuffles a slice of strings.
func (s *Shuffler) Strings(a []string) { s.Shuffle(sort.StringSlice(a)) } | vendor/github.com/shogo82148/go-shuffle/shuffle.go | 0.68215 | 0.508544 | shuffle.go | starcoder |
package timex
import (
"fmt"
"time"
)
// Interval describes a time interval between start and end time values.
type Interval struct {
	start time.Time
	end   time.Time
}

// NewInterval returns a new instance of Interval between start and end.
// Input is order-independent: the smaller value is used as the start and
// the bigger as the end.
func NewInterval(t1, t2 time.Time) Interval {
	i := Interval{start: t1, end: t2}
	if t2.Before(t1) {
		i = Interval{start: t2, end: t1}
	}
	return i
}

// Start returns i's start value.
func (i Interval) Start() time.Time {
	return i.start
}

// End returns i's end value.
func (i Interval) End() time.Time {
	return i.end
}

// Days returns the duration of the interval in days.
// NOTE(review): Day (like Hour, Minute, etc. below) is a duration constant
// declared elsewhere in this package - confirm its definition.
func (i Interval) Days() float64 {
	return float64(i.Nanoseconds()) / float64(Day)
}

// Hours returns the duration of the interval in hours.
func (i Interval) Hours() float64 {
	return float64(i.Nanoseconds()) / float64(Hour)
}

// Minutes returns the duration of the interval in minutes.
func (i Interval) Minutes() float64 {
	return float64(i.Nanoseconds()) / float64(Minute)
}

// Seconds returns the duration of the interval in seconds.
func (i Interval) Seconds() float64 {
	return float64(i.Nanoseconds()) / float64(Second)
}

// Milliseconds returns the duration of the interval in milliseconds.
func (i Interval) Milliseconds() float64 {
	return float64(i.Nanoseconds()) / float64(Millisecond)
}

// Microseconds returns the duration of the interval in microseconds.
func (i Interval) Microseconds() float64 {
	return float64(i.Nanoseconds()) / float64(Microsecond)
}

// Nanoseconds returns the duration of the interval in nanoseconds.
func (i Interval) Nanoseconds() int64 {
	return i.end.Sub(i.start).Nanoseconds()
}

// IsZero reports whether i's start and end values are both zero time values.
func (i Interval) IsZero() bool {
	return i.start.IsZero() && i.end.IsZero()
}

// Contains reports whether t is within i (closed interval strategy,
// start <= t <= end). Comparison uses UnixNano, so monotonic clock
// readings are ignored.
func (i Interval) Contains(t time.Time) bool {
	return t.UnixNano() >= i.start.UnixNano() && t.UnixNano() <= i.end.UnixNano()
}

// Duration returns i's duration as a time.Duration value.
func (i Interval) Duration() time.Duration {
	return time.Duration(i.Nanoseconds())
}

// String returns the RFC 3339 string representation of i.
func (i Interval) String() string {
	return fmt.Sprintf("%s - %s", i.start.Format(time.RFC3339), i.end.Format(time.RFC3339))
}

// StringDates returns the string representation of i using the date part only.
func (i Interval) StringDates() string {
	return fmt.Sprintf("%s - %s", i.start.Format("2006-01-02"), i.end.Format("2006-01-02"))
}

// HalfOpenEnd returns i as a half-open interval where the open side is on
// the end [start, end): it returns a copy of i with end = end - 1 nanosecond.
// NOTE(review): for a zero-length interval the shifted end precedes start,
// so NewInterval swaps the bounds - confirm that is intended.
func (i Interval) HalfOpenEnd() Interval {
	return NewInterval(i.Start(), i.End().Add(-1))
}

// IsValid reports whether i's start and end values are both non-zero.
func (i Interval) IsValid() bool {
	return !i.start.IsZero() && !i.end.IsZero()
}

// ExtendStart extends i backwards in time by d, i.e. start = start - d.
// Example:
//   fmt.Println(i.Start()) // 2021-07-03T10:30:00Z
//   i.ExtendStart(48 * time.Hour)
//   fmt.Println(i.Start()) // 2021-07-01T10:30:00Z
func (i *Interval) ExtendStart(d time.Duration) {
	i.start = i.start.Add(-d)
}

// ExtendEnd extends i forwards in time by d, i.e. end = end + d.
// Example:
//   fmt.Println(i.End()) // 2021-07-03T10:30:00Z
//   i.ExtendEnd(48 * time.Hour)
//   fmt.Println(i.End()) // 2021-07-05T10:30:00Z
func (i *Interval) ExtendEnd(d time.Duration) {
	i.end = i.end.Add(d)
}
package tensor
import (
"unsafe"
"github.com/lordlarker/nune/internal/slice"
"github.com/lordlarker/nune/internal/utils"
)
// Ravel returns a copy of the Tensor's 1-dimensional data buffer.
func (t *Tensor[T]) Ravel() []T {
	return t.storage.Load()
}

// Numel returns the number of elements in the Tensor's data buffer.
func (t *Tensor[T]) Numel() int {
	return t.storage.Numel()
}

// Numby returns the size in bytes occupied by all elements
// of the Tensor's underlying data buffer.
func (t *Tensor[T]) Numby() uintptr {
	return t.storage.Numby()
}

// Rank returns the Tensor's rank
// (the number of axes in the Tensor's shape).
func (t *Tensor[T]) Rank() int {
	return t.layout.Rank()
}

// Shape returns a copy of the Tensor's shape.
func (t *Tensor[T]) Shape() []int {
	return slice.Copy(t.layout.Shape())
}

// Strides returns a copy of the Tensor's strides.
func (t *Tensor[T]) Strides() []int {
	return slice.Copy(t.layout.Strides())
}

// Size returns the Tensor's total shape size (product of all dimensions).
// If an axis is specified, the number of dimensions at that axis is
// returned instead. At most one axis argument is accepted.
func (t *Tensor[T]) Size(axis ...int) int {
	args := len(axis)
	assertArgsBounds(args, 1)

	if args == 0 {
		return slice.Prod(t.layout.Shape())
	} else {
		assertAxisBounds(axis[0], t.Rank())
		return t.Shape()[axis[0]]
	}
}

// MemSize returns the size in bytes occupied by all fields
// that make up the Tensor: the data buffer plus the shape and
// strides slices (Rank() ints each).
func (t *Tensor[T]) MemSize() uintptr {
	var i int

	shapeSize := unsafe.Sizeof(i) * uintptr(t.Rank())
	stridesSize := unsafe.Sizeof(i) * uintptr(t.Rank())

	return t.Numby() + shapeSize + stridesSize
}

// Broadable returns whether or not the Tensor can be broadcasted to the
// given shape: missing leading axes are treated as 1, and every axis of
// the (padded) current shape must either match the target or be 1.
func (t *Tensor[T]) Broadable(shape ...int) bool {
	// Reject invalid shapes or argument counts without panicking.
	if utils.Panics(func() {
		assertGoodShape(shape...)
		assertArgsBounds(len(shape), t.Rank()-1)
	}) {
		return false
	}

	var s []int

	if t.Rank() < len(shape) {
		// Left-pad the current shape with 1s up to the target rank.
		s = slice.WithLen[int](len(shape))
		for i := 0; i < len(shape)-t.Rank(); i++ {
			s[i] = 1
		}
		copy(s[len(shape)-t.Rank():], t.layout.Shape())
	} else {
		s = t.Shape()
	}

	for i := 0; i < len(shape); i++ {
		if s[i] != shape[i] && s[i] != 1 {
			return false
		}
	}

	return true
}
package ml
import (
"github.com/cpmech/gosl/io"
"github.com/cpmech/gosl/la"
"github.com/cpmech/gosl/plt"
)
// PlotterClass defines a plotter to plot classification data: 2-D feature
// scatter plots with class-colored markers, plus class centroids.
type PlotterClass struct {

	// input
	data    *Data // x-data (samples-by-features matrix)
	classes []int // y-data (class label per sample)

	// constants
	MgridNpts int // number of points for meshgrid (for contours)

	// arguments
	ArgsYclasses  map[int]*plt.A // maps y classes [0, 1, 2, ...] to plot arguments
	ArgsCentroids *plt.A         // args for the centroid star markers
	ArgsCircle1   *plt.A         // args for the inner circle drawn around each centroid
	ArgsCircle2   *plt.A         // args for the outer circle drawn around each centroid
}

// NewPlotterClass returns a new plotter for the given data, sample class
// labels, and number of classes.
func NewPlotterClass(data *Data, classes []int, nClasses int) (o *PlotterClass) {

	// input
	o = new(PlotterClass)
	o.data = data
	o.classes = classes

	// constants
	o.MgridNpts = 21

	// arguments: one marker/color style per class
	o.ArgsYclasses = make(map[int]*plt.A)
	for k := 0; k < nClasses; k++ {
		o.ArgsYclasses[k] = &plt.A{C: plt.C(k, 0), M: plt.M(k, 2), NoClip: true}
	}
	o.ArgsCentroids = &plt.A{Ls: "None", M: "*", Ms: 10, Mec: "k", NoClip: true}
	o.ArgsCircle1 = &plt.A{M: "o", Void: true, Ms: 13, Mec: "k", Mew: 4.4, NoClip: true}
	o.ArgsCircle2 = &plt.A{M: "o", Void: true, Ms: 13, Mec: "w", Mew: 1.3, NoClip: true}
	return
}

// Data plots the samples as a scatter of feature iFeature vs feature
// jFeature, colored by class.
// NOTE(review): the binary flag is currently unused - confirm intent.
func (o *PlotterClass) Data(iFeature, jFeature int, binary bool) {
	for iSample := 0; iSample < o.data.Nsamples; iSample++ {
		// modulo cycles styles when there are more classes than styles
		k := o.classes[iSample] % len(o.ArgsYclasses)
		args := o.ArgsYclasses[k]
		ui := o.data.X.Get(iSample, iFeature)
		vi := o.data.X.Get(iSample, jFeature)
		plt.PlotOne(ui, vi, args)
	}
	plt.HideTRborders()
	plt.Gll(io.Sf("$x_{%d}$", iFeature), io.Sf("$x_{%d}$", jFeature), nil)
}

// Centroids plots the centroids of the classes: a class-colored star with
// two circle outlines and the class index as a text label.
// NOTE(review): only the first two coordinates of each centroid are used.
func (o *PlotterClass) Centroids(centroids []la.Vector) {
	nClasses := len(centroids)
	for i := 0; i < nClasses; i++ {
		k := i % len(o.ArgsYclasses)
		o.ArgsCentroids.C = o.ArgsYclasses[k].C
		u, v := centroids[i][0], centroids[i][1]
		plt.PlotOne(u, v, o.ArgsCentroids)
		plt.PlotOne(u, v, o.ArgsCircle1)
		plt.PlotOne(u, v, o.ArgsCircle2)
		plt.Text(u, v, io.Sf("%d", i), &plt.A{Fsz: 8})
	}
}
package pedantic
const (
	// General-category character classes (regexp syntax).
	unicodeLetter       = `\p{L}`
	unicodeLetterNumber = `[\p{L}\p{N}]`

	// Control, surrogate and replacement ranges.
	unicodeC1Control     = `\x80-\x9f`
	unicodeSurrogateHigh = `\x{d800}-\x{dbff}`
	unicodeSurrogateLow  = `\x{dc00}-\x{dfff}`
	unicodeSurrogate     = `\x{d800}-\x{dfff}`
	unicodeReplacement   = `\x{fffd}`
	// every codepoint that is in XXfffe-XXffff is a set of noncharacters.
	unicodeNoncharacter = `\x{fdd0}-\x{fdef}\x{00fffe}-\x{00ffff}\x{01fffe}-\x{01ffff}\x{02fffe}-\x{02ffff}\x{03fffe}-\x{03ffff}\x{04fffe}-\x{04ffff}\x{05fffe}-\x{05ffff}\x{06fffe}-\x{06ffff}\x{07fffe}-\x{07ffff}\x{08fffe}-\x{08ffff}\x{09fffe}-\x{09ffff}\x{0afffe}-\x{0affff}\x{0bfffe}-\x{0bffff}\x{0cfffe}-\x{0cffff}\x{0dfffe}-\x{0dffff}\x{0efffe}-\x{0effff}\x{0ffffe}-\x{0fffff}\x{10fffe}-\x{10ffff}`

	// Basic Multilingual Plane minus the excluded ranges above, plus the
	// 16 supplementary planes (each ending at xxfffd to skip noncharacters).
	unicodeBMP     = `\xa0-\x{d7ff}\x{e000}-\x{fdcf}\x{fdf0}-\x{fffc}`
	unicodeBlock1  = `\x{010000}-\x{01fffd}`
	unicodeBlock2  = `\x{020000}-\x{02fffd}`
	unicodeBlock3  = `\x{030000}-\x{03fffd}`
	unicodeBlock4  = `\x{040000}-\x{04fffd}`
	unicodeBlock5  = `\x{050000}-\x{05fffd}`
	unicodeBlock6  = `\x{060000}-\x{06fffd}`
	unicodeBlock7  = `\x{070000}-\x{07fffd}`
	unicodeBlock8  = `\x{080000}-\x{08fffd}`
	unicodeBlock9  = `\x{090000}-\x{09fffd}`
	unicodeBlock10 = `\x{0a0000}-\x{0afffd}`
	unicodeBlock11 = `\x{0b0000}-\x{0bfffd}`
	unicodeBlock12 = `\x{0c0000}-\x{0cfffd}`
	unicodeBlock13 = `\x{0d0000}-\x{0dfffd}`
	unicodeBlock14 = `\x{0e0000}-\x{0efffd}`
	unicodeBlock15 = `\x{0f0000}-\x{0ffffd}`
	unicodeBlock16 = `\x{100000}-\x{10fffd}`

	// skips Surrogate, Replacement and Noncharacters (U+FDD0-U+FDEF and U+xxFFF0-U+xxFFFF)
	unicodeInvalid = unicodeSurrogate + unicodeReplacement + unicodeNoncharacter
	// skips above and C1 Control
	unicodeExclude = unicodeC1Control + unicodeInvalid

	// All valid non-ASCII codepoints: the BMP subset plus all 16 blocks.
	unicodeNonASCII = unicodeBMP + unicodeBlock1 + unicodeBlock2 + unicodeBlock3 + unicodeBlock4 + unicodeBlock5 + unicodeBlock6 + unicodeBlock7 + unicodeBlock8 + unicodeBlock9 + unicodeBlock10 + unicodeBlock11 + unicodeBlock12 + unicodeBlock13 + unicodeBlock14 + unicodeBlock15 + unicodeBlock16
)
package pointer
import "time"
// Bool returns a pointer to a copy of the provided bool value.
func Bool(val bool) *bool { return &val }

// Uint returns a pointer to a copy of the provided uint value.
func Uint(val uint) *uint { return &val }

// Uint8 returns a pointer to a copy of the provided uint8 value.
func Uint8(val uint8) *uint8 { return &val }

// Uint16 returns a pointer to a copy of the provided uint16 value.
func Uint16(val uint16) *uint16 { return &val }

// Uint32 returns a pointer to a copy of the provided uint32 value.
func Uint32(val uint32) *uint32 { return &val }

// Uint64 returns a pointer to a copy of the provided uint64 value.
func Uint64(val uint64) *uint64 { return &val }

// Uintptr returns a pointer to a copy of the provided uintptr value.
func Uintptr(val uintptr) *uintptr { return &val }

// Int returns a pointer to a copy of the provided int value.
func Int(val int) *int { return &val }

// Int8 returns a pointer to a copy of the provided int8 value.
func Int8(val int8) *int8 { return &val }

// Int16 returns a pointer to a copy of the provided int16 value.
func Int16(val int16) *int16 { return &val }

// Int32 returns a pointer to a copy of the provided int32 value.
func Int32(val int32) *int32 { return &val }

// Int64 returns a pointer to a copy of the provided int64 value.
func Int64(val int64) *int64 { return &val }

// Float32 returns a pointer to a copy of the provided float32 value.
func Float32(val float32) *float32 { return &val }

// Float64 returns a pointer to a copy of the provided float64 value.
func Float64(val float64) *float64 { return &val }

// Rune returns a pointer to a copy of the provided rune value.
func Rune(val rune) *rune { return &val }

// Byte returns a pointer to a copy of the provided byte value.
func Byte(val byte) *byte { return &val }

// String returns a pointer to a copy of the provided string value.
func String(val string) *string { return &val }

// Complex64 returns a pointer to a copy of the provided complex64 value.
func Complex64(val complex64) *complex64 { return &val }

// Complex128 returns a pointer to a copy of the provided complex128 value.
func Complex128(val complex128) *complex128 { return &val }

// Interface returns a pointer to a copy of the provided interface{} value.
func Interface(val interface{}) *interface{} { return &val }

// Duration returns a pointer to a copy of the provided time.Duration value.
func Duration(val time.Duration) *time.Duration { return &val }

// Time returns a pointer to a copy of the provided time.Time value.
func Time(val time.Time) *time.Time { return &val }
package matrix
import (
"fmt"
"github.com/kieron-pivotal/rays/tuple"
)
// Matrix is a dense, row-major matrix of float64 values.
type Matrix struct {
	rows   int
	cols   int
	values []float64
}

// New creates a rows x cols matrix initialised from vals in row-major
// order. Missing trailing values default to zero; the input slice is
// copied, never aliased.
func New(rows, cols int, vals ...float64) Matrix {
	valsCopy := make([]float64, rows*cols)
	copy(valsCopy, vals)
	return Matrix{
		rows:   rows,
		cols:   cols,
		values: valsCopy,
	}
}

// Rows returns the number of rows.
func (m Matrix) Rows() int {
	return m.rows
}

// Cols returns the number of columns.
func (m Matrix) Cols() int {
	return m.cols
}

// Get returns the value at row r, column c. It panics when the indices
// exceed the matrix dimensions; negative indices are not checked.
func (m Matrix) Get(r, c int) float64 {
	if r > m.rows-1 || c > m.cols-1 {
		panic(fmt.Sprintf("row %d, col %d not contained in a %dx%d matrix", r, c, m.rows, m.cols))
	}
	idx := r*m.cols + c
	if idx > len(m.values)-1 {
		// defensive: New always allocates rows*cols values, so this branch
		// should be unreachable for matrices built through New
		return 0.0
	}
	return m.values[idx]
}

// Set stores v at row r, column c.
func (m Matrix) Set(r, c int, v float64) {
	m.values[r*m.cols+c] = v
}

// Equals reports whether both matrices have identical dimensions and all
// corresponding entries are equal within a small epsilon.
func (m Matrix) Equals(n Matrix) bool {
	if m.rows != n.rows || m.cols != n.cols {
		return false
	}
	for r := 0; r < m.rows; r++ {
		for c := 0; c < m.cols; c++ {
			if !floatEquals(m.Get(r, c), n.Get(r, c)) {
				return false
			}
		}
	}
	return true
}

// floatEquals compares two floats with an absolute tolerance.
func floatEquals(a, b float64) bool {
	const EPSILON = 0.00001
	diff := a - b
	if diff < 0 {
		diff *= -1
	}
	return diff < EPSILON
}

// Multiply returns the matrix product m x n; m.cols must equal n.rows.
func (m Matrix) Multiply(n Matrix) Matrix {
	out := New(m.rows, n.cols)
	for r := 0; r < m.rows; r++ {
		for c := 0; c < n.cols; c++ {
			var v float64
			for i := 0; i < m.cols; i++ {
				// Accumulate the dot product of row r of m and column c of n.
				// (The previous version also added out's current cell on every
				// iteration — always zero here since Set runs after the loop,
				// but incorrect and fragile if out were ever pre-populated.)
				v += m.values[r*m.cols+i] * n.values[i*n.cols+c]
			}
			out.Set(r, c, v)
		}
	}
	return out
}
// TupleMultiply multiplies the matrix by a 4-component tuple, treating the
// tuple as a column vector. The index arithmetic uses m.cols as the row
// stride but only ever reads 4 rows x 4 columns — assumes m is a 4x4
// transform (TODO confirm for other sizes).
func (m Matrix) TupleMultiply(t tuple.Tuple) tuple.Tuple {
	c := m.cols
	x := m.values[0]*t.X + m.values[1]*t.Y + m.values[2]*t.Z + m.values[3]*t.W
	y := m.values[c]*t.X + m.values[c+1]*t.Y + m.values[c+2]*t.Z + m.values[c+3]*t.W
	z := m.values[2*c+0]*t.X + m.values[2*c+1]*t.Y + m.values[2*c+2]*t.Z + m.values[2*c+3]*t.W
	w := m.values[3*c+0]*t.X + m.values[3*c+1]*t.Y + m.values[3*c+2]*t.Z + m.values[3*c+3]*t.W
	return tuple.Tuple{X: x, Y: y, Z: z, W: w}
}
func Identity(r, c int) Matrix {
m := New(r, c)
for i := 0; i < r; i++ {
m.Set(i, i, 1)
}
return m
}
func (m Matrix) Transpose() Matrix {
t := New(m.cols, m.rows)
for c := 0; c < m.rows; c++ {
for r := 0; r < m.cols; r++ {
t.Set(r, c, m.Get(c, r))
}
}
return t
}
func (m Matrix) Determinant() float64 {
if m.rows == 2 && m.cols == 2 {
return m.Get(0, 0)*m.Get(1, 1) - m.Get(0, 1)*m.Get(1, 0)
}
det := float64(0)
for i := 0; i < m.cols; i++ {
det += m.Get(0, i) * m.Cofactor(0, i)
}
return det
}
func (m Matrix) Submatrix(r, c int) Matrix {
o := New(m.rows-1, m.cols-1)
for i := 0; i < m.rows; i++ {
for j := 0; j < m.cols; j++ {
if i == r || j == c {
continue
}
row := i
if row > r {
row--
}
col := j
if col > c {
col--
}
o.Set(row, col, m.Get(i, j))
}
}
return o
}
func (m Matrix) Minor(r, c int) float64 {
return m.Submatrix(r, c).Determinant()
}
func (m Matrix) Cofactor(r, c int) float64 {
min := m.Minor(r, c)
if (r+c)%2 == 1 {
min *= -1
}
return min
}
func (m Matrix) IsInvertible() bool {
return m.Determinant() != 0.0
}
func (m Matrix) Inverse() Matrix {
if !m.IsInvertible() {
panic("matrix is not invertible")
}
det := m.Determinant()
n := New(m.rows, m.cols)
for r := 0; r < m.rows; r++ {
for c := 0; c < m.cols; c++ {
n.Set(c, r, m.Cofactor(r, c)/det)
}
}
return n
} | matrix/matrix.go | 0.724578 | 0.591428 | matrix.go | starcoder |
package gifbounce
import (
"math"
"github.com/sgreben/yeetgif/pkg/box2d"
)
// World couples the simulation parameters with a Box2D physics world and
// the things (bodies) that live in it, split by body type.
type World struct {
	*Params              // embedded simulation configuration
	Box2d *box2d.World   // underlying physics world
	Things struct {
		Dynamic []*Thing // bodies moved by the physics step
		Static  []*Thing // immovable bodies
	}
}
// ContainsDynamicThings reports whether any dynamic body's fixture overlaps
// the given AABB.
func (w *World) ContainsDynamicThings(aabb box2d.AABB) bool {
	hit := false
	w.Box2d.QueryAABB(func(f *box2d.Fixture) bool {
		if f.Body.Type == box2d.BodyTypeDynamicBody {
			hit = true
			return false // stop the query on the first dynamic body
		}
		return true // keep searching
	}, aabb)
	return hit
}
// Step advances the simulation to time t: it wakes dynamic bodies whose
// start time has arrived, steps and records every thing via the Worker
// helper (presumably runs the callback for each index — confirm against
// Params), then advances the Box2D solver.
func (w *World) Step(t float64) {
	for _, thing := range w.Things.Dynamic {
		if thing.Fixture.Body.IsActive() {
			continue
		}
		// activate the body once the simulation reaches its spawn time
		if t >= thing.Initial.Time {
			thing.Fixture.Body.SetActive(true)
			thing.Fixture.Body.SetAwake(true)
		}
	}
	w.Worker(len(w.Things.Static), func(i int) {
		w.Things.Static[i].Step(t)
		w.Things.Static[i].Record()
	})
	w.Worker(len(w.Things.Dynamic), func(i int) {
		w.Things.Dynamic[i].Step(t)
		w.Things.Dynamic[i].Record()
	})
	w.Box2d.Step(w.Solver.TimeStep(t), w.Solver.VelocityIterations, w.Solver.PositionIterations)
}
// Recording captures the per-frame state of a Thing over the course of a
// simulation: one entry per recorded step in each parallel series.
type Recording struct {
	Active       []bool        // whether the body was active that frame
	Frames       []int         // animation frame index
	Angles       []float64     // rotation angle in degrees
	WorldCenters []box2d.Point // center in world coordinates
	LocalCenters []box2d.Point // center in body-local coordinates
	Bounds       []box2d.AABB  // axis-aligned bounding box
}

// PadRightTo extends every series with zero-value entries so the recording
// has at least k frames. Each slice is copied before padding, so backing
// arrays previously shared via Slice are left untouched.
func (r *Recording) PadRightTo(k int) {
	k -= r.Len()
	if k < 0 {
		return
	}
	r.Active = append(append([]bool(nil), r.Active...), make([]bool, k)...)
	r.Frames = append(append([]int(nil), r.Frames...), make([]int, k)...)
	r.Angles = append(append([]float64(nil), r.Angles...), make([]float64, k)...)
	r.WorldCenters = append(append([]box2d.Point(nil), r.WorldCenters...), make([]box2d.Point, k)...)
	r.LocalCenters = append(append([]box2d.Point(nil), r.LocalCenters...), make([]box2d.Point, k)...)
	r.Bounds = append(append([]box2d.AABB(nil), r.Bounds...), make([]box2d.AABB, k)...)
}

// Slice returns a view of frames [i, j) across all series; the returned
// slices alias the receiver's backing arrays.
func (r *Recording) Slice(i, j int) Recording {
	return Recording{
		Active: r.Active[i:j],
		Frames: r.Frames[i:j],
		Angles: r.Angles[i:j],
		WorldCenters: r.WorldCenters[i:j],
		LocalCenters: r.LocalCenters[i:j],
		Bounds: r.Bounds[i:j],
	}
}

// Len reports the number of recorded frames, taken from WorldCenters;
// assumes all series are kept the same length.
func (r *Recording) Len() int {
	return len(r.WorldCenters)
}

// Record appends one frame's worth of state to every series.
func (r *Recording) Record(active bool, frame int, angleDeg float64, worldCenter, localCenter box2d.Point, aabb box2d.AABB) {
	r.Active = append(r.Active, active)
	r.Frames = append(r.Frames, frame)
	r.Angles = append(r.Angles, angleDeg)
	r.WorldCenters = append(r.WorldCenters, worldCenter)
	r.LocalCenters = append(r.LocalCenters, localCenter)
	r.Bounds = append(r.Bounds, aabb)
}
// Thing is a simulated object: its parameters, Box2D shape and fixture, the
// current animation frame, whether it is static, and the recorded history
// of its motion.
type Thing struct {
	*ThingParams                     // embedded per-thing configuration
	Shape     *box2d.PolygonShape    // collision shape, updated per frame
	Fixture   *box2d.Fixture         // fixture binding the shape to a body
	Frame     int                    // index into ThingParams.Polygons
	Static    bool                   // true for immovable bodies
	Recording Recording              // per-frame state history
}
// Step updates the thing's time-dependent physics parameters (each is
// optional) and advances its polygon animation by one frame, wrapping
// around at the end of the polygon list.
func (t *Thing) Step(now float64) {
	if fn := t.Friction; fn != nil {
		t.Fixture.Friction = fn(now)
	}
	if fn := t.Bounciness; fn != nil {
		t.Fixture.Restitution = fn(now)
	}
	if fn := t.LinearDamping; fn != nil {
		t.Fixture.Body.LinearDamping = fn(now)
	}
	if fn := t.AngularDamping; fn != nil {
		t.Fixture.Body.AngularDamping = fn(now)
	}
	next := t.Frame + 1
	if next >= len(t.Polygons) {
		next = 0
	}
	t.Frame = next
	t.Shape.Set(t.Polygons[t.Frame])
}
// WorldCenter returns the thing's center in world coordinates. Static
// bodies use the center of their AABB; dynamic bodies use the body's mass
// center.
func (t *Thing) WorldCenter() box2d.Point {
	if t.Static {
		return t.Fixture.GetAABB(0).GetCenter()
	}
	return t.Fixture.Body.GetWorldCenter()
}

// LocalCenter returns the thing's center in body-local coordinates. For
// static bodies it is derived from the body position relative to the
// bounding-box minimum; dynamic bodies use the body's local mass center.
func (t *Thing) LocalCenter() box2d.Point {
	body := t.Fixture.Body
	if t.Static {
		pos := body.GetPosition()
		bounds := t.Fixture.GetAABB(0)
		return box2d.Point{
			X: pos.X - bounds.Min.X,
			Y: pos.Y - bounds.Min.Y,
		}
	}
	return t.Fixture.Body.GetLocalCenter()
}
func (t *Thing) Record() {
body := t.Fixture.Body
angleDeg := body.GetAngle() * 180.0 / math.Pi
worldCenter, localCenter := t.WorldCenter(), t.LocalCenter()
bounds := t.Fixture.GetAABB(0)
t.Recording.Record(body.IsActive(), t.Frame, angleDeg, worldCenter, localCenter, bounds.Clone())
} | pkg/gifbounce/world.go | 0.629775 | 0.438485 | world.go | starcoder |
package main
/*
This is a test module that does the following:
1) Creates an OpenGL window
2) Creates an RGB texture from noise described in a JSON config file
3) Displays the noise as a texture on a plane in the window
It requires the GLFW3 and GLEW libraries as well as the Go wrappers
for them: go-gl/gl and go-gl/glfw3.
Basic build instructions are:
go get github.com/go-gl/gl/v3.3-core/gl
go get github.com/go-gl/glfw/v3.1/glfw
go get github.com/tbogdala/noisey
cd $GOHOME/src/github.com/tbogdala/noisey/examples
./build.sh
./noise_from_json_gl
Hit `esc` to quit the program.
Hit `r` to reload the JSON file and compute the noise again!
Hit `c` to toggle the colorize effect.
*/
import (
"fmt"
gl "github.com/go-gl/gl/v3.3-core/gl"
glfw "github.com/go-gl/glfw/v3.1/glfw"
mgl "github.com/go-gl/mathgl/mgl32"
"github.com/tbogdala/noisey"
"io/ioutil"
"math"
"math/rand"
)
var (
	// configFilename is the JSON noise configuration read at startup and on 'r'.
	configFilename = "noise.json"
	// noiseBank holds the parsed noise generators; rebuilt by loadJSONFile.
	noiseBank *noisey.NoiseJSON = nil
	// noiseTex is the OpenGL texture object the noise image is uploaded to.
	noiseTex uint32
	// colorizeEnabled selects terrain-gradient colors (true) or grayscale (false).
	colorizeEnabled bool = true
	// imageSize is the edge length of the square noise texture, in pixels.
	imageSize = int32(512)
	// app owns the GLFW window and the render loop.
	app *ExampleApp
	// plane is the textured quad the noise is drawn on.
	plane *Renderable
)
// keyCallback handles keyboard input:
//
//	esc — close the window
//	r   — reload the JSON noise config and regenerate the texture
//	c   — toggle the colorize effect and regenerate the texture
func keyCallback(w *glfw.Window, key glfw.Key, scancode int, action glfw.Action, mods glfw.ModifierKey) {
	if key == glfw.KeyEscape && action == glfw.Press {
		w.SetShouldClose(true)
	}
	if key == glfw.KeyR && action == glfw.Press {
		fmt.Println("Reloading noise bank from JSON file...")
		loadJSONFile()
		regenerateNoiseTexture()
	}
	if key == glfw.KeyC && action == glfw.Press {
		colorizeEnabled = !colorizeEnabled
		if colorizeEnabled {
			fmt.Println("Colorizing the noise according to a gradient ...")
		} else {
			fmt.Println("Displaying noise as a grayscale image ...")
		}
		regenerateNoiseTexture()
	}
}

// regenerateNoiseTexture recomputes the noise image and re-uploads it into
// the existing OpenGL texture object. Extracted so the 'r' and 'c' key
// handlers don't duplicate the reload sequence.
func regenerateNoiseTexture() {
	randomPixels := generateNoiseImage(imageSize)
	gl.BindTexture(gl.TEXTURE_2D, noiseTex)
	gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RGB, imageSize, imageSize, 0, gl.RGB, gl.UNSIGNED_BYTE, gl.Ptr(randomPixels))
}
// createTextureFromRGB makes an OpenGL texture and buffers the RGB data into
// it. rgb must hold imageSize*imageSize byte triplets (no alpha); the
// texture is left bound on texture unit 0 with linear filtering and repeat
// wrapping.
func createTextureFromRGB(rgb []byte, imageSize int32) (tex uint32) {
	gl.GenTextures(1, &tex)
	gl.ActiveTexture(gl.TEXTURE0)
	gl.BindTexture(gl.TEXTURE_2D, tex)
	gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR)
	gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR)
	gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.REPEAT)
	gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.REPEAT)
	gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RGB, imageSize, imageSize, 0, gl.RGB, gl.UNSIGNED_BYTE, gl.Ptr(rgb))
	return tex
}
// generateNoiseImage renders the "basic" generator from the loaded noise
// bank into a square imageSize x imageSize image and returns the raw RGB
// byte triplets, either colorized via a terrain gradient or as grayscale.
func generateNoiseImage(imageSize int32) []byte {
	// create the fractal Brownian motion generator based on perlin
	fbmPerlin := noiseBank.GetGenerator("basic")
	// make an pixel image by calculating random noise and creating
	// an RGB byte triplet array based off the scaled noise value
	builder := noisey.NewBuilder2D(fbmPerlin, int(imageSize), int(imageSize))
	builder.Bounds = noisey.Builder2DBounds{0.0, 0.0, float64(imageSize) * 0.01, float64(imageSize) * 0.01}
	builder.Build()
	colors := make([]byte, imageSize*imageSize*3)
	for y := 0; y < builder.Height; y++ {
		for x := 0; x < builder.Width; x++ {
			v := builder.Values[(y*builder.Width)+x]
			// normalize 0..1 then scale by 255; assumes the generator output
			// stays within [-1, 1] — values outside would wrap (TODO confirm)
			b := byte(math.Floor((v*0.5 + 0.5) * 255))
			colorIndex := y*int(imageSize)*3 + x*3
			if colorizeEnabled {
				// map the height byte onto a terrain gradient; thresholds
				// must stay in descending order
				if b > 250 { // snow
					colors[colorIndex] = 255
					colors[colorIndex+1] = 255
					colors[colorIndex+2] = 255
				} else if b > 190 { // rock
					colors[colorIndex] = 128
					colors[colorIndex+1] = 128
					colors[colorIndex+2] = 128
				} else if b > 160 { // dirt
					colors[colorIndex] = 224
					colors[colorIndex+1] = 224
					colors[colorIndex+2] = 0
				} else if b > 130 { // grass
					colors[colorIndex] = 32
					colors[colorIndex+1] = 160
					colors[colorIndex+2] = 0
				} else if b > 125 { // sand
					colors[colorIndex] = 240
					colors[colorIndex+1] = 240
					colors[colorIndex+2] = 64
				} else if b > 120 { // shore
					colors[colorIndex] = 0
					colors[colorIndex+1] = 128
					colors[colorIndex+2] = 255
				} else if b > 32 { // shallow
					colors[colorIndex] = 0
					colors[colorIndex+1] = 0
					colors[colorIndex+2] = 255
				} else { // deeps
					colors[colorIndex] = 0
					colors[colorIndex+1] = 0
					colors[colorIndex+2] = 128
				}
			} else {
				// grayscale: same height byte in all three channels
				colors[colorIndex] = b
				colors[colorIndex+1] = b
				colors[colorIndex+2] = b
			}
		}
	}
	return colors
}
// loadJSONFile reads and parses the noise configuration file into the
// global noiseBank, then builds its random sources and generators.
// Panics on any failure — acceptable for this example program.
func loadJSONFile() {
	// load the actual JSON configuration file
	fmt.Printf("Loading JSON configuration file bytes...\n")
	bytes, err := ioutil.ReadFile(configFilename)
	if err != nil {
		panic(err)
	}
	fmt.Printf("Parsing the JSON ...\n")
	noiseBank, err = noisey.LoadNoiseJSON(bytes)
	if err != nil {
		panic(err)
	}
	fmt.Printf("Parsing complete!\n")
	// build the sources from the JSON file, seeding each with math/rand
	err = noiseBank.BuildSources(func(s int64) noisey.RandomSource {
		return rand.New(rand.NewSource(int64(s)))
	})
	if err != nil {
		panic(err)
	}
	err = noiseBank.BuildGenerators()
	if err != nil {
		panic(err)
	}
}
// renderCallback draws one frame: clears the screen and renders the
// textured plane with identity projection/view matrices. delta is the
// frame time and is currently unused.
func renderCallback(delta float64) {
	gl.Viewport(0, 0, int32(app.Width), int32(app.Height))
	gl.ClearColor(0.0, 0.0, 0.0, 1.0)
	gl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)
	// make the projection and view matrixes
	projection := mgl.Ident4()
	view := mgl.Ident4()
	plane.Draw(projection, view)
}
// main sets up the window, shader, noise configuration and textured plane,
// then enters the render loop. The setup order matters: graphics must be
// initialised before shader compilation and texture creation.
func main() {
	app = NewApp()
	app.InitGraphics("Noisey Perlin Test", 768, 768)
	app.SetKeyCallback(keyCallback)
	app.OnRender = renderCallback
	// compile our shader
	var err error
	textureShader, err := LoadShaderProgram(UnlitTextureVertShader, UnlitTextureFragShader)
	if err != nil {
		panic("Failed to compile the shader! " + err.Error())
	}
	// load the JSON configuration file
	loadJSONFile()
	// create the plane to draw as a test
	plane = CreatePlaneXY(-0.75, -0.75, 0.75, 0.75, 1.0)
	plane.Shader = textureShader
	// generate the noise and make a image
	randomPixels := generateNoiseImage(imageSize)
	noiseTex = createTextureFromRGB(randomPixels, imageSize)
	plane.Tex0 = noiseTex
	app.RenderLoop()
}
package chipmunk
import (
"github.com/Dethrail/chipmunk/transform"
"github.com/Dethrail/chipmunk/vect"
)
// BoxShape is a convenience wrapper around PolygonShape that represents an
// axis-aligned box. After changing Width, Height or Position, call
// UpdatePoly() to rebuild the underlying polygon.
type BoxShape struct {
	Shape *Shape
	// The polygon that represents this box. Do not touch!
	Polygon *PolygonShape
	// verts caches the four box corners fed into the polygon.
	verts [4]vect.Vect
	// The width of the box. Call UpdatePoly() if changed.
	Width vect.Float
	// The height of the box. Call UpdatePoly() if changed.
	Height vect.Float
	// The center of the box. Call UpdatePoly() if changed.
	Position vect.Vect
}
// NewBox creates a new box shape with the given center position, width and
// height, returning the generic Shape that wraps it.
func NewBox(pos vect.Vect, w, h vect.Float) *Shape {
	shape := newShape()
	box := &BoxShape{
		Polygon:  &PolygonShape{Shape: shape},
		Width:    w,
		Height:   h,
		Position: pos,
		Shape:    shape,
	}
	// Delegate vertex construction to UpdatePoly so the half-extent
	// computation lives in exactly one place instead of being duplicated
	// here and in UpdatePoly.
	box.UpdatePoly()
	shape.ShapeClass = box
	return shape
}
// Moment returns the moment of inertia of the box for the given mass,
// using the standard solid-rectangle formula m*(w^2+h^2)/12.
func (box *BoxShape) Moment(mass float32) vect.Float {
	return (vect.Float(mass) * (box.Width*box.Width + box.Height*box.Height) / 12.0)
}
// UpdatePoly rebuilds the internal polygon from Width, Height and Position.
// Call it after changing any of those fields.
func (box *BoxShape) UpdatePoly() {
	// half-extents, taken as absolute values so negative sizes still
	// produce a valid box
	hw, hh := box.Width/2.0, box.Height/2.0
	if hw < 0 {
		hw = -hw
	}
	if hh < 0 {
		hh = -hh
	}
	// counter-clockwise corners around the origin
	box.verts = [4]vect.Vect{
		{-hw, -hh},
		{-hw, hh},
		{hw, hh},
		{hw, -hh},
	}
	box.Polygon.SetVerts(box.verts[:], box.Position)
}
// ShapeType returns ShapeType_Box. Needed to implement the ShapeClass interface.
func (box *BoxShape) ShapeType() ShapeType {
	return ShapeType_Box
}

// Clone returns a copy of this box bound to the new Shape s. Needed to
// implement the ShapeClass interface.
func (box *BoxShape) Clone(s *Shape) ShapeClass {
	clone := *box
	clone.Polygon = &PolygonShape{Shape: s}
	clone.Shape = s
	clone.UpdatePoly()
	return &clone
}

// update recalculates the transformed vertices, axes and the bounding box.
func (box *BoxShape) update(xf transform.Transform) AABB {
	return box.Polygon.update(xf)
}

// TestPoint returns true if the given point is located inside the box.
func (box *BoxShape) TestPoint(point vect.Vect) bool {
	return box.Polygon.TestPoint(point)
}
package main
import (
"fmt"
"math"
)
// OpenShape is implemented by open geometric shapes (line segment, arc,
// curve) that have a measurable length.
type OpenShape interface {
	length() float64
}

// ClosedShape is implemented by closed geometric shapes that enclose a
// measurable area.
type ClosedShape interface {
	area() float64
}

// length returns the length of any open shape through its interface.
func length(shape OpenShape) float64 {
	return shape.length()
}

// area returns the area of any closed shape through its interface.
func area(shape ClosedShape) float64 {
	return shape.area()
}

// Line is a segment from point [x1, y1] to point [x2, y2].
type Line struct {
	x1, y1 float64
	x2, y2 float64
}

// Circle has its center at [x, y] and the given radius.
type Circle struct {
	x, y float64
	radius float64
}

// Ellipse has its center at [x, y] and semi-axes a and b.
type Ellipse struct {
	x, y float64
	a, b float64
}

// Rectangle has its corner at [x, y] and the given width and height.
type Rectangle struct {
	x, y float64
	width, height float64
}

// length computes the Euclidean distance between the segment endpoints.
func (l Line) length() float64 {
	return math.Hypot(l.x1-l.x2, l.y1-l.y2)
}

// area of a rectangle is width times height.
func (r Rectangle) area() float64 {
	return r.width * r.height
}

// area of a circle is pi times the squared radius.
func (c Circle) area() float64 {
	return math.Pi * c.radius * c.radius
}

// area of an ellipse is pi times the product of its semi-axes.
func (e Ellipse) area() float64 {
	return math.Pi * e.a * e.b
}

// main prints, for each shape, a header, the shape value, and the result of
// both the free-function and the method form of the measurement.
func main() {
	seg := Line{x1: 0, y1: 0, x2: 100, y2: 100}
	fmt.Println("Line")
	fmt.Println(seg)
	fmt.Println(length(seg))
	fmt.Println(seg.length())
	fmt.Println()

	fmt.Println("Rectangle")
	rect := Rectangle{x: 0, y: 0, width: 100, height: 100}
	fmt.Println(rect)
	fmt.Println(area(rect))
	fmt.Println(rect.area())
	fmt.Println()

	fmt.Println("Circle")
	circ := Circle{x: 0, y: 0, radius: 100}
	fmt.Println(circ)
	fmt.Println(area(circ))
	fmt.Println(circ.area())
	fmt.Println()

	fmt.Println("Ellipse")
	ell := Ellipse{x: 0, y: 0, a: 100, b: 50}
	fmt.Println(ell)
	fmt.Println(area(ell))
	fmt.Println(ell.area())
	fmt.Println()
}
package ngin
import (
"bufio"
"bytes"
)
const (
	szKB = 1 << 10
	szMB = 1 << 20
	szGB = 1 << 30

	szHeader = 16
	szPage   = 4 * szKB
)

// alignBytes rounds n data bytes (plus the record header) up to a whole
// number of pages and returns the aligned size in bytes. Non-positive n
// yields a single page.
func alignBytes(n int) uint64 {
	if n <= 0 {
		return szPage
	}
	return uint64(((n + szHeader) + szPage - 1) &^ (szPage - 1))
}

// alignPages is alignBytes expressed as a page count.
func alignPages(n int) uint16 {
	return uint16(alignBytes(n) / szPage)
}

// align returns both the byte-aligned and the page-aligned sizes for n
// data bytes, header included.
func align(n int) (uint64, uint16) {
	return alignBytes(n), alignPages(n)
}
// header is the fixed-size (szHeader = 16 bytes on disk) prefix of every
// record. It tracks the record's status plus the sizing information needed
// to keep records page-aligned.
type header struct {
	status byte // 0 - free, 1 - active, 2 - deleted
	magic byte // magic is currently unused, but was put there for future use (in case)
	extra uint16 // extra is currently unused, but was put there for future use (in case)
	pages uint16 // number of aligned pages, 65535 pages is the max (255mb)
	length uint64 // total length of record in bytes, 268431360 (not bound by uint64 type) bytes is the max (255mb)
	padding uint16 // number of bytes to pad after the header and raw data
}

// record is a page-aligned unit of storage: the 16-byte header, followed by
// the raw data, followed by padding zero bytes. It may vary in size but is
// always perfectly page aligned.
type record struct {
	*header // embedded header
	data []byte // actual data
}
// newHeader builds a header for a record whose raw data is dl bytes long.
// The padding is whatever is needed so header+data+padding is page aligned.
func newHeader(dl int) *header {
	abc, apc := align(dl)
	return &header{
		status: 1, // active
		magic:  0,
		extra:  0,
		// apc is already uint16 and abc already uint64; the previous
		// redundant conversions have been dropped.
		pages:   apc,
		length:  uint64(dl),
		padding: uint16(abc - uint64(dl+szHeader)),
	}
}

// newRecord wraps raw bytes in a record with a freshly computed header.
// The byte slice is referenced, not copied.
func newRecord(b []byte) *record {
	return &record{
		header: newHeader(len(b)),
		data:   b,
	}
}
// MarshalBinary encodes the record into its on-disk form: the six header
// fields in declaration order, then the raw data, then r.padding zero bytes
// so the total is page aligned. Implements encoding.BinaryMarshaler.
func (r *record) MarshalBinary() ([]byte, error) {
	var buf bytes.Buffer
	w := NewWriter(bufio.NewWriter(&buf))
	// write header (field order must match UnmarshalBinary)
	err := w.WriteByte(r.status)
	if err != nil {
		return nil, err
	}
	err = w.WriteByte(r.magic)
	if err != nil {
		return nil, err
	}
	err = w.WriteUint16(r.extra)
	if err != nil {
		return nil, err
	}
	err = w.WriteUint16(r.pages)
	if err != nil {
		return nil, err
	}
	err = w.WriteUint64(r.length)
	if err != nil {
		return nil, err
	}
	err = w.WriteUint16(r.padding)
	if err != nil {
		return nil, err
	}
	// write data
	err = w.WriteBytes(r.data)
	if err != nil {
		return nil, err
	}
	// write padding (zero bytes up to the page boundary)
	err = w.WriteBytes(make([]byte, r.padding, r.padding))
	if err != nil {
		return nil, err
	}
	// flush the buffered writer before reading the buffer back out
	err = w.Flush()
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
func (r *record) UnmarshalBinary(data []byte) error {
buf := bytes.NewBuffer(data)
rd := NewReader(bufio.NewReader(buf))
// read header
var err error
r.header.status, err = rd.ReadByte()
if err != nil {
return err
}
// TODO: CONTINUE FROM HERE
return nil
err = w.WriteByte(r.magic)
if err != nil {
return err
}
err = w.WriteUint16(r.extra)
if err != nil {
return err
}
err = w.WriteUint16(r.pages)
if err != nil {
return err
}
err = w.WriteUint64(r.length)
if err != nil {
return err
}
err = w.WriteUint16(r.padding)
if err != nil {
return err
}
// write data
err = w.WriteBytes(r.data)
if err != nil {
return err
}
// write padding
err = w.WriteBytes(make([]byte, r.padding, r.padding))
if err != nil {
return err
}
err = w.Flush()
if err != nil {
return err
}
return nil
} | pkg/ngin/_record.go | 0.591605 | 0.450662 | _record.go | starcoder |
package logicalpermissions
// LogicalPermissionsInterface manages named permission types (each backed by
// an evaluation callback), an optional access-bypass callback, and evaluates
// permission trees against a context map.
type LogicalPermissionsInterface interface {
	// AddType registers a permission type under name. Upon CheckAccess the
	// callback receives a single permission string (e.g. one role, even when
	// several are accepted — it is called once per candidate) plus the
	// context map, and reports whether access is granted.
	AddType(name string, callback func(string, map[string]interface{}) (bool, error)) error

	// RemoveType unregisters the permission type with the given name.
	RemoveType(name string) error

	// TypeExists reports whether a permission type with the given name is
	// registered.
	TypeExists(name string) (bool, error)

	// GetTypeCallback returns the evaluation callback registered for name.
	GetTypeCallback(name string) (func(string, map[string]interface{}) (bool, error), error)

	// SetTypeCallback replaces the callback of an existing permission type.
	SetTypeCallback(name string, callback func(string, map[string]interface{}) (bool, error)) error

	// GetTypes returns a shallow copy of all registered permission types,
	// keyed by type name.
	GetTypes() map[string]func(string, map[string]interface{}) (bool, error)

	// SetTypes replaces all registered permission types with a shallow copy
	// of the given map.
	SetTypes(types map[string]func(string, map[string]interface{}) (bool, error)) error

	// GetBypassCallback returns the current access-bypass callback.
	GetBypassCallback() func(map[string]interface{}) (bool, error)

	// SetBypassCallback sets the callback that decides, from the context map
	// alone, whether access checks may be bypassed entirely.
	SetBypassCallback(callback func(map[string]interface{}) (bool, error))

	// GetValidPermissionKeys lists every key that may appear in a permission
	// tree.
	GetValidPermissionKeys() []string

	// CheckAccess evaluates a permission tree — a map, a JSON object string,
	// a slice, a boolean string or a real boolean — against the context
	// (e.g. the evaluated user and document) and reports whether access is
	// granted.
	CheckAccess(permissions interface{}, context map[string]interface{}) (bool, error)

	// CheckAccessNoBypass is CheckAccess with access bypassing explicitly
	// disallowed.
	CheckAccessNoBypass(permissions interface{}, context map[string]interface{}) (bool, error)
}
package utils
import (
"errors"
"fmt"
"math"
)
// NodeActivationType identifies the activation function used by a neuron
// node.
type NodeActivationType byte

// The supported neuron activation function types.
const (
	// The sigmoid activation functions
	SigmoidPlainActivation NodeActivationType = iota + 1
	SigmoidReducedActivation
	SigmoidBipolarActivation
	SigmoidSteepenedActivation
	SigmoidApproximationActivation
	SigmoidSteepenedApproximationActivation
	SigmoidInverseAbsoluteActivation
	SigmoidLeftShiftedActivation
	SigmoidLeftShiftedSteepenedActivation
	SigmoidRightShiftedSteepenedActivation
	// The other activators assortment
	TanhActivation
	GaussianBipolarActivation
	LinearActivation
	LinearAbsActivation
	LinearClippedActivation
	NullActivation
	SignActivation
	SineActivation
	StepActivation
	// The modular activators (with multiple inputs/outputs)
	MultiplyModuleActivation
	MaxModuleActivation
	MinModuleActivation
)

// ActivationFunction computes a neuron node's output from its input signal
// and auxiliary parameters.
type ActivationFunction func(float64, []float64) float64

// ModuleActivationFunction computes a neuron module's outputs from multiple
// inputs and auxiliary parameters.
type ModuleActivationFunction func([]float64, []float64) []float64

// NodeActivators is the default, pre-populated node activators factory.
var NodeActivators = NewNodeActivatorsFactory()

// NodeActivatorsFactory maps activation types to their functions and keeps
// a bidirectional type<->name registry.
type NodeActivatorsFactory struct {
	// activators holds registered neuron node activators by type
	activators map[NodeActivationType]ActivationFunction
	// moduleActivators holds registered neuron module activators by type
	moduleActivators map[NodeActivationType]ModuleActivationFunction
	// forward and inverse map activator types to function names and back
	forward map[NodeActivationType]string
	inverse map[string]NodeActivationType
}
// NewNodeActivatorsFactory returns a node activator factory initialized
// with the default set of activation functions, each registered under the
// name matching its type constant.
func NewNodeActivatorsFactory() *NodeActivatorsFactory {
	af := &NodeActivatorsFactory{
		activators:make(map[NodeActivationType]ActivationFunction),
		moduleActivators:make(map[NodeActivationType]ModuleActivationFunction),
		forward:make(map[NodeActivationType]string),
		inverse:make(map[string]NodeActivationType),
	}
	// Register neuron node activators
	af.Register(SigmoidPlainActivation, plainSigmoid, "SigmoidPlainActivation")
	af.Register(SigmoidReducedActivation, reducedSigmoid, "SigmoidReducedActivation")
	af.Register(SigmoidSteepenedActivation, steepenedSigmoid, "SigmoidSteepenedActivation")
	af.Register(SigmoidBipolarActivation, bipolarSigmoid, "SigmoidBipolarActivation")
	af.Register(SigmoidApproximationActivation, approximationSigmoid, "SigmoidApproximationActivation")
	af.Register(SigmoidSteepenedApproximationActivation, approximationSteepenedSigmoid, "SigmoidSteepenedApproximationActivation")
	af.Register(SigmoidInverseAbsoluteActivation, inverseAbsoluteSigmoid, "SigmoidInverseAbsoluteActivation")
	af.Register(SigmoidLeftShiftedActivation, leftShiftedSigmoid, "SigmoidLeftShiftedActivation")
	af.Register(SigmoidLeftShiftedSteepenedActivation, leftShiftedSteepenedSigmoid, "SigmoidLeftShiftedSteepenedActivation")
	af.Register(SigmoidRightShiftedSteepenedActivation, rightShiftedSteepenedSigmoid, "SigmoidRightShiftedSteepenedActivation")
	af.Register(TanhActivation, hyperbolicTangent, "TanhActivation")
	af.Register(GaussianBipolarActivation, bipolarGaussian, "GaussianBipolarActivation")
	af.Register(LinearActivation, linear, "LinearActivation")
	af.Register(LinearAbsActivation, absoluteLinear, "LinearAbsActivation")
	af.Register(LinearClippedActivation, clippedLinear, "LinearClippedActivation")
	af.Register(NullActivation, nullFunctor, "NullActivation")
	af.Register(SignActivation, signFunction, "SignActivation")
	af.Register(SineActivation, sineFunction, "SineActivation")
	af.Register(StepActivation, stepFunction, "StepActivation")
	// register neuron modules activators
	af.RegisterModule(MultiplyModuleActivation, multiplyModule, "MultiplyModuleActivation")
	af.RegisterModule(MaxModuleActivation, maxModule, "MaxModuleActivation")
	af.RegisterModule(MinModuleActivation, minModule, "MinModuleActivation")
	return af
}
// ActivateByType applies the activation function registered for a_type to
// the given input and auxiliary parameters. Returns -0.0 and an error when
// no activator is registered for the requested type.
func (a *NodeActivatorsFactory) ActivateByType(input float64, aux_params []float64, a_type NodeActivationType) (float64, error) {
	fn, ok := a.activators[a_type]
	if !ok {
		// fmt.Errorf replaces the redundant errors.New(fmt.Sprintf(...));
		// the message text is unchanged
		return -0.0, fmt.Errorf("Unknown neuron activation type: %d", a_type)
	}
	return fn(input, aux_params), nil
}
// ActivateModuleByType applies the module activation function registered under the
// specified type to the input values and returns the resulting output values.
// Returns nil and an error (it does not panic, despite the previous comment) when
// no module activator is registered for the requested type.
func (a *NodeActivatorsFactory) ActivateModuleByType(inputs []float64, aux_params []float64, a_type NodeActivationType) ([]float64, error) {
	fn, ok := a.moduleActivators[a_type]
	if !ok {
		return nil, fmt.Errorf("Unknown module activation type: %d", a_type)
	}
	return fn(inputs, aux_params), nil
}
// Register stores the given neuron activation function under the provided type
// and records the name<->type mapping used for (de)serialization lookups.
func (a *NodeActivatorsFactory) Register(a_type NodeActivationType, a_func ActivationFunction, f_name string) {
	// store function
	a.activators[a_type] = a_func
	// store name<->type bi-directional mapping
	a.forward[a_type] = f_name
	a.inverse[f_name] = a_type
}
// RegisterModule stores the given neuron module activation function under the
// provided type and records the name<->type mapping used for (de)serialization lookups.
func (a *NodeActivatorsFactory) RegisterModule(a_type NodeActivationType, a_func ModuleActivationFunction, f_name string) {
	// store function
	a.moduleActivators[a_type] = a_func
	// store name<->type bi-directional mapping
	a.forward[a_type] = f_name
	a.inverse[f_name] = a_type
}
// ActivationTypeFromName parses the node activation type name and returns the
// corresponding activation type. Returns math.MaxInt8 as a sentinel plus an
// error when the name was never registered.
func (a *NodeActivatorsFactory) ActivationTypeFromName(name string) (NodeActivationType, error) {
	t, ok := a.inverse[name]
	if !ok {
		return math.MaxInt8, errors.New("Unsupported activation type name: " + name)
	}
	return t, nil
}
// ActivationNameFromType returns the activation function name registered for the
// given type, or an empty string and an error when the type is unknown.
func (a *NodeActivatorsFactory) ActivationNameFromType(atype NodeActivationType) (string, error) {
	n, ok := a.forward[atype]
	if !ok {
		return "", fmt.Errorf("Unsupported activation type: %d", atype)
	}
	return n, nil
}
// The sigmoid activation functions.
var (
	// The plain sigmoid: 1/(1+e^-x), output range (0, 1).
	plainSigmoid = func(input float64, aux_params []float64) float64 {
		return 1 / (1 + math.Exp(-input))
	}
	// The reduced (flattened) sigmoid with slope 0.5.
	reducedSigmoid = func(input float64, aux_params []float64) float64 {
		return 1 / (1 + math.Exp(-0.5*input))
	}
	// The steepened sigmoid with slope 4.924273.
	steepenedSigmoid = func(input float64, aux_params []float64) float64 {
		return 1.0 / (1.0 + math.Exp(-4.924273*input))
	}
	// The bipolar sigmoid activation function xrange->[-1,1] yrange->[-1,1].
	bipolarSigmoid = func(input float64, aux_params []float64) float64 {
		return (2.0 / (1.0 + math.Exp(-4.924273*input))) - 1.0
	}
	// The approximation sigmoid with squashing range [-4.0; 4.0]: a piecewise
	// quadratic that avoids math.Exp.
	approximationSigmoid = func(input float64, aux_params []float64) float64 {
		four, one_32nd := float64(4.0), float64(0.03125)
		if input < -4.0 {
			return 0.0
		} else if input < 0.0 {
			return (input + four) * (input + four) * one_32nd
		} else if input < 4.0 {
			return 1.0 - (input-four)*(input-four)*one_32nd
		}
		return 1.0
	}
	// The steepened approximation sigmoid with squashing range [-1.0; 1.0].
	approximationSteepenedSigmoid = func(input float64, aux_params []float64) float64 {
		one, one_half := 1.0, 0.5
		if input < -1.0 {
			return 0.0
		} else if input < 0.0 {
			return (input + one) * (input + one) * one_half
		} else if input < 1.0 {
			return 1.0 - (input-one)*(input-one)*one_half
		}
		return 1.0
	}
	// The inverse absolute sigmoid.
	inverseAbsoluteSigmoid = func(input float64, aux_params []float64) float64 {
		return 0.5 + (input/(1.0+math.Abs(input)))*0.5
	}
	// The left/right shifted sigmoids.
	leftShiftedSigmoid = func(input float64, aux_params []float64) float64 {
		return 1.0 / (1.0 + math.Exp(-input-2.4621365))
	}
	leftShiftedSteepenedSigmoid = func(input float64, aux_params []float64) float64 {
		return 1.0 / (1.0 + math.Exp(-(4.924273*input + 2.4621365)))
	}
	rightShiftedSteepenedSigmoid = func(input float64, aux_params []float64) float64 {
		return 1.0 / (1.0 + math.Exp(-(4.924273*input - 2.4621365)))
	}
)
// The other activation functions.
var (
	// The hyperbolic tangent with slope 0.9.
	hyperbolicTangent = func(input float64, aux_params []float64) float64 {
		return math.Tanh(0.9 * input)
	}
	// The bipolar Gaussian activator xrange->[-1,1] yrange->[-1,1].
	bipolarGaussian = func(input float64, aux_params []float64) float64 {
		return 2.0*math.Exp(-math.Pow(input*2.5, 2.0)) - 1.0
	}
	// The absolute linear: |x|.
	absoluteLinear = func(input float64, aux_params []float64) float64 {
		return math.Abs(input)
	}
	// Linear activation function with clipping: the output is linear between
	// x = -1 and x = 1, and clipped at -1 and +1 outside that range.
	clippedLinear = func(input float64, aux_params []float64) float64 {
		switch {
		case input < -1.0:
			return -1.0
		case input > 1.0:
			return 1.0
		default:
			return input
		}
	}
	// The identity (linear) activation.
	linear = func(input float64, aux_params []float64) float64 {
		return input
	}
	// The null activator: always 0.
	nullFunctor = func(input float64, aux_params []float64) float64 {
		return 0.0
	}
	// The sign activator: -1, 0 or +1 (NaN maps to 0).
	signFunction = func(input float64, aux_params []float64) float64 {
		if math.IsNaN(input) || input == 0.0 {
			return 0.0
		} else if math.Signbit(input) {
			return -1.0
		}
		return 1.0
	}
	// The sine periodic activation with doubled period.
	sineFunction = func(input float64, aux_params []float64) float64 {
		return math.Sin(2.0 * input)
	}
	// The step function x<0 ? 0.0 : 1.0 (note: Signbit(-0.0) is true, so
	// negative zero maps to 0.0).
	stepFunction = func(input float64, aux_params []float64) float64 {
		if math.Signbit(input) {
			return 0.0
		}
		return 1.0
	}
)
// The modular activators
var (
	// multiplyModule multiplies all input values and returns the product.
	multiplyModule = func(inputs []float64, aux_params []float64) []float64 {
		ret := 1.0
		for _, v := range inputs {
			ret *= v
		}
		return []float64{ret}
	}
	// maxModule finds the maximal value among the inputs and returns it.
	maxModule = func(inputs []float64, aux_params []float64) []float64 {
		// BUGFIX: seed with -Inf instead of float64(math.MinInt64) so that
		// inputs below ~-9.2e18 can still become the maximum. For empty
		// input this now returns -Inf.
		max := math.Inf(-1)
		for _, v := range inputs {
			max = math.Max(max, v)
		}
		return []float64{max}
	}
	// minModule finds the minimal value among the inputs and returns it.
	minModule = func(inputs []float64, aux_params []float64) []float64 {
		// Seed with +Inf (symmetric with maxModule); covers every float64
		// input including math.MaxFloat64 itself.
		min := math.Inf(1)
		for _, v := range inputs {
			min = math.Min(min, v)
		}
		return []float64{min}
	}
) | neat/utils/activations.go | 0.826292 | 0.65276 | activations.go | starcoder |
package iso20022
// TransactionAgents3 provides further details on the agents specific to the individual transaction.
type TransactionAgents3 struct {
	// Financial institution servicing an account for the debtor.
	DebtorAgent *BranchAndFinancialInstitutionIdentification5 `xml:"DbtrAgt,omitempty"`
	// Financial institution servicing an account for the creditor.
	CreditorAgent *BranchAndFinancialInstitutionIdentification5 `xml:"CdtrAgt,omitempty"`
	// Agent between the debtor's agent and the creditor's agent.
	//
	// Usage: If more than one intermediary agent is present, then IntermediaryAgent1 identifies the agent between the DebtorAgent and the IntermediaryAgent2.
	IntermediaryAgent1 *BranchAndFinancialInstitutionIdentification5 `xml:"IntrmyAgt1,omitempty"`
	// Agent between the debtor's agent and the creditor's agent.
	//
	// Usage: If more than two intermediary agents are present, then IntermediaryAgent2 identifies the agent between the IntermediaryAgent1 and the IntermediaryAgent3.
	IntermediaryAgent2 *BranchAndFinancialInstitutionIdentification5 `xml:"IntrmyAgt2,omitempty"`
	// Agent between the debtor's agent and the creditor's agent.
	//
	// Usage: If IntermediaryAgent3 is present, then it identifies the agent between the IntermediaryAgent 2 and the CreditorAgent.
	IntermediaryAgent3 *BranchAndFinancialInstitutionIdentification5 `xml:"IntrmyAgt3,omitempty"`
	// Party that receives securities from the delivering agent at the place of settlement, such as central securities depository.
	// Can also be used in the context of treasury operations.
	ReceivingAgent *BranchAndFinancialInstitutionIdentification5 `xml:"RcvgAgt,omitempty"`
	// Party that delivers securities to the receiving agent at the place of settlement, such as a central securities depository.
	// Can also be used in the context of treasury operations.
	DeliveringAgent *BranchAndFinancialInstitutionIdentification5 `xml:"DlvrgAgt,omitempty"`
	// Legal entity that has the right to issue securities.
	IssuingAgent *BranchAndFinancialInstitutionIdentification5 `xml:"IssgAgt,omitempty"`
	// Place where settlement of the securities takes place.
	// Usage: This is typed by a financial institution identification as this is the standard way to identify a securities settlement agent/central system.
	SettlementPlace *BranchAndFinancialInstitutionIdentification5 `xml:"SttlmPlc,omitempty"`
	// Proprietary agent related to the underlying transaction.
	Proprietary []*ProprietaryAgent3 `xml:"Prtry,omitempty"`
}
// AddDebtorAgent allocates the DebtorAgent field and returns it for further initialization.
func (t *TransactionAgents3) AddDebtorAgent() *BranchAndFinancialInstitutionIdentification5 {
	t.DebtorAgent = new(BranchAndFinancialInstitutionIdentification5)
	return t.DebtorAgent
}
// AddCreditorAgent allocates the CreditorAgent field and returns it for further initialization.
func (t *TransactionAgents3) AddCreditorAgent() *BranchAndFinancialInstitutionIdentification5 {
	t.CreditorAgent = new(BranchAndFinancialInstitutionIdentification5)
	return t.CreditorAgent
}
// AddIntermediaryAgent1 allocates the IntermediaryAgent1 field and returns it for further initialization.
func (t *TransactionAgents3) AddIntermediaryAgent1() *BranchAndFinancialInstitutionIdentification5 {
	t.IntermediaryAgent1 = new(BranchAndFinancialInstitutionIdentification5)
	return t.IntermediaryAgent1
}
// AddIntermediaryAgent2 allocates the IntermediaryAgent2 field and returns it for further initialization.
func (t *TransactionAgents3) AddIntermediaryAgent2() *BranchAndFinancialInstitutionIdentification5 {
	t.IntermediaryAgent2 = new(BranchAndFinancialInstitutionIdentification5)
	return t.IntermediaryAgent2
}
// AddIntermediaryAgent3 allocates the IntermediaryAgent3 field and returns it for further initialization.
func (t *TransactionAgents3) AddIntermediaryAgent3() *BranchAndFinancialInstitutionIdentification5 {
	t.IntermediaryAgent3 = new(BranchAndFinancialInstitutionIdentification5)
	return t.IntermediaryAgent3
}
// AddReceivingAgent allocates the ReceivingAgent field and returns it for further initialization.
func (t *TransactionAgents3) AddReceivingAgent() *BranchAndFinancialInstitutionIdentification5 {
	t.ReceivingAgent = new(BranchAndFinancialInstitutionIdentification5)
	return t.ReceivingAgent
}
// AddDeliveringAgent allocates the DeliveringAgent field and returns it for further initialization.
func (t *TransactionAgents3) AddDeliveringAgent() *BranchAndFinancialInstitutionIdentification5 {
	t.DeliveringAgent = new(BranchAndFinancialInstitutionIdentification5)
	return t.DeliveringAgent
}
// AddIssuingAgent allocates the IssuingAgent field and returns it for further initialization.
func (t *TransactionAgents3) AddIssuingAgent() *BranchAndFinancialInstitutionIdentification5 {
	t.IssuingAgent = new(BranchAndFinancialInstitutionIdentification5)
	return t.IssuingAgent
}
// AddSettlementPlace allocates the SettlementPlace field and returns it for further initialization.
func (t *TransactionAgents3) AddSettlementPlace() *BranchAndFinancialInstitutionIdentification5 {
	t.SettlementPlace = new(BranchAndFinancialInstitutionIdentification5)
	return t.SettlementPlace
}
// AddProprietary appends a new empty ProprietaryAgent3 to the Proprietary slice and returns it.
func (t *TransactionAgents3) AddProprietary() *ProprietaryAgent3 {
	newValue := new (ProprietaryAgent3)
	t.Proprietary = append(t.Proprietary, newValue)
	return newValue
} | TransactionAgents3.go | 0.69946 | 0.463262 | TransactionAgents3.go | starcoder |
package plot
// Plot defines a combination of elements that can be drawn to the canvas.
type Plot struct {
	// X, Y are the axis information
	X, Y *Axis
	// Margin is insetted from the canvas bounds before drawing, when non-empty.
	Margin Rect
	// Elements are the drawable items contained in this plot.
	Elements
	// Theme supplies the default styling for the plot.
	Theme
}
// Element is a drawable plot element.
type Element interface {
	Draw(plot *Plot, canvas Canvas)
}
// Dataset represents an Element that contains data.
type Dataset interface {
	Element
	// TODO: remove and replace with recommended Axis
	Stats() Stats
}
// New creates a new empty plot with fresh axes and the default theme.
// The Y axis is flipped so values grow upwards on screen.
func New() *Plot {
	p := &Plot{
		X:     NewAxis(),
		Y:     NewAxis(),
		Theme: NewTheme(),
	}
	p.Y.Flip = true
	return p
}
// Draw draws the plot to the specified canvas, deriving the axes from the
// plot elements automatically when the configured axes are not valid.
func (plot *Plot) Draw(canvas Canvas) {
	if !plot.X.IsValid() || !plot.Y.IsValid() {
		// Work on a shallow copy so axis detection does not mutate the
		// caller's plot. (Replaces the two-step &Plot{} + dereference-assign
		// copy idiom.)
		tmp := *plot
		plot = &tmp
		plot.X, plot.Y = detectAxis(plot.X, plot.Y, plot.Elements)
	}
	bounds := canvas.Bounds()
	if !plot.Margin.Empty() {
		bounds = bounds.Inset(plot.Margin)
	}
	for _, element := range plot.Elements {
		element.Draw(plot, canvas.Context(bounds))
	}
}
// AxisGroup allows sub-elements to have different axes defined rather than the top-level plot.
type AxisGroup struct {
	// X, Y override the plot axes for the contained elements when non-nil.
	X, Y *Axis
	// Elements are the drawable items bound to this axis group.
	Elements
}
// NewAxisGroup creates a new axis group holding the given elements.
// The Y axis is flipped so values grow upwards on screen.
func NewAxisGroup(els ...Element) *AxisGroup {
	x, y := NewAxis(), NewAxis()
	y.Flip = true
	return &AxisGroup{
		X: x, Y: y,
		Elements: Elements(els),
	}
}
// Update re-detects the axis values in place from the group's elements.
func (group *AxisGroup) Update() {
	tx, ty := detectAxis(group.X, group.Y, group.Elements)
	*group.X = *tx
	*group.Y = *ty
}
// Draw draws the elements bound to this axis group, overriding the plot's
// axes with the group's own and auto-detecting them when invalid.
func (group *AxisGroup) Draw(plot *Plot, canvas Canvas) {
	// Shallow-copy the plot so the axis overrides stay local to this call.
	// (Replaces the two-step &Plot{} + dereference-assign copy idiom.)
	tmp := *plot
	tmpplot := &tmp
	if group.X != nil {
		tmpplot.X = group.X
	}
	if group.Y != nil {
		tmpplot.Y = group.Y
	}
	if !tmpplot.X.IsValid() || !tmpplot.Y.IsValid() {
		tmpplot.X, tmpplot.Y = detectAxis(tmpplot.X, tmpplot.Y, group.Elements)
	}
	for _, element := range group.Elements {
		element.Draw(tmpplot, canvas.Context(canvas.Bounds()))
	}
} | plot.go | 0.782995 | 0.587677 | plot.go | starcoder |
package transform
import "errors"
// Sort by Rank Transform is a family of transforms typically used after
// a BWT to reduce the variance of the data prior to entropy coding.
// SBR(alpha) is defined by sbr(x, alpha) = (1-alpha)*(t-w1(x,t)) + alpha*(t-w2(x,t))
// where x is an item in the data list, t is the current access time and wk(x,t) is
// the k-th access time to x at time t (with 0 <= alpha <= 1).
// See [Two new families of list update algorithms] by <NAME> for details.
// It turns out that SBR(0)= Move to Front Transform
// It turns out that SBR(1)= Time Stamp Transform
// This code implements SBR(0), SBR(1/2) and SBR(1). Code derived from openBWT
const (
	MODE_MTF = 1 // alpha = 0 (equivalent to the Move-To-Front transform)
	MODE_RANK = 2 // alpha = 1/2 (averages previous and current access times)
	MODE_TIMESTAMP = 3 // alpha = 1 (equivalent to the Time Stamp transform)
)
// SBRT holds the state for one Sort-By-Rank transform: previous/current
// access times and the symbol<->rank mappings for all 256 byte values.
type SBRT struct {
	size uint
	prev []int // size 256
	curr []int // size 256
	symbols []int // size 256
	ranks []int // size 256
	mode int
}
// NewSBRT creates a Sort-By-Rank transform for the given mode (MODE_MTF,
// MODE_RANK or MODE_TIMESTAMP) and block size, or an error for any other mode.
func NewSBRT(mode int, sz uint) (*SBRT, error) {
	switch mode {
	case MODE_MTF, MODE_RANK, MODE_TIMESTAMP:
		// valid mode
	default:
		return nil, errors.New("Invalid mode parameter")
	}
	return &SBRT{
		size:    sz,
		mode:    mode,
		prev:    make([]int, 256),
		curr:    make([]int, 256),
		symbols: make([]int, 256),
		ranks:   make([]int, 256),
	}, nil
}
// Size returns the configured block size (0 means "use the full input slice").
func (this *SBRT) Size() uint {
	return this.size
}
// SetSize updates the block size; always reports success.
func (this *SBRT) SetSize(sz uint) bool {
	this.size = sz
	return true
}
// Forward applies the SBRT to src, writing each byte's current rank to dst.
// Returns the number of bytes read and written (always equal) and nil.
func (this *SBRT) Forward(src, dst []byte) (uint, uint, error) {
	count := int(this.size)
	if count == 0 {
		count = len(src)
	}
	// Aliasing
	p := this.prev
	q := this.curr
	s2r := this.symbols
	r2s := this.ranks
	// mask1/mask2/shift encode the mode-specific weighting branch-free:
	// TIMESTAMP drops the current time (mask1=0), MTF drops the previous
	// time (mask2=0), RANK keeps both and averages them (shift=1).
	var mask1, mask2 int
	var shift uint
	if this.mode == MODE_TIMESTAMP {
		mask1 = 0
	} else {
		mask1 = -1
	}
	if this.mode == MODE_MTF {
		mask2 = 0
	} else {
		mask2 = -1
	}
	if this.mode == MODE_RANK {
		shift = 1
	} else {
		shift = 0
	}
	// Reset access times and initialize both mappings to the identity.
	for i := 0; i < 256; i++ {
		p[i] = 0
		q[i] = 0
		s2r[i] = i
		r2s[i] = i
	}
	for i := 0; i < count; i++ {
		c := uint(src[i])
		r := s2r[c]
		dst[i] = byte(r)
		// sbr score: mode-weighted mix of current time i and previous time p[c].
		q[c] = ((i & mask1) + (p[c] & mask2)) >> shift
		p[c] = i
		curVal := q[c]
		// Move up symbol to correct rank
		for r > 0 && q[r2s[r-1]] <= curVal {
			r2s[r] = r2s[r-1]
			s2r[r2s[r]] = r
			r--
		}
		r2s[r] = int(c)
		s2r[c] = r
	}
	return uint(count), uint(count), nil
}
// Inverse reverses the SBRT: src contains ranks, dst receives the original
// bytes. Mirrors Forward but only maintains the rank->symbol mapping.
func (this *SBRT) Inverse(src, dst []byte) (uint, uint, error) {
	count := int(this.size)
	if count == 0 {
		count = len(src)
	}
	// Aliasing
	p := this.prev
	q := this.curr
	r2s := this.ranks
	// Same branch-free mode encoding as in Forward.
	var mask1, mask2 int
	var shift uint
	if this.mode == MODE_TIMESTAMP {
		mask1 = 0
	} else {
		mask1 = -1
	}
	if this.mode == MODE_MTF {
		mask2 = 0
	} else {
		mask2 = -1
	}
	if this.mode == MODE_RANK {
		shift = 1
	} else {
		shift = 0
	}
	// Reset access times and initialize the rank->symbol mapping to identity.
	for i := 0; i < 256; i++ {
		p[i] = 0
		q[i] = 0
		r2s[i] = i
	}
	for i := 0; i < count; i++ {
		r := uint(src[i])
		c := r2s[r]
		dst[i] = byte(c)
		q[c] = ((i & mask1) + (p[c] & mask2)) >> shift
		p[c] = i
		curVal := q[c]
		// Move up symbol to correct rank
		for r > 0 && q[r2s[r-1]] <= curVal {
			r2s[r] = r2s[r-1]
			r--
		}
		r2s[r] = c
	}
	return uint(count), uint(count), nil
} | go/src/kanzi/transform/SBRT.go | 0.765155 | 0.468122 | SBRT.go | starcoder |
package interpolation ; import ( "math" ; "github.com/sjbog/math_tools" )
// Bezier_point calculates the point (by offset percent) on the Bézier curve
// defined by the given control points.
//
// Closed-form formulas are used for the special cases of a single point and
// of linear, quadratic and cubic curves; higher degrees fall back to the
// general Bernstein form, updating the binomial coefficient incrementally
// along a row of Pascal's triangle instead of recomputing it per term.
//
// Arguments:
//   control_points : the curve's control points; every point must have the
//                    same number of dimensions as the first one
//   offset         : 0.0 <= offset <= 1.0, percentage along the curve where
//                    0.0 == P0 (control point 0) and 1.0 == Pn
//
// Returns the curve point at the given offset (same dimensions as P0), or a
// nil slice when no control points are supplied. One should use goroutines
// for calculating many points of a curve concurrently.
func Bezier_point(control_points *[][]float64, offset float64) (result []float64) {
	var points_len = uint(len(*control_points))
	if points_len == 0 {
		return result
	}
	// Care: inclusive N — points_len is now the curve degree.
	points_len--

	var (
		berstein_basis       float64
		degree               = len((*control_points)[0])
		offset_complementary = 1.0 - offset
	)

	// Seed the result with P0's contribution: P0 * (1-t)^n.
	berstein_basis = math.Pow(offset_complementary, float64(points_len))
	for di := 0; di < degree; di++ {
		result = append(result, (*control_points)[0][di]*berstein_basis)
	}

	switch points_len {
	// P0 only: result = P0
	case 0:
		return (*control_points)[0]
	// Linear: result = (1-t)*P0 + t*P1
	case 1:
		var P0, P1 float64
		for di := 0; di < degree; di++ {
			P0, P1 = (*control_points)[0][di], (*control_points)[1][di]
			result[di] = P0*offset_complementary + P1*offset
		}
		return
	// Quadratic: result = P0*(1-t)^2 + 2*P1*(1-t)*t + P2*t^2
	case 2:
		var (
			P0, P1, P2                float64
			offset_Mul_complementary  = offset_complementary * offset
			offset_Pow2               = offset * offset
			offset_complementary_Pow2 = offset_complementary * offset_complementary
		)
		for di := 0; di < degree; di++ {
			P0 = (*control_points)[0][di]
			P1 = (*control_points)[1][di]
			P2 = (*control_points)[2][di]
			result[di] = P0*offset_complementary_Pow2 +
				2*P1*offset_Mul_complementary + P2*offset_Pow2
		}
		return
	// Cubic: result = P0*(1-t)^3 + 3*P1*t*(1-t)^2 + 3*P2*(1-t)*t^2 + P3*t^3
	case 3:
		var (
			P0, P1, P2, P3                float64
			offset_Mul_complementary_Pow2 = offset * offset_complementary * offset_complementary
			offset_Pow2_Mul_complementary = offset * offset * offset_complementary
			offset_Pow3                   = offset * offset * offset
			offset_complementary_Pow3     = offset_complementary * offset_complementary * offset_complementary
		)
		for di := 0; di < degree; di++ {
			P0 = (*control_points)[0][di]
			P1 = (*control_points)[1][di]
			P2 = (*control_points)[2][di]
			P3 = (*control_points)[3][di]
			result[di] = P0*offset_complementary_Pow3 + P3*offset_Pow3 + 3*
				(P1*offset_Mul_complementary_Pow2+P2*offset_Pow2_Mul_complementary)
		}
		return
	}

	// General case: accumulate the remaining Bernstein terms (P0 is already
	// in result), advancing the binomial coefficient sequentially.
	for point_i, binomial_coeff := uint(1), uint(1); point_i <= points_len; point_i++ {
		binomial_coeff = binomial_coeff * (points_len - point_i + 1) / point_i
		berstein_basis = float64(binomial_coeff) *
			math.Pow(offset, float64(point_i)) *
			math.Pow(offset_complementary, float64(points_len-point_i))
		for di := 0; di < degree; di++ {
			result[di] += (*control_points)[point_i][di] * berstein_basis
		}
	}
	return
}
// Bernstein_basis evaluates the Bernstein basis polynomial of degree n,
// B(n, i, t) = C(n, i) * t^i * (1-t)^(n-i), using cheap closed forms for the
// common special cases and the binomial-coefficient formula otherwise.
// Polynomials on http://mathworld.wolfram.com/BernsteinPolynomial.html
//
// Arguments:
//   control_points_num  : n, the total number of control points
//   control_point_index : i, 0-based index of the control point of interest
//                         [0 <= control_point_index < control_points_num]
//   offset              : t, 0.0 <= offset <= 1.0, see func Bezier_point
//
// Note: the parameters are reused as loop accumulators inside each branch.
func Bernstein_basis(control_points_num, control_point_index uint, offset float64) float64 {
	if control_points_num == 0 {
		return 1.0
	}
	// i == n: t^n
	if control_points_num == control_point_index {
		t := offset
		for ; control_point_index > 1; control_point_index-- {
			offset = t * offset
		}
		return offset
	}
	switch control_point_index {
	case 0:
		// (1-t)^n
		t := 1.0 - offset
		offset = t
		for ; control_points_num > 1; control_points_num-- {
			offset = t * offset
		}
		return offset
	case 1:
		// n * t * (1-t)^(n-1)
		t := 1.0 - offset
		offset = float64(control_points_num) * offset
		for ; control_points_num > 1; control_points_num-- {
			offset = t * offset
		}
		return offset
	case 2:
		if control_points_num == 3 {
			return 3.0 * (1.0 - offset) * offset * offset
		}
		if control_points_num == 4 {
			return 6.0 * (1.0 - offset) * (1.0 - offset) * offset * offset
		}
	}
	// i == n-1: n * t^(n-1) * (1-t)
	if control_points_num == control_point_index+1 {
		t := float64(control_points_num) * (1.0 - offset) * offset
		for ; control_point_index > 1; control_point_index-- {
			t = t * offset
		}
		return t
	}
	// General case: C(n, i) * t^i * (1-t)^(n-i)
	return float64(math_tools.Binomial_coefficient(control_points_num, control_point_index)) *
		math.Pow(offset, float64(control_point_index)) *
		math.Pow((1.0-offset), float64(control_points_num-control_point_index))
} | interpolation/bezier.go | 0.681515 | 0.537102 | bezier.go | starcoder |
package equipables
import (
"github.com/lquesada/cavernal/assets"
"github.com/lquesada/cavernal/helpers"
"github.com/lquesada/cavernal/entity"
"github.com/lquesada/cavernal/model"
"github.com/lquesada/cavernal/lib/g3n/engine/math32"
)
// --
// woodenShieldModel is the shared render spec for the wooden shield item.
var woodenShieldModel = &model.NodeSpec{
	Decoder: model.Load(dir, "woodenshield", assets.Files),
	Transform: &model.Transform{
		Position: &math32.Vector3{1, 11, -1},
		Scale: model.X3.Scale,
	},
}
// NewWoodenShield creates a common wooden shield with defense value 4.
func NewWoodenShield() *helpers.BasicEquipable {
	return (&helpers.BasicEquipableSpecification{
		Category: entity.Common,
		Name: "wooden shield",
		ItemType: entity.Shield,
		DefenseValue: 4,
		EquippedNode: woodenShieldModel.Build(),
		DroppedNode: woodenShieldModel.Build(),
		InventoryNode: woodenShieldModel.Build(),
	}).New()
}
// --
// reinforcedShieldModel is the shared render spec for the reinforced shield item.
var reinforcedShieldModel = &model.NodeSpec{
	Decoder: model.Load(dir, "reinforcedshield", assets.Files),
	Transform: &model.Transform{
		Position: &math32.Vector3{1, 11, -1},
		Scale: model.X3.Scale,
	},
}
// NewReinforcedShield creates a common reinforced shield with defense value 5.
func NewReinforcedShield() *helpers.BasicEquipable {
	return (&helpers.BasicEquipableSpecification{
		Category: entity.Common,
		Name: "reinforced shield",
		ItemType: entity.Shield,
		DefenseValue: 5,
		EquippedNode: reinforcedShieldModel.Build(),
		DroppedNode: reinforcedShieldModel.Build(),
		InventoryNode: reinforcedShieldModel.Build(),
	}).New()
}
// --
// ironShieldModel is the shared render spec for the iron shield item.
var ironShieldModel = &model.NodeSpec{
	Decoder: model.Load(dir, "ironshield", assets.Files),
	Transform: &model.Transform{
		Position: &math32.Vector3{1, 11, -1},
		Scale: model.X3.Scale,
	},
}
// NewIronShield creates a common iron shield with defense value 7.
func NewIronShield() *helpers.BasicEquipable {
	return (&helpers.BasicEquipableSpecification{
		Category: entity.Common,
		Name: "iron shield",
		ItemType: entity.Shield,
		DefenseValue: 7,
		EquippedNode: ironShieldModel.Build(),
		DroppedNode: ironShieldModel.Build(),
		InventoryNode: ironShieldModel.Build(),
	}).New()
}
// --
// spikeShieldModel is the shared render spec for the spike shield item.
var spikeShieldModel = &model.NodeSpec{
	Decoder: model.Load(dir, "spikeshield", assets.Files),
	Transform: &model.Transform{
		Position: &math32.Vector3{1, 11, -1},
		Scale: model.X3.Scale,
	},
}
// NewSpikeShield creates a common spike shield with defense value 8.
func NewSpikeShield() *helpers.BasicEquipable {
	return (&helpers.BasicEquipableSpecification{
		Category: entity.Common,
		Name: "spike shield",
		ItemType: entity.Shield,
		DefenseValue: 8,
		EquippedNode: spikeShieldModel.Build(),
		DroppedNode: spikeShieldModel.Build(),
		InventoryNode: spikeShieldModel.Build(),
	}).New()
}
// -- | assets/equipables/shields.go | 0.520984 | 0.487734 | shields.go | starcoder |
package rmath
import (
"fmt"
"math"
)
// NewVector3 creates a Vector3 initialized to 0.0, 0.0, 0.0 (the zero value).
func NewVector3() *Vector3 {
	return &Vector3{}
}

// NewVector3With3Components creates a Vector3 initialized with x, y, z.
func NewVector3With3Components(x, y, z float32) *Vector3 {
	return &Vector3{X: x, Y: y, Z: z}
}

// NewVector3With2Components creates a Vector3 initialized with x, y and Z = 0.0.
func NewVector3With2Components(x, y float32) *Vector3 {
	return &Vector3{X: x, Y: y}
}
// Clone returns a new copy of this vector.
func (v *Vector3) Clone() *Vector3 {
	// Copy the whole value in one step instead of field-by-field.
	c := *v
	return &c
}
// Set3Components sets x, y and z in place.
func (v *Vector3) Set3Components(x, y, z float32) {
	v.X = x
	v.Y = y
	v.Z = z
}
// Set2Components sets x and y in place; Z is left unchanged.
func (v *Vector3) Set2Components(x, y float32) {
	v.X = x
	v.Y = y
}
// Set copies x, y and z from source into this vector.
func (v *Vector3) Set(source *Vector3) {
	v.X = source.X
	v.Y = source.Y
	v.Z = source.Z
}
// Add adds src to this vector component-wise in place and returns v for chaining.
func (v *Vector3) Add(src *Vector3) *Vector3 {
	v.X += src.X
	v.Y += src.Y
	v.Z += src.Z
	return v
}
// Add2Components adds x and y to this vector in place and returns v for chaining.
func (v *Vector3) Add2Components(x, y float32) *Vector3 {
	v.X += x
	v.Y += y
	return v
}
// Sub subtracts src from this vector component-wise in place and returns v for chaining.
func (v *Vector3) Sub(src *Vector3) *Vector3 {
	v.X -= src.X
	v.Y -= src.Y
	v.Z -= src.Z
	return v
}
// Sub2Components subtracts x and y from this vector in place and returns v for chaining.
func (v *Vector3) Sub2Components(x, y float32) *Vector3 {
	v.X -= x
	v.Y -= y
	return v
}
// ScaleBy scales this vector by s in place and returns v for chaining.
func (v *Vector3) ScaleBy(s float32) *Vector3 {
	v.X *= s
	v.Y *= s
	v.Z *= s
	return v
}
// ScaleBy2Components scales X by sx and Y by sy in place and returns v for chaining.
func (v *Vector3) ScaleBy2Components(sx, sy float32) *Vector3 {
	v.X *= sx
	v.Y *= sy
	return v
}
// MulAdd adds src scaled by scalar to this vector in place and returns v for chaining.
func (v *Vector3) MulAdd(src *Vector3, scalar float32) *Vector3 {
	v.X += src.X * scalar
	v.Y += src.Y * scalar
	v.Z += src.Z * scalar
	return v
}
// Length returns the euclidean length of the vector (x, y, z).
func Length(x, y, z float32) float32 {
	return float32(math.Sqrt(float64(x*x + y*y + z*z)))
}
// Length returns the euclidean length of this vector.
func (v *Vector3) Length() float32 {
	return float32(math.Sqrt(float64(v.X*v.X + v.Y*v.Y + v.Z*v.Z)))
}
// LengthSquared returns the euclidean length squared; cheaper than Length
// for comparisons since it skips the square root.
func LengthSquared(x, y, z float32) float32 {
	return x*x + y*y + z*z
}
// LengthSquared returns the euclidean length squared of this vector.
func (v *Vector3) LengthSquared() float32 {
	return v.X*v.X + v.Y*v.Y + v.Z*v.Z
}
// Equal makes an exact equality check. Use EqEpsilon instead for
// floating-point tolerant comparisons.
func (v *Vector3) Equal(other *Vector3) bool {
	return v.X == other.X && v.Y == other.Y && v.Z == other.Z
}
// EqEpsilon makes an approximate equality check: it reports whether every
// component pair differs by less than Epsilon. Preferred over Equal.
func (v *Vector3) EqEpsilon(other *Vector3) bool {
	// BUGFIX: compare absolute differences. The previous signed comparison
	// (v.X-other.X) < Epsilon reported any large NEGATIVE difference as
	// "equal".
	dx, dy, dz := v.X-other.X, v.Y-other.Y, v.Z-other.Z
	if dx < 0 {
		dx = -dx
	}
	if dy < 0 {
		dy = -dy
	}
	if dz < 0 {
		dz = -dz
	}
	return dx < Epsilon && dy < Epsilon && dz < Epsilon
}
// Distance finds the euclidean distance between the two specified points.
func Distance(x1, y1, z1, x2, y2, z2 float32) float32 {
	a := x2 - x1
	b := y2 - y1
	c := z2 - z1
	return float32(math.Sqrt(float64(a*a + b*b + c*c)))
}
// Distance finds the euclidean distance between this vector and src.
func (v *Vector3) Distance(src *Vector3) float32 {
	a := src.X - v.X
	b := src.Y - v.Y
	c := src.Z - v.Z
	return float32(math.Sqrt(float64(a*a + b*b + c*c)))
}
// DistanceSquared finds the squared euclidean distance between the two
// specified points; cheaper than Distance for comparisons.
func DistanceSquared(x1, y1, z1, x2, y2, z2 float32) float32 {
	a := x2 - x1
	b := y2 - y1
	c := z2 - z1
	return a*a + b*b + c*c
}
// DistanceSquared finds the squared euclidean distance between this vector and src.
func (v *Vector3) DistanceSquared(src *Vector3) float32 {
	a := src.X - v.X
	b := src.Y - v.Y
	c := src.Z - v.Z
	return a*a + b*b + c*c
}
// Dot returns the dot product of the two vectors given by components.
func Dot(x1, y1, z1, x2, y2, z2 float32) float32 {
	return x1*x2 + y1*y2 + z1*z2
}
// DotByComponent returns the dot product between this vector and (x, y, z).
func (v *Vector3) DotByComponent(x, y, z float32) float32 {
	return v.X*x + v.Y*y + v.Z*z
}
// Dot returns the dot product between this vector and o.
func (v *Vector3) Dot(o *Vector3) float32 {
	return v.X*o.X + v.Y*o.Y + v.Z*o.Z
}
// Cross sets this vector to the cross product between it and the other vector
// and returns v for chaining. The three argument expressions are evaluated
// before Set3Components mutates v, so there is no aliasing problem.
func (v *Vector3) Cross(o *Vector3) *Vector3 {
	v.Set3Components(
		v.Y*o.Z-v.Z*o.Y,
		v.Z*o.X-v.X*o.Z,
		v.X*o.Y-v.Y*o.X)
	return v
}
// --------------------------------------------------------------------------
// Transforms
// --------------------------------------------------------------------------
// Mul left-multiplies the vector by the given matrix, assuming the fourth (w)
// component of the vector is 1, and returns v for chaining.
func (v *Vector3) Mul(m *Matrix4) *Vector3 {
	v.Set3Components(
		v.X*m.e[M00]+v.Y*m.e[M01]+v.Z*m.e[M02]+m.e[M03],
		v.X*m.e[M10]+v.Y*m.e[M11]+v.Z*m.e[M12]+m.e[M13],
		v.X*m.e[M20]+v.Y*m.e[M21]+v.Z*m.e[M22]+m.e[M23])
	return v
}
// String implements fmt.Stringer, rendering the vector as "<x, y, z>".
func (v Vector3) String() string {
	return fmt.Sprintf("<%f, %f, %f>", v.X, v.Y, v.Z)
} | ranger/rmath/vector3.go | 0.906382 | 0.703549 | vector3.go | starcoder |
package graph
import (
"container/list"
"fmt"
"math/rand"
"time"
)
// GraphGenerator configures the random graph produced by Create.
type GraphGenerator struct {
	// EnableAsymetricDistances (it true, default false) allows the graph to have different edge lengths from A to B than B to A
	// (e.g. to simulate different routes between two locations due to one-way streets).
	EnableAsymetricDistances bool
	// EnableUnidirectionalEdges (if true, default false) allows the graph to have an edge from node A to B, without a corresponding edge from B to A.
	// All vertices will have paths to all other vertices, even if this is enabled.
	EnableUnidirectionalEdges bool
	// MaxEdges determines the maximum number of edges each vertex can have.
	// This must be greater than or equal to MinEdges, and this must be at least 2.
	MaxEdges uint8
	// MinEdges determines the minimum number of edge each vertex should have.
	// This must be at least 1.
	MinEdges uint8
	// NumVertices determines the number of vertices to generate.
	// This must be at least 3.
	NumVertices uint32
	// Seed is used to initialize the random algorithm.
	// This should be used to reproduce the same graph across multiple tests.
	// If this is nil, a seed will be automatically generated.
	Seed *int64
}
// Create generates a random graph honoring the generator's configuration.
// It panics when the configuration is invalid (see the field docs).
func (gen *GraphGenerator) Create() *Graph {
	if gen.MaxEdges < 2 {
		panic(fmt.Errorf("MaxEdges must be at least 2, supplied value=%v", gen.MaxEdges))
	} else if gen.MinEdges < 1 {
		panic(fmt.Errorf("MinEdges must be at least 1, supplied value=%v", gen.MinEdges))
	} else if gen.MaxEdges < gen.MinEdges {
		panic(fmt.Errorf("MaxEdges must be at least MinEdges, MaxEdges=%v MinEdges=%v", gen.MaxEdges, gen.MinEdges))
	} else if gen.NumVertices < 3 {
		panic(fmt.Errorf("NumVertices must be at least 3, supplied value=%v", gen.NumVertices))
	}
	// Pool of unused vertex names ("a", "b", ..., "aa", ...).
	availableNames := list.New()
	availableNames.Init()
	for i := 0; i < int(gen.NumVertices); i++ {
		availableNames.PushBack(buildVertexName(i))
	}
	var random *rand.Rand
	if gen.Seed != nil {
		random = rand.New(rand.NewSource(*gen.Seed))
	} else {
		random = rand.New(rand.NewSource(time.Now().UnixNano()))
	}
	vertices := []*GraphVertex{}
	// Set up a basic graph in which there is at least a unidirectional chain
	// visiting every vertex once.
	nextVertex := generateVertex(availableNames, random)
	for availableNames.Len() > 0 {
		current := nextVertex
		vertices = append(vertices, current)
		nextVertex = generateVertex(availableNames, random)
		gen.linkVertices(current, nextVertex, random)
	}
	// Append the final vertex to the graph
	vertices = append(vertices, nextVertex)
	// Update each node in the graph to have a random number of edges between MinEdges and MaxEdges
	// Note: this may produce Vertices with more edges than MaxEdges, but that doesn't cause any issues so I am not fixing it (at this time).
	// NOTE(review): destinationIndex below may select v itself, producing a
	// self-loop edge — confirm this is intended.
	numEdgesRange := gen.MaxEdges - gen.MinEdges
	for _, v := range vertices {
		numEdges := gen.MinEdges
		if numEdgesRange > 0 {
			numEdges += uint8(random.Int31n(int32(numEdgesRange)))
		}
		for len(v.adjacentVertices) < int(numEdges) {
			destinationIndex := random.Intn(int(gen.NumVertices))
			gen.linkVertices(v, vertices[destinationIndex], random)
		}
	}
	return NewGraph(vertices)
}
func buildVertexName(index int) string {
name := ""
remainder := index % 26
// Notes:
// - need (result+25)%26 to allow `a` to appear in the first letter of the name. Otherwise, it is treated as 0 and `b`` would be treated as `1`.
// - need (result-1)%26 to allow `za`-`zz` to appear in the list due to the manipulation we are doing to the remainder.
for result := index / 26; result > 0; remainder, result = (result+25)%26, (result-1)/26 {
name = fmt.Sprintf("%c%s", 'a'+remainder, name)
}
name = fmt.Sprintf("%c%s", 'a'+remainder, name)
return name
}
func generateVertex(availableNames *list.List, random *rand.Rand) *GraphVertex {
nameIndex := random.Intn(availableNames.Len())
current := availableNames.Front()
for i := 0; i < nameIndex; i, current = i+1, current.Next() {
}
name := availableNames.Remove(current)
return NewGraphVertex(name.(string))
}
func (gen *GraphGenerator) linkVertices(a *GraphVertex, b *GraphVertex, random *rand.Rand) {
distAB := random.Float64() * 10000.0
a.adjacentVertices[b] = distAB
// Set up the return edge, if enabled.
if !gen.EnableUnidirectionalEdges {
distBA := distAB
if gen.EnableAsymetricDistances {
distBA = random.Float64() * 10000.0
}
b.adjacentVertices[a] = distBA
}
} | graph/graphgenerator.go | 0.632957 | 0.528412 | graphgenerator.go | starcoder |
package assert
import (
"fmt"
"testing"
"github.com/slcjordan/poc"
)
type Board struct {
assertion *Assertion
scoreCheckers []Int32Checker
Piles PositionedCardArray2D
}
func newBoard(assertion *Assertion) Board {
return Board{
assertion: assertion,
Piles: newPositionedCardArray2D(assertion),
}
}
func (parent *Board) Score(checkers ...Int32Checker) *Assertion {
parent.scoreCheckers = checkers
return parent.assertion
}
func (parent *Board) CheckBoard(t *testing.T, desc string, val poc.Board) {
for _, checker := range parent.scoreCheckers {
checker.CheckInt32(t, desc+".Score", val.Score)
}
parent.Piles.CheckPositionedCardArray2D(t, desc+".Piles", val.Piles)
}
type Card struct {
assertion *Assertion
Index Index
Suit Suit
}
func newCard(assertion *Assertion) Card {
return Card{
assertion: assertion,
Index: newIndex(assertion),
Suit: newSuit(assertion),
}
}
func (parent *Card) CheckCard(t *testing.T, desc string, val poc.Card) {
parent.Index.CheckIndex(t, desc+".Index", val.Index)
parent.Suit.CheckSuit(t, desc+".Suit", val.Suit)
}
type Index struct {
assertion *Assertion
uint8Checkers []Uint8Checker
}
func newIndex(assertion *Assertion) Index {
return Index{
assertion: assertion,
}
}
func (parent *Index) Uint8(checkers ...Uint8Checker) *Assertion {
parent.uint8Checkers = checkers
return parent.assertion
}
func (parent *Index) CheckIndex(t *testing.T, desc string, val poc.Index) {
for _, checker := range parent.uint8Checkers {
checker.CheckUint8(t, desc+".uint8", uint8(val))
}
}
type Move struct {
assertion *Assertion
newPileIndexCheckers []IntChecker
newPileNumCheckers []IntChecker
oldPileIndexCheckers []IntChecker
oldPileNumCheckers []IntChecker
NewPilePosition Position
OldPilePosition Position
}
func newMove(assertion *Assertion) Move {
return Move{
assertion: assertion,
NewPilePosition: newPosition(assertion),
OldPilePosition: newPosition(assertion),
}
}
func (parent *Move) NewPileIndex(checkers ...IntChecker) *Assertion {
parent.newPileIndexCheckers = checkers
return parent.assertion
}
func (parent *Move) NewPileNum(checkers ...IntChecker) *Assertion {
parent.newPileNumCheckers = checkers
return parent.assertion
}
func (parent *Move) OldPileIndex(checkers ...IntChecker) *Assertion {
parent.oldPileIndexCheckers = checkers
return parent.assertion
}
func (parent *Move) OldPileNum(checkers ...IntChecker) *Assertion {
parent.oldPileNumCheckers = checkers
return parent.assertion
}
func (parent *Move) CheckMove(t *testing.T, desc string, val poc.Move) {
for _, checker := range parent.newPileIndexCheckers {
checker.CheckInt(t, desc+".NewPileIndex", val.NewPileIndex)
}
for _, checker := range parent.newPileNumCheckers {
checker.CheckInt(t, desc+".NewPileNum", val.NewPileNum)
}
for _, checker := range parent.oldPileIndexCheckers {
checker.CheckInt(t, desc+".OldPileIndex", val.OldPileIndex)
}
for _, checker := range parent.oldPileNumCheckers {
checker.CheckInt(t, desc+".OldPileNum", val.OldPileNum)
}
parent.NewPilePosition.CheckPosition(t, desc+".NewPilePosition", val.NewPilePosition)
parent.OldPilePosition.CheckPosition(t, desc+".OldPilePosition", val.OldPilePosition)
}
type Position struct {
assertion *Assertion
uint64Checkers []Uint64Checker
}
func newPosition(assertion *Assertion) Position {
return Position{
assertion: assertion,
}
}
func (parent *Position) Uint64(checkers ...Uint64Checker) *Assertion {
parent.uint64Checkers = checkers
return parent.assertion
}
func (parent *Position) CheckPosition(t *testing.T, desc string, val poc.Position) {
for _, checker := range parent.uint64Checkers {
checker.CheckUint64(t, desc+".uint64", uint64(val))
}
}
type PositionedCard struct {
assertion *Assertion
Card Card
Position Position
}
func newPositionedCard(assertion *Assertion) PositionedCard {
return PositionedCard{
assertion: assertion,
Card: newCard(assertion),
Position: newPosition(assertion),
}
}
func (parent *PositionedCard) CheckPositionedCard(t *testing.T, desc string, val poc.PositionedCard) {
parent.Card.CheckCard(t, desc+".Card", val.Card)
parent.Position.CheckPosition(t, desc+".Position", val.Position)
}
type Suit struct {
assertion *Assertion
uint8Checkers []Uint8Checker
}
func newSuit(assertion *Assertion) Suit {
return Suit{
assertion: assertion,
}
}
func (parent *Suit) Uint8(checkers ...Uint8Checker) *Assertion {
parent.uint8Checkers = checkers
return parent.assertion
}
func (parent *Suit) CheckSuit(t *testing.T, desc string, val poc.Suit) {
for _, checker := range parent.uint8Checkers {
checker.CheckUint8(t, desc+".uint8", uint8(val))
}
}
type Variant struct {
assertion *Assertion
maxTimesThroughDeckCheckers []Int32Checker
}
func newVariant(assertion *Assertion) Variant {
return Variant{
assertion: assertion,
}
}
func (parent *Variant) MaxTimesThroughDeck(checkers ...Int32Checker) *Assertion {
parent.maxTimesThroughDeckCheckers = checkers
return parent.assertion
}
func (parent *Variant) CheckVariant(t *testing.T, desc string, val poc.Variant) {
for _, checker := range parent.maxTimesThroughDeckCheckers {
checker.CheckInt32(t, desc+".MaxTimesThroughDeck", val.MaxTimesThroughDeck)
}
}
type PositionedCardArray1D struct {
assertion *Assertion
lengthCheckers []IntChecker
nth map[int]PositionedCard
ForEach PositionedCard
}
func newPositionedCardArray1D(assertion *Assertion) PositionedCardArray1D {
return PositionedCardArray1D{
assertion: assertion,
nth: make(map[int]PositionedCard),
ForEach: newPositionedCard(assertion),
}
}
func (a *PositionedCardArray1D) Nth(i int) PositionedCard {
prev, ok := a.nth[i]
if ok {
return prev
}
result := newPositionedCard(a.assertion)
a.nth[i] = result
return result
}
func (a *PositionedCardArray1D) Length(checkers ...IntChecker) *Assertion {
a.lengthCheckers = checkers
return a.assertion
}
func (a *PositionedCardArray1D) CheckPositionedCardArray1D(t *testing.T, desc string, val []poc.PositionedCard) {
for _, checker := range a.lengthCheckers {
checker.CheckInt(t, desc+".length", len(val))
}
for i, checker := range a.nth {
checker.CheckPositionedCard(t, desc+fmt.Sprintf("[%d]", i), val[i])
}
for _, curr := range val {
a.ForEach.CheckPositionedCard(t, desc+".ForEach", curr)
}
}
type PositionedCardArray2D struct {
assertion *Assertion
lengthCheckers []IntChecker
nth map[int]PositionedCardArray1D
ForEach PositionedCardArray1D
}
func newPositionedCardArray2D(assertion *Assertion) PositionedCardArray2D {
return PositionedCardArray2D{
assertion: assertion,
nth: make(map[int]PositionedCardArray1D),
ForEach: newPositionedCardArray1D(assertion),
}
}
func (a *PositionedCardArray2D) Nth(i int) PositionedCardArray1D {
prev, ok := a.nth[i]
if ok {
return prev
}
result := newPositionedCardArray1D(a.assertion)
a.nth[i] = result
return result
}
func (a *PositionedCardArray2D) Length(checkers ...IntChecker) *Assertion {
a.lengthCheckers = checkers
return a.assertion
}
func (a *PositionedCardArray2D) CheckPositionedCardArray2D(t *testing.T, desc string, val [13][]poc.PositionedCard) {
for _, checker := range a.lengthCheckers {
checker.CheckInt(t, desc+".length", len(val))
}
for i, checker := range a.nth {
checker.CheckPositionedCardArray1D(t, desc+fmt.Sprintf("[%d]", i), val[i])
}
for _, curr := range val {
a.ForEach.CheckPositionedCardArray1D(t, desc+".ForEach", curr)
}
} | test/assert/model.go | 0.753829 | 0.615983 | model.go | starcoder |
package gridserver
import (
"math"
)
const (
earthRadiusMeters = 6378137
earthCircumferenceMeters = 2 * math.Pi * earthRadiusMeters
)
// Projection defines the interface for types that convert between pixel and lat/lng coordinates.
type Projection interface {
TileOrigin(tx, ty, zoom int) (float64, float64)
TileLatLngBounds(tx, ty, zoom int) (float64, float64, float64, float64)
LatLngToRaster(float64, float64, float64) (x, y float64)
String() string
}
// MercatorTMS provides a spherical mercator projection using TMS tile specifications.
// Although TMS tiles are numbered from south to north, raster coordinates are numbered from north to south.
// This code is indebted to the gdal2tiles.py from OSGEO GDAL.
type MercatorTMS struct {
tileSize float64
metersPerPixel float64
originShift float64
}
// NewMercatorTMS gets new projection object.
func NewMercatorTMS() *MercatorTMS {
m := MercatorTMS{
tileSize: tileSize,
metersPerPixel: earthCircumferenceMeters / tileSize,
originShift: earthCircumferenceMeters / 2,
}
return &m
}
// TileOrigin returns the left and top of the tile in raster pixels.
func (m *MercatorTMS) TileOrigin(tx, ty, zoom int) (x, y float64) {
// Flip y into WMS numbering (north to south).
ty = int(math.Pow(2, float64(zoom))) - ty - 1
y = float64(ty) * tileSize
x = float64(tx) * tileSize
return
}
// TileLatLngBounds returns bounds of a TMS tile in latitude/longitude using WGS84 datum.
func (m *MercatorTMS) TileLatLngBounds(tx, ty, zoom int) (latlo, lnglo, lathi, lnghi float64) {
minx, miny := m.pixelsToMeters(float64(tx)*m.tileSize, float64(ty)*m.tileSize, float64(zoom))
maxx, maxy := m.pixelsToMeters((float64(tx)+1)*m.tileSize, (float64(ty)+1)*m.tileSize, float64(zoom))
latlo, lnglo = m.metersToLatLng(minx, miny)
lathi, lnghi = m.metersToLatLng(maxx, maxy)
return
}
// LatLngToRaster converts a WGS84 latitude and longitude to absolute pixel values.
// Note that the pixel origin is at top left.
func (m *MercatorTMS) LatLngToRaster(lat, lng float64, zoom float64) (x, y float64) {
var mx, my float64
if lat < 0 {
// If the latitude is negative, work it out as if it was positive.
// (This is because the algorithm returns Inf if lat = -90.)
mx, my = m.latLngToMeters(-lat, lng)
} else {
mx, my = m.latLngToMeters(lat, lng)
}
resolution := m.metersPerPixel / math.Pow(2, zoom)
// Shift the meter values to the origin and convert them to pixels.
x = (mx + m.originShift) / resolution
y = (my + m.originShift) / resolution
// If the latitude was positive, convert the y coordinate to be numbered from top to bottom.
// (If it was negative, we don't have to do anything because we already reversed the latitude.)
if lat > 0 {
y = float64(int(m.tileSize)<<uint(zoom)) - y
}
return
}
// String provides the name of the projection.
func (m *MercatorTMS) String() string {
return "mercator"
}
// latLngToMeters converts given lat/lon in WGS84 Datum to XY in Spherical MercatorTMS EPSG:900913.
func (m *MercatorTMS) latLngToMeters(lat, lng float64) (mx float64, my float64) {
mx = lng * m.originShift / 180.0
my = math.Log(math.Tan((90+lat)*math.Pi/360.0)) / (math.Pi / 180.0)
my = my * m.originShift / 180.0
return
}
// metersToLatLng converts XY point from Spherical MercatorTMS EPSG:900913 to lat/lng in WGS84 Datum.
func (m *MercatorTMS) metersToLatLng(mx, my float64) (lat float64, lng float64) {
lng = (mx / m.originShift) * 180.0
lat = (my / m.originShift) * 180.0
lat = 180 / math.Pi * (2*math.Atan(math.Exp(lat*math.Pi/180.0)) - math.Pi/2.0)
return
}
// pixelsToMeters converts pixel coordinates in given zoom level of pyramid to EPSG:900913.
func (m *MercatorTMS) pixelsToMeters(px, py, zoom float64) (mx, my float64) {
resolution := m.metersPerPixel / math.Pow(2, zoom)
mx = px*resolution - m.originShift
my = py*resolution - m.originShift
return
}
// GeodeticTMS provides a EPSG:4326 projection.
// The top zoom level is scaled to two tiles. Zoom levels are not square but rectangular.
// Although TMS tiles are numbered from south to north, raster coordinates are numbered from north to south.
// This code is indebted to the gdal2tiles.py from OSGEO GDAL.
type GeodeticTMS struct {
tileSize float64
resFact float64
originShift float64
}
// NewGeodeticTMS gets new projection object.
func NewGeodeticTMS() *GeodeticTMS {
g := GeodeticTMS{
tileSize: tileSize,
resFact: 180.0 / tileSize,
}
return &g
}
// TileOrigin returns the left and top of the tile in raster pixels.
func (g *GeodeticTMS) TileOrigin(tx, ty, zoom int) (x, y float64) {
// Flip y into WMS numbering (north to south).
ty = int(math.Pow(2, float64(zoom))) - ty - 1
y = float64(ty) * tileSize
x = float64(tx) * tileSize
return
}
// TileLatLngBounds returns bounds of a TMS tile in latitude/longitude using WGS84 datum.
func (g *GeodeticTMS) TileLatLngBounds(tx, ty, zoom int) (latlo, lnglo, lathi, lnghi float64) {
res := g.resFact / math.Pow(2, float64(zoom))
lnglo = float64(tx)*tileSize*res - 180
latlo = float64(ty)*tileSize*res - 90
lnghi = float64(tx+1)*tileSize*res - 180
lathi = float64(ty+1)*tileSize*res - 90
return
}
// LatLngToRaster converts a WGS84 latitude and longitude to absolute pixel values.
// Note that the pixel origin is at top left.
func (g *GeodeticTMS) LatLngToRaster(lat, lng float64, zoom float64) (x, y float64) {
res := g.resFact / math.Pow(2, float64(zoom))
x = (180 + lng) / res
// Use -lat because we want the pixel origin to be at the top, not at the bottom.
y = (90 - lat) / res
return
}
// String provides the name of the projection.
func (g *GeodeticTMS) String() string {
return "geodetic"
} | tile_server/gridserver/projection.go | 0.904595 | 0.612541 | projection.go | starcoder |
package main
import (
"fmt"
"log"
"github.com/LdDl/cnns"
"github.com/LdDl/cnns/tensor"
"gonum.org/v1/gonum/mat"
)
func main() {
ExampleConv()
// ExampleConv2()
}
// ExampleConv Check how convolutional network's layers works with single channel image. Corresponding file is "step_by_step_cnn(dense inertia).xlsx" file.
/*
Using here 4 layers: convolutional, ReLU, pooling (max) and fc
Convolutinal features:
Single kernel: 3x3
Input size: 8x9x1 (width, height, depth)
ReLU features:
Input size: Convolutinal.OutputSize
Pooling (max) features:
Window size: 3x3
Input size: ReLU.OutputSize
FC:
Input size: Pooling.OutputSize
Outputsize: 3 (actually 3x1x1)
Using custom weights (for testing purposes) also.
*/
func ExampleConv() {
conv := cnns.NewConvLayer(&tensor.TDsize{X: 9, Y: 8, Z: 1}, 1, 3, 1)
relu := cnns.NewReLULayer(conv.GetOutputSize())
maxpool := cnns.NewPoolingLayer(relu.GetOutputSize(), 2, 2, "max", "valid")
fullyconnected := cnns.NewFullyConnectedLayer(maxpool.GetOutputSize(), 3)
convCustomWeights := mat.NewDense(3, 3, []float64{
0.10466029, -0.06228581, -0.43436298,
0.44050909, -0.07536250, -0.34348075,
0.16456005, 0.18682307, -0.40303048,
})
conv.SetCustomWeights([]*mat.Dense{convCustomWeights})
fcCustomWeights := mat.NewDense(3, maxpool.GetOutputSize().Total(), []float64{
-0.19908814, 0.01521263, 0.31363996, -0.28573613, -0.11934281, -0.18194183, -0.03111016, -0.21696585, -0.20689814,
0.17908468, -0.28144695, -0.29681312, -0.13912858, 0.07067328, 0.36249144, -0.20688576, -0.20291744, 0.25257304,
-0.29341734, 0.36533501, 0.19671917, 0.02382031, -0.47169692, -0.34167172, 0.10725344, 0.47524162, -0.42054638,
})
fullyconnected.SetCustomWeights([]*mat.Dense{fcCustomWeights})
net := cnns.WholeNet{
LP: cnns.NewLearningParametersDefault(),
}
net.Layers = append(net.Layers, conv)
net.Layers = append(net.Layers, relu)
net.Layers = append(net.Layers, maxpool)
net.Layers = append(net.Layers, fullyconnected)
image := mat.NewDense(9, 8, []float64{
-0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
-0.9, -0.10, 0.11, 0.12, 0.13, 0.14, 0.15, 0.16,
-0.17, 0.18, -0.19, 0.20, 0.21, 0.22, 0.23, 0.24,
-0.25, 0.26, 0.27, -0.28, 0.29, 0.30, 0.31, 0.32,
-0.33, 0.34, 0.35, 0.36, -0.37, 0.38, 0.39, 0.40,
-0.41, 0.42, 0.43, 0.44, 0.45, -0.46, 0.47, 0.48,
-0.49, 0.50, 0.51, 0.52, 0.53, 0.54, -0.55, 0.56,
-0.57, 0.58, 0.59, 0.60, 0.61, 0.62, 0.63, -0.64,
-0.65, 0.66, 0.67, 0.68, 0.69, 0.70, 0.71, 0.72,
})
fmt.Printf("Layers weights:\n")
for i := range net.Layers {
fmt.Printf("%s #%d weights:\n", net.Layers[i].GetType(), i)
net.Layers[i].PrintWeights()
}
fmt.Println("\tDoing training....")
for e := 0; e < 3; e++ {
err := net.FeedForward(image)
if err != nil {
log.Printf("Feedforward caused error: %s", err.Error())
return
}
desired := mat.NewDense(3, 1, []float64{0.32, 0.45, 0.96})
err = net.Backpropagate(desired)
if err != nil {
log.Printf("Backpropagate caused error: %s", err.Error())
return
}
fmt.Printf("Epoch #%d. New layers weights\n", e)
for i := range net.Layers {
fmt.Printf("%s #%d weights on epoch #%d:\n", net.Layers[i].GetType(), i, e)
net.Layers[i].PrintWeights()
}
}
}
// ExampleConv2 Check how convolutional network's layers works with RGB-based image. Corresponding file is "step_by_step_cnn(rgb dense inertia).xlsx" file.
/*
Using here 4 layers: convolutional, ReLU, pooling (max) and fc
Convolutinal features:
Single kernel: 3x3
Input size: 5x5x3 (width, height, depth)
ReLU features:
Input size: Convolutinal.OutputSize
Pooling (max) features:
Window size: 3x3
Input size: ReLU.OutputSize
FC:
Input size: Pooling.OutputSize
Outputsize: 3 (actually 2x1x1)
Using custom weights (for testing purposes) also.
*/
func ExampleConv2() {
conv := cnns.NewConvLayer(&tensor.TDsize{X: 5, Y: 5, Z: 3}, 1, 3, 2)
relu := cnns.NewReLULayer(conv.GetOutputSize())
maxpool := cnns.NewPoolingLayer(relu.GetOutputSize(), 2, 2, "max", "valid")
fullyconnected := cnns.NewFullyConnectedLayer(maxpool.GetOutputSize(), 2)
net := cnns.WholeNet{
LP: cnns.NewLearningParametersDefault(),
}
net.Layers = append(net.Layers, conv)
net.Layers = append(net.Layers, relu)
net.Layers = append(net.Layers, maxpool)
net.Layers = append(net.Layers, fullyconnected)
redChannel := mat.NewDense(5, 5, []float64{
1, 0, 1, 0, 2,
1, 1, 3, 2, 1,
1, 1, 0, 1, 1,
2, 3, 2, 1, 3,
0, 2, 0, 1, 0,
})
greenChannel := mat.NewDense(5, 5, []float64{
1, 0, 0, 1, 0,
2, 0, 1, 2, 0,
3, 1, 1, 3, 0,
0, 3, 0, 3, 2,
1, 0, 3, 2, 1,
})
blueChannel := mat.NewDense(5, 5, []float64{
2, 0, 1, 2, 1,
3, 3, 1, 3, 2,
2, 1, 1, 1, 0,
3, 1, 3, 2, 0,
1, 1, 2, 1, 1,
})
kernel1R := mat.NewDense(3, 3, []float64{
0, 1, 0,
0, 0, 2,
0, 1, 0,
})
kernel1G := mat.NewDense(3, 3, []float64{
2, 1, 0,
0, 0, 0,
0, 3, 0,
})
kernel1B := mat.NewDense(3, 3, []float64{
1, 0, 0,
1, 0, 0,
0, 0, 2,
})
kernel2R := mat.NewDense(3, 3, []float64{
0, -1, 0,
0, 0, 2,
0, 1, 0,
})
kernel2G := mat.NewDense(3, 3, []float64{
2, 1, 0,
0, 0, 0,
0, -3, 0,
})
kernel2B := mat.NewDense(3, 3, []float64{
1, 0, 0,
1, 0, 0,
0, 0, -2,
})
img2 := &mat.Dense{}
img2.Stack(redChannel, greenChannel)
image := &mat.Dense{}
image.Stack(img2, blueChannel)
kernel1 := &mat.Dense{}
kernel1.Stack(kernel1R, kernel1G)
convCustomWeights1 := &mat.Dense{}
convCustomWeights1.Stack(kernel1, kernel1B)
kernel2 := &mat.Dense{}
kernel2.Stack(kernel2R, kernel2G)
convCustomWeights2 := &mat.Dense{}
convCustomWeights2.Stack(kernel2, kernel2B)
conv.SetCustomWeights([]*mat.Dense{convCustomWeights1, convCustomWeights2})
fcCustomWeights := mat.NewDense(2, maxpool.GetOutputSize().Total(), []float64{
-0.19908814, 0.01521263,
0.17908468, -0.28144695,
})
fullyconnected.SetCustomWeights([]*mat.Dense{fcCustomWeights})
fmt.Printf("Layers weights:\n")
for i := range net.Layers {
fmt.Printf("%s #%d weights:\n", net.Layers[i].GetType(), i)
net.Layers[i].PrintWeights()
}
fmt.Println("\tDoing training....")
for e := 0; e < 1; e++ {
err := net.FeedForward(image)
if err != nil {
log.Printf("Feedforward caused error: %s", err.Error())
return
}
desired := mat.NewDense(2, 1, []float64{0.15, 0.8})
err = net.Backpropagate(desired)
if err != nil {
log.Printf("Backpropagate caused error: %s", err.Error())
return
}
fmt.Printf("Epoch #%d. New layers weights\n", e)
for i := range net.Layers {
fmt.Printf("%s #%d weights on epoch #%d:\n", net.Layers[i].GetType(), i, e)
net.Layers[i].PrintWeights()
}
}
} | examples/simple_cnn/main.go | 0.731059 | 0.415966 | main.go | starcoder |
// Package pipeline contains Beam pipeline library functions for the SumDB
// verifiable map.
package pipeline
import (
"errors"
"fmt"
"github.com/apache/beam/sdks/v2/go/pkg/beam"
"github.com/golang/glog"
"github.com/google/trillian/experimental/batchmap"
)
// InputLog allows access to entries from the SumDB.
type InputLog interface {
// Head returns the metadata of available entries.
Head() (checkpoint []byte, count int64, err error)
// Entries returns a PCollection of Metadata, containing entries in range [start, end).
Entries(s beam.Scope, start, end int64) beam.PCollection
}
// InputLogMetadata describes the provenance information of the input
// log to be passed around atomically.
type InputLogMetadata struct {
Checkpoint []byte
Entries int64
}
// MapBuilder contains the static configuration for a map, and allows
// maps at different log sizes to be built using its methods.
type MapBuilder struct {
source InputLog
treeID int64
prefixStrata int
versionLogs bool
}
// NewMapBuilder returns a MapBuilder for a map with the given configuration.
func NewMapBuilder(source InputLog, treeID int64, prefixStrata int, versionLogs bool) MapBuilder {
return MapBuilder{
source: source,
treeID: treeID,
prefixStrata: prefixStrata,
versionLogs: versionLogs,
}
}
// Create builds a map from scratch, using the first `size` entries in the
// input log. If there aren't enough entries then it will fail.
// It returns a PCollection of *Tile as the first output, and any logs built
// will be output in the second PCollection (of type ModuleVersionLog).
func (b *MapBuilder) Create(s beam.Scope, size int64) (beam.PCollection, beam.PCollection, InputLogMetadata, error) {
var tiles, logs beam.PCollection
endID, golden, err := b.getLogEnd(size)
if err != nil {
return tiles, logs, InputLogMetadata{}, err
}
records := b.source.Entries(s.Scope("source"), 0, endID)
entries := CreateEntries(s, b.treeID, records)
if b.versionLogs {
var logEntries beam.PCollection
logEntries, logs = MakeVersionLogs(s, b.treeID, records)
entries = beam.Flatten(s, entries, logEntries)
}
glog.Infof("Creating new map revision from range [0, %d)", endID)
tiles, err = batchmap.Create(s, entries, b.treeID, hash, b.prefixStrata)
return tiles, logs, InputLogMetadata{
Checkpoint: golden,
Entries: endID,
}, err
}
// Update builds a map using the last version built, and updating it to
// include all the first `size` entries from the input log. If there aren't
// enough entries then it will fail.
// It returns a PCollection of *Tile as the first output.
func (b *MapBuilder) Update(s beam.Scope, lastTiles beam.PCollection, provenance InputLogMetadata, size int64) (beam.PCollection, InputLogMetadata, error) {
var tiles beam.PCollection
if b.versionLogs {
return tiles, InputLogMetadata{}, errors.New("unsupported: incremental build incompatible with version logs")
}
endID, golden, err := b.getLogEnd(size)
if err != nil {
return tiles, InputLogMetadata{}, err
}
startID := provenance.Entries
if startID >= endID {
return tiles, InputLogMetadata{}, fmt.Errorf("startID (%d) >= endID (%d)", startID, endID)
}
records := b.source.Entries(s.Scope("source"), startID, endID)
entries := CreateEntries(s, b.treeID, records)
glog.Infof("Updating with range [%d, %d)", startID, endID)
tiles, err = batchmap.Update(s, lastTiles, entries, b.treeID, hash, b.prefixStrata)
return tiles, InputLogMetadata{
Checkpoint: golden,
Entries: endID,
}, err
}
func (b *MapBuilder) getLogEnd(requiredEntries int64) (int64, []byte, error) {
golden, totalLeaves, err := b.source.Head()
if err != nil {
return 0, nil, fmt.Errorf("failed to get Head of input log: %v", err)
}
if requiredEntries < 0 {
return totalLeaves, golden, nil
}
if totalLeaves < requiredEntries {
return 0, nil, fmt.Errorf("wanted %d leaves but only %d available", requiredEntries, totalLeaves)
}
return requiredEntries, golden, nil
} | experimental/batchmap/sumdb/build/pipeline/pipeline.go | 0.835618 | 0.471102 | pipeline.go | starcoder |
package precedence
import (
"strconv"
"github.com/metalnem/parsing-algorithms/ast"
"github.com/metalnem/parsing-algorithms/parse"
"github.com/metalnem/parsing-algorithms/scan"
"github.com/pkg/errors"
)
type assoc int
const (
left assoc = iota
right
)
type symbol struct {
value string
lbp int
nud func(*state) (ast.Expr, error)
led func(*state, ast.Expr) (ast.Expr, error)
}
type parser struct {
}
type state struct {
s *scan.Scanner
t scan.Token
}
var symbols map[string]symbol
func init() {
symbols = byValue([]symbol{
op("+").infix(10, left).prefix(30),
op("-").infix(10, left).prefix(30),
op("*").infix(20, left),
op("/").infix(20, left),
op("^").infix(40, right),
})
}
// New creates a new top down operator precedence parser.
func New() parse.Parser {
return parser{}
}
func (parser) Parse(input string) (ast.Expr, error) {
s := scan.NewScanner(input)
t := s.Next()
state := &state{s: s, t: t}
expr, err := state.expression(0)
if err != nil {
return nil, err
}
if state.t.Type != scan.EOF {
return nil, errors.Errorf("Expected EOF, got %s", t.Value)
}
return expr, nil
}
func toSymbol(t scan.Token) symbol {
if t.Type == scan.LeftParen {
return paren()
}
if t.Type == scan.Operator {
return symbols[t.Value]
}
if t.Type == scan.Number {
return literal(t.Value)
}
return symbol{}
}
func (s *state) expression(bp int) (ast.Expr, error) {
t := toSymbol(s.t)
s.t = s.s.Next()
if t.nud == nil {
return nil, errors.Errorf("Expected expression, got %s", t.value)
}
left, err := t.nud(s)
if err != nil {
return nil, err
}
for token := toSymbol(s.t); bp < token.lbp; token = toSymbol(s.t) {
t = token
s.t = s.s.Next()
if t.led == nil {
return nil, errors.Errorf("Expected expression, got %s", t.value)
}
if left, err = t.led(s, left); err != nil {
return nil, err
}
}
return left, nil
}
func op(value string) symbol {
return symbol{value: value}
}
func paren() symbol {
var sym symbol
sym.nud = func(s *state) (ast.Expr, error) {
expr, err := s.expression(0)
if err != nil {
return nil, err
}
if s.t.Type != scan.RightParen {
return nil, errors.Errorf("Expected right paren, got %s", s.t.Value)
}
s.t = s.s.Next()
return expr, nil
}
return sym
}
func literal(value string) symbol {
sym := symbol{value: value}
sym.nud = func(s *state) (ast.Expr, error) {
val, err := strconv.ParseFloat(value, 64)
if err != nil {
return nil, errors.Errorf("Expected number, got %s", value)
}
return &ast.Number{Value: val}, nil
}
return sym
}
func (sym symbol) infix(bp int, assoc assoc) symbol {
sym.lbp = bp
if assoc == right {
bp = bp - 1
}
sym.led = func(s *state, left ast.Expr) (ast.Expr, error) {
expr, err := s.expression(bp)
if err != nil {
return nil, err
}
return &ast.BinaryExpr{Op: sym.value, X: left, Y: expr}, nil
}
return sym
}
func (sym symbol) prefix(bp int) symbol {
sym.nud = func(s *state) (ast.Expr, error) {
expr, err := s.expression(bp)
if err != nil {
return nil, err
}
return &ast.UnaryExpr{Op: sym.value, X: expr}, nil
}
return sym
}
func byValue(symbols []symbol) map[string]symbol {
m := make(map[string]symbol)
for _, symbol := range symbols {
m[symbol.value] = symbol
}
return m
} | parse/precedence/precedence.go | 0.762513 | 0.420957 | precedence.go | starcoder |
package main
import (
`fmt`
)
/**
Given a string containing digits from 2-9 inclusive, return all possible letter combinations that the number could represent.
A mapping of digit to letters (just like on the telephone buttons) is given below. Note that 1 does not map to any letters.
Example:
Input: "23"
Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].
*/
func letterCombinations(digits string) []string {
res := make([]string, 0)
digitWordsMap := map[string]string{
"2": "abc",
"3": "def",
"4": "ghi",
"5": "jkl",
"6": "mno",
"7": "pqrs",
"8": "tuv",
"9": "wxyz",
}
for _, digit := range digits {
words := digitWordsMap[string(digit)]
tmp := make([]string, 0)
for _, word := range words {
if len(res) > 0 {
for _, item := range res {
tmp = append(tmp, item+string(word))
}
} else {
tmp = append(tmp, string(word))
}
}
res = tmp
}
return res
}
func letterCombinationsMy(digits string) []string {
if len(digits) == 0 {
return []string{}
}
res := make([]string, 0)
mm := make([][]string, len(digits))
m := map[rune][]string{
'2': {"a", "b", "c"},
'3': {"d", "e", "f"},
'4': {"g", "h", "i"},
'5': {"j", "k", "l"},
'6': {"m", "n", "o"},
'7': {"p", "q", "r", "s"},
'8': {"t", "u", "v"},
'9': {"w", "x", "y", "z"},
}
for i, v := range digits {
if letter, ok := m[v]; ok {
mm[i] = letter
} else {
return []string{}
}
}
if len(digits) == 1 {
return mm[0]
}
mutArray2Array(mm, &res)
return res
}
func mutArray2Array(mut [][]string, res *[]string) {
if len(mut) < 2 {
if len(mut) == 1 {
len1, len2 := len(*res), len(mut[0])
newLen := len1 * len2
temp := make([]string, newLen)
index := 0
for _, v0 := range *res {
for _, v1 := range mut[0] {
temp[index] = v0 + v1
index++
}
}
*res = append(temp)
}
return
}
len1, len2 := len(mut[0]), len(mut[1])
newLen := len1 * len2
temp := make([]string, newLen)
index := 0
for _, v0 := range mut[0] {
for _, v1 := range mut[1] {
temp[index] = v0 + v1
index++
}
}
if len(*res) == 0 {
*res = append(*res, temp...)
} else {
newLen *= len(*res)
index = 0
temp1 := make([]string, newLen)
for _, v0 := range *res {
for _, v1 := range temp {
temp1[index] = v0 + v1
index++
}
}
*res = append(temp1)
}
mut = mut[2:]
mutArray2Array(mut, res)
}
func main() {
fmt.Println(letterCombinations("2"))
} | main/letterCombinations.go | 0.550849 | 0.497681 | letterCombinations.go | starcoder |
package tagexpr
import (
"math"
)
// --------------------------- Operator ---------------------------
// additionExprNode implements the binary "+" operator: numeric addition for
// float64 operands, concatenation for string operands.
type additionExprNode struct{ exprBackground }

// newAdditionExprNode returns a fresh "+" operator node.
func newAdditionExprNode() ExprNode { return &additionExprNode{} }

// Run evaluates both operands and combines them by the left operand's type.
// When the left operand is neither float64 nor string, the right operand is
// returned unchanged (this also covers the unary "+"/positive-number form).
func (ae *additionExprNode) Run(currField string, tagExpr *TagExpr) interface{} {
	left := ae.leftOperand.Run(currField, tagExpr)
	right := ae.rightOperand.Run(currField, tagExpr)
	switch lhs := left.(type) {
	case float64:
		// A non-float64 right operand contributes its zero value (0).
		rhs, _ := right.(float64)
		return lhs + rhs
	case string:
		// A non-string right operand contributes its zero value ("").
		rhs, _ := right.(string)
		return lhs + rhs
	default:
		return right
	}
}
// multiplicationExprNode implements the binary "*" operator on float64 operands.
type multiplicationExprNode struct{ exprBackground }

// newMultiplicationExprNode returns a fresh "*" operator node.
func newMultiplicationExprNode() ExprNode { return &multiplicationExprNode{} }

// Run multiplies the two operands; a non-float64 operand is treated as 0.
func (ae *multiplicationExprNode) Run(currField string, tagExpr *TagExpr) interface{} {
	lhs, _ := ae.leftOperand.Run(currField, tagExpr).(float64)
	rhs, _ := ae.rightOperand.Run(currField, tagExpr).(float64)
	return lhs * rhs
}
// divisionExprNode implements the binary "/" operator on float64 operands.
type divisionExprNode struct{ exprBackground }

// newDivisionExprNode returns a fresh "/" operator node.
func newDivisionExprNode() ExprNode { return &divisionExprNode{} }

// Run divides the left operand by the right. Division by zero yields NaN
// rather than +/-Inf so an invalid expression is easy to detect downstream.
// The dividend is only evaluated once the divisor is known to be nonzero.
func (de *divisionExprNode) Run(currField string, tagExpr *TagExpr) interface{} {
	divisor, _ := de.rightOperand.Run(currField, tagExpr).(float64)
	if divisor == 0 {
		return math.NaN()
	}
	dividend, _ := de.leftOperand.Run(currField, tagExpr).(float64)
	return dividend / divisor
}
// subtractionExprNode implements the binary "-" operator on float64 operands.
type subtractionExprNode struct{ exprBackground }

// newSubtractionExprNode returns a fresh "-" operator node.
func newSubtractionExprNode() ExprNode { return &subtractionExprNode{} }

// Run subtracts the right operand from the left; a non-float64 operand is
// treated as 0.
func (de *subtractionExprNode) Run(currField string, tagExpr *TagExpr) interface{} {
	lhs, _ := de.leftOperand.Run(currField, tagExpr).(float64)
	rhs, _ := de.rightOperand.Run(currField, tagExpr).(float64)
	return lhs - rhs
}
// remainderExprNode implements the binary "%" operator. Both operands are
// truncated to int64 before the remainder is taken.
type remainderExprNode struct{ exprBackground }

// newRemainderExprNode returns a fresh "%" operator node.
func newRemainderExprNode() ExprNode { return &remainderExprNode{} }

// Run computes int64(left) % int64(right) and returns it as float64.
// A divisor whose truncation is zero yields NaN instead of panicking.
func (re *remainderExprNode) Run(currField string, tagExpr *TagExpr) interface{} {
	v1, _ := re.rightOperand.Run(currField, tagExpr).(float64)
	// Check the TRUNCATED divisor: the previous `v1 == 0` guard let values in
	// (-1, 1) such as 0.5 through, which truncate to int64 zero and made the
	// `%` below panic with "integer divide by zero".
	// NOTE(review): int64(NaN)/int64(±Inf) is unspecified by the Go spec; on
	// common platforms it is not a value we can rely on, but any result that
	// truncates to 0 is safely mapped to NaN here.
	divisor := int64(v1)
	if divisor == 0 {
		return math.NaN()
	}
	v0, _ := re.leftOperand.Run(currField, tagExpr).(float64)
	return float64(int64(v0) % divisor)
}
// equalExprNode implements the "==" operator.
type equalExprNode struct{ exprBackground }
// newEqualExprNode returns a new "==" operator node.
func newEqualExprNode() ExprNode { return &equalExprNode{} }
// Run compares the operands for equality. Only float64, string, bool and
// nil left operands are supported; a type mismatch between the two sides
// (or any unsupported left type) compares as false.
func (ee *equalExprNode) Run(currField string, tagExpr *TagExpr) interface{} {
	v0 := ee.leftOperand.Run(currField, tagExpr)
	v1 := ee.rightOperand.Run(currField, tagExpr)
	switch r := v0.(type) {
	case float64:
		r1, ok := v1.(float64)
		if ok {
			return r == r1
		}
	case string:
		r1, ok := v1.(string)
		if ok {
			return r == r1
		}
	case bool:
		r1, ok := v1.(bool)
		if ok {
			return r == r1
		}
	case nil:
		// nil == nil; any non-nil right side falls through to false.
		return v1 == nil
	}
	return false
}
type notEqualExprNode struct{ equalExprNode }
func newNotEqualExprNode() ExprNode { return ¬EqualExprNode{} }
func (ne *notEqualExprNode) Run(currField string, tagExpr *TagExpr) interface{} {
return !ne.equalExprNode.Run(currField, tagExpr).(bool)
}
// greaterExprNode implements the ">" operator.
type greaterExprNode struct{ exprBackground }
// newGreaterExprNode returns a new ">" operator node.
func newGreaterExprNode() ExprNode { return &greaterExprNode{} }
// Run reports whether left > right for float64 or string operands of the
// same type; any other combination compares as false.
func (ge *greaterExprNode) Run(currField string, tagExpr *TagExpr) interface{} {
	v0 := ge.leftOperand.Run(currField, tagExpr)
	v1 := ge.rightOperand.Run(currField, tagExpr)
	switch r := v0.(type) {
	case float64:
		r1, ok := v1.(float64)
		if ok {
			return r > r1
		}
	case string:
		r1, ok := v1.(string)
		if ok {
			return r > r1
		}
	}
	return false
}
// greaterEqualExprNode implements the ">=" operator.
type greaterEqualExprNode struct{ exprBackground }
// newGreaterEqualExprNode returns a new ">=" operator node.
func newGreaterEqualExprNode() ExprNode { return &greaterEqualExprNode{} }
// Run reports whether left >= right for float64 or string operands of the
// same type; any other combination compares as false.
func (ge *greaterEqualExprNode) Run(currField string, tagExpr *TagExpr) interface{} {
	v0 := ge.leftOperand.Run(currField, tagExpr)
	v1 := ge.rightOperand.Run(currField, tagExpr)
	switch r := v0.(type) {
	case float64:
		r1, ok := v1.(float64)
		if ok {
			return r >= r1
		}
	case string:
		r1, ok := v1.(string)
		if ok {
			return r >= r1
		}
	}
	return false
}
// lessExprNode implements the "<" operator.
type lessExprNode struct{ exprBackground }
// newLessExprNode returns a new "<" operator node.
func newLessExprNode() ExprNode { return &lessExprNode{} }
// Run reports whether left < right for float64 or string operands of the
// same type; any other combination compares as false.
func (le *lessExprNode) Run(currField string, tagExpr *TagExpr) interface{} {
	v0 := le.leftOperand.Run(currField, tagExpr)
	v1 := le.rightOperand.Run(currField, tagExpr)
	switch r := v0.(type) {
	case float64:
		r1, ok := v1.(float64)
		if ok {
			return r < r1
		}
	case string:
		r1, ok := v1.(string)
		if ok {
			return r < r1
		}
	}
	return false
}
// lessEqualExprNode implements the "<=" operator.
type lessEqualExprNode struct{ exprBackground }
// newLessEqualExprNode returns a new "<=" operator node.
func newLessEqualExprNode() ExprNode { return &lessEqualExprNode{} }
// Run reports whether left <= right for float64 or string operands of the
// same type; any other combination compares as false.
func (le *lessEqualExprNode) Run(currField string, tagExpr *TagExpr) interface{} {
	v0 := le.leftOperand.Run(currField, tagExpr)
	v1 := le.rightOperand.Run(currField, tagExpr)
	switch r := v0.(type) {
	case float64:
		r1, ok := v1.(float64)
		if ok {
			return r <= r1
		}
	case string:
		r1, ok := v1.(string)
		if ok {
			return r <= r1
		}
	}
	return false
}
// andExprNode implements the logical "&&" operator.
type andExprNode struct{ exprBackground }
// newAndExprNode returns a new "&&" operator node.
func newAndExprNode() ExprNode { return &andExprNode{} }
// Run evaluates the operands left to right, returning false as soon as one
// is falsy per FakeBool (so the right operand is not evaluated in that
// case), and true otherwise.
func (ae *andExprNode) Run(currField string, tagExpr *TagExpr) interface{} {
	for _, e := range [2]ExprNode{ae.leftOperand, ae.rightOperand} {
		if !FakeBool(e.Run(currField, tagExpr)) {
			return false
		}
	}
	return true
}
type orExprNode struct{ exprBackground }
func newOrExprNode() ExprNode { return &orExprNode{} }
func (oe *orExprNode) Run(currField string, tagExpr *TagExpr) interface{} {
for _, e := range [2]ExprNode{oe.leftOperand, oe.rightOperand} {
if FakeBool(e.Run(currField, tagExpr)) {
return true
}
}
return false
} | spec_operator.go | 0.629433 | 0.508849 | spec_operator.go | starcoder |
package types
import (
"errors"
"fmt"
"strconv"
)
// PlmnID is a globally unique network identifier (Public Land Mobile Network)
type PlmnID uint32
// GnbID is a 5G gNodeB Identifier
type GnbID uint64
// EnbID is an eNodeB Identifier
type EnbID uint32
// CellID is a node-local cell identifier; 4 bits for 4G; 14 bits for 5G
type CellID uint16
// NodeID is a general abstraction of a global E2 node identifier.
// It holds appropriately bit-shifted concatenation of either:
//   - [PLMNID + GnbID] or
//   - [PLMNID + EnbID]
// To extract the corresponding components, application must use the
// appropriate 4G or 5G method provided below.
type NodeID uint64
// NCI is a NR Cell Identifier; a 36-bit value (gNBID + CID)
type NCI uint64
// NCGI is NR Cell Global Identity (MCC+MNC+NCI)
type NCGI uint64
// ECI is a E-UTRAN Cell Identifier (eNBID + CID)
type ECI uint32
// ECGI is E-UTRAN Cell Global Identifier (MCC+MNC+ECI)
type ECGI uint64
// CRNTI is a cell-specific UE identifier
type CRNTI uint32
// MSIN is Mobile Subscriber Identification Number
type MSIN uint32
// IMSI is International Mobile Subscriber Identity
type IMSI uint64
// Bit masks used to isolate identifier components.
const (
	mask36 = 0xfffffffff // low 36 bits: NCI within an NCGI
	mask28 = 0xfffffff   // low 28 bits: ECI within an ECGI
	mask20 = 0xfffff00   // 20-bit eNB ID field (bits 8..27 of an ECI)
)
// EncodePlmnID encodes MCC and MNC strings into a PLMNID hex string,
// using the swapped-nibble layout; a 2-digit MNC gets an 'F' filler nibble.
func EncodePlmnID(mcc string, mnc string) string {
	if len(mnc) == 2 {
		return string([]byte{mcc[1], mcc[0], 'F', mcc[2], mnc[1], mnc[0]})
	}
	return string([]byte{mcc[1], mcc[0], mnc[2], mcc[2], mnc[1], mnc[0]})
}
// DecodePlmnID decodes MCC and MNC strings from a PLMNID hex string.
// An 'f'/'F' filler nibble in position 2 indicates a 2-digit MNC.
func DecodePlmnID(plmnID string) (mcc string, mnc string) {
	mcc = string([]byte{plmnID[1], plmnID[0], plmnID[3]})
	if plmnID[2] == 'f' || plmnID[2] == 'F' {
		mnc = string([]byte{plmnID[5], plmnID[4]})
	} else {
		mnc = string([]byte{plmnID[5], plmnID[4], plmnID[2]})
	}
	return
}
// ToPlmnID encodes the specified MCC and MNC strings into a numeric PLMNID.
// Returns 0 when the encoded string is not valid hex.
func ToPlmnID(mcc string, mnc string) PlmnID {
	s := EncodePlmnID(mcc, mnc)
	n, err := strconv.ParseUint(s, 16, 32)
	if err != nil {
		return 0
	}
	return PlmnID(n)
}
// PlmnIDFromHexString converts string form of PLMNID in its hex form into a
// numeric one suitable for APIs. Returns 0 on a malformed hex string.
func PlmnIDFromHexString(plmnID string) PlmnID {
	n, err := strconv.ParseUint(plmnID, 16, 32)
	if err != nil {
		return 0
	}
	return PlmnID(n)
}
// PlmnIDFromString converts string form of PLMNID given as a simple MCC-MNC
// catenation into a numeric one suitable for APIs. The first 3 characters
// are the MCC; the rest (2 or 3 digits) is the MNC.
func PlmnIDFromString(plmnID string) PlmnID {
	return ToPlmnID(plmnID[0:3], plmnID[3:])
}
// PlmnIDToString generates the MCC-MNC catenation format from the specified
// numeric PLMNID.
// NOTE(review): assumes the hex form has at least 6 digits; a small numeric
// value would make DecodePlmnID index out of range — confirm input range.
func PlmnIDToString(plmnID PlmnID) string {
	hexString := fmt.Sprintf("%x", plmnID)
	mcc, mnc := DecodePlmnID(hexString)
	return mcc + mnc
}
// 5G Identifiers
// Current split of the 36-bit NCI between gNB ID and cell ID
// (gnbBits + cidBits == 36), plus the masks isolating each field.
// Adjusted via SetNCIBitSplit.
var (
	gnbBits uint8 = 22
	cidBits uint8 = 14
	gnbMask uint64 = 0b111111111111111111111100000000000000
	cidMask uint64 = 0b000000000000000000000011111111111111
)
// SetNCIBitSplit sets how the NCI bits are split between gNBID and CID
func SetNCIBitSplit(gnb uint8, cid uint8) error {
if (gnb+cid) == 36 && 4 <= cid && cid <= 14 {
cidBits = cid
gnbMask = 0
cidMask = 0
for i := 0; i < 64; i++ {
b := uint8(i)
if b < cid {
cidMask |= 1 << i
}
if cid <= b && b < (cid+gnb) {
gnbMask |= 1 << i
}
}
return nil
}
return errors.New("invalid bit split")
}
// ToNCI produces NCI from the specified components: the gNB ID is placed
// above the cidBits-wide cell ID.
func ToNCI(gnbID GnbID, cid CellID) NCI {
	return NCI(uint(gnbID)<<cidBits | uint(cid))
}
// ToNCGI produces NCGI from the specified components: PLMNID in the high
// bits above the 36-bit NCI.
func ToNCGI(plmnID PlmnID, nci NCI) NCGI {
	return NCGI(uint(plmnID)<<36 | (uint(nci) & mask36))
}
// ToNodeID produces a 5G global node ID as a catenation of PLMNID + GnbID
func ToNodeID(plmnID PlmnID, gnbID GnbID) NodeID {
	return NodeID(uint(plmnID)<<gnbBits | uint(gnbID))
}
// To5GNodeID produces a 5G global node ID as a catenation of PLMNID + GnbID
func To5GNodeID(plmnID PlmnID, gnbID GnbID) NodeID {
	return ToNodeID(plmnID, gnbID)
}
// GetPlmnID extracts PLMNID from the specified NCGI (bits above the NCI).
func GetPlmnID(ncgi uint64) PlmnID {
	return PlmnID(ncgi >> 36)
}
// Get5GPlmnID extracts PLMNID from the specified NCGI
func Get5GPlmnID(ncgi uint64) PlmnID {
	return GetPlmnID(ncgi)
}
// GetNCI extracts NCI (the low 36 bits) from the specified NCGI
func GetNCI(ncgi NCGI) NCI {
	return NCI(ncgi & mask36)
}
// GetGnbID extracts gNodeB ID from the specified NCGI or NCI, using the
// current gnbMask/cidBits split.
func GetGnbID(id uint64) GnbID {
	return GnbID((id & gnbMask) >> cidBits)
}
// GetCellID extracts Cell ID from the specified NCGI or NCI
func GetCellID(id uint64) CellID {
	return CellID(id & cidMask)
}
// Get5GCellID extracts Cell ID from the specified NCGI or NCI
func Get5GCellID(id uint64) CellID {
	return GetCellID(id)
}
// 4G Identifiers
// ToECI produces ECI from the specified components: the eNB ID is placed
// above an 8-bit cell ID field.
func ToECI(enbID EnbID, cid CellID) ECI {
	return ECI(uint(enbID)<<8 | uint(cid))
}
// ToECGI produces ECGI from the specified components: PLMNID above the
// 28-bit ECI.
func ToECGI(plmnID PlmnID, eci ECI) ECGI {
	return ECGI(uint(plmnID)<<28 | (uint(eci) & mask28))
}
// To4GNodeID produces a 4G global node ID as a catenation of PLMNID + EnbID
func To4GNodeID(plmnID PlmnID, enbID EnbID) NodeID {
	return NodeID(uint(plmnID)<<28 | uint(enbID))
}
// Get4GPlmnID extracts PLMNID from the specified ECGI or IMSI
func Get4GPlmnID(id uint64) PlmnID {
	return PlmnID(id >> 28)
}
// Get4GCellID extracts Cell ID (the low 8 bits) from the specified ECGI
func Get4GCellID(id uint64) CellID {
	return CellID(id & 0xff)
}
// GetEnbID extracts Enb ID (bits 8..27) from the specified ECGI
func GetEnbID(id uint64) EnbID {
	return EnbID((id & mask20) >> 8)
}
// GetECI extracts ECI from the specified ECGI
func GetECI(id uint64) ECI {
return ECI(id & mask28)
} | go/onos/ransim/types/types.go | 0.617397 | 0.595728 | types.go | starcoder |
package timeseries
import "math"
// SimpleMovingAverage returns an n-point SMA of the series. nil values do
// not contribute to the window sum or count and are propagated as nil
// points in the result. The output has the same length and timestamps as
// the input.
func (ts TimeSeries) SimpleMovingAverage(n int) TimeSeries {
	if ts.Len() == 0 {
		return ts
	}
	sma := []TimePoint{ts[0]}
	// It's not possible to calculate MA if n greater than number of points
	n = int(math.Min(float64(ts.Len()), float64(n)))
	// Initial window, use simple moving average
	windowCount := 0
	var windowSum float64 = 0
	for i := n; i > 0; i-- {
		point := ts[n-i]
		if point.Value != nil {
			windowSum += *point.Value
			windowCount++
		}
	}
	if windowCount > 0 {
		windowAvg := windowSum / float64(windowCount)
		// Actually, we should set timestamp from datapoints[n-1] and start calculation of SMA from n.
		// But in order to start SMA from first point (not from Nth) we should expand time range and request N additional
		// points outside left side of range. We can't do that, so this trick is used for pretty view of first N points.
		// We calculate AVG for first N points, but then start from 2nd point, not from Nth. In general, it means we
		// assume that previous N points (0-N, 0-(N-1), ..., 0-1) have the same average value as a first N points.
		sma[0] = TimePoint{Time: ts[0].Time, Value: &windowAvg}
	}
	for i := 1; i < ts.Len(); i++ {
		leftEdge := int(math.Max(0, float64(i-n)))
		point := ts[i]
		leftPoint := ts[leftEdge]
		// Remove left value
		if leftPoint.Value != nil {
			if windowCount > 0 {
				if i < n {
					// Window not yet full: remove the current average
					// instead of the raw left value (matches the seeding
					// assumption documented above).
					windowSum -= windowSum / float64(windowCount)
				} else {
					windowSum -= *leftPoint.Value
				}
				windowCount--
			}
		}
		// Insert next value
		if point.Value != nil {
			windowSum += *point.Value
			windowCount++
			windowAvg := windowSum / float64(windowCount)
			value := windowAvg
			sma = append(sma, TimePoint{Time: point.Time, Value: &value})
		} else {
			sma = append(sma, TimePoint{Time: point.Time, Value: nil})
		}
	}
	return sma
}
func (ts TimeSeries) ExponentialMovingAverage(an float64) TimeSeries {
if ts.Len() == 0 {
return ts
}
// It's not possible to calculate MA if n greater than number of points
an = math.Min(float64(ts.Len()), an)
// alpha coefficient should be between 0 and 1. If provided n <= 1, then use it as alpha directly. Otherwise, it's a
// number of points in the window and alpha calculted from this information.
var a float64
var n int
ema := []TimePoint{ts[0]}
var emaCurrent float64
var emaPrev float64 = 0
if ts[0].Value != nil {
emaPrev = *ts[0].Value
}
if an > 1 {
// Calculate a from window size
a = 2 / (an + 1)
n = int(an)
// Initial window, use simple moving average
windowCount := 0
var windowSum float64 = 0
for i := n; i > 0; i-- {
point := ts[n-i]
if point.Value != nil {
windowSum += *point.Value
windowCount++
}
}
if windowCount > 0 {
windowAvg := windowSum / float64(windowCount)
// Actually, we should set timestamp from datapoints[n-1] and start calculation of EMA from n.
// But in order to start EMA from first point (not from Nth) we should expand time range and request N additional
// points outside left side of range. We can't do that, so this trick is used for pretty view of first N points.
// We calculate AVG for first N points, but then start from 2nd point, not from Nth. In general, it means we
// assume that previous N values (0-N, 0-(N-1), ..., 0-1) have the same average value as a first N values.
ema[0] = TimePoint{Time: ts[0].Time, Value: &windowAvg}
emaPrev = windowAvg
n = 1
}
} else {
// Use predefined a and start from 1st point (use it as initial EMA value)
a = an
n = 1
}
for i := n; i < ts.Len(); i++ {
point := ts[i]
if point.Value != nil {
emaCurrent = a*(*point.Value) + (1-a)*emaPrev
emaPrev = emaCurrent
value := emaCurrent
ema = append(ema, TimePoint{Time: point.Time, Value: &value})
} else {
ema = append(ema, TimePoint{Time: point.Time, Value: nil})
}
}
return ema
} | pkg/timeseries/moving_average.go | 0.782953 | 0.573678 | moving_average.go | starcoder |
package webp
import (
"image/color"
"reflect"
)
// MemPColor is a generic in-memory pixel color: Channels values of kind
// DataType stored as raw bytes in Pix.
type MemPColor struct {
	Channels int          // number of color channels (1..4 handled by RGBA)
	DataType reflect.Kind // per-channel element kind (e.g. reflect.Uint8)
	Pix      PixSlice     // raw channel data
}
// RGBA implements color.Color by expanding the raw channel data into
// alpha-premultiplied 16-bit-per-channel RGBA, dispatching on
// (Channels, DataType): 1 channel -> gray, 2 -> R,G with B and A forced
// opaque, 3 -> RGB with opaque alpha, 4 -> RGBA. Empty Pix returns zeros.
// NOTE(review): the reflect.Kind(c.DataType) conversions below are
// redundant — DataType is already a reflect.Kind.
func (c MemPColor) RGBA() (r, g, b, a uint32) {
	if len(c.Pix) == 0 {
		return
	}
	switch c.Channels {
	case 1:
		switch reflect.Kind(c.DataType) {
		case reflect.Uint8:
			return color.Gray{
				Y: c.Pix[0],
			}.RGBA()
		case reflect.Uint16:
			return color.Gray16{
				Y: c.Pix.Uint16s()[0],
			}.RGBA()
		default:
			// Other kinds are narrowed to uint16 gray.
			return color.Gray16{
				Y: uint16(c.Pix.Value(0, reflect.Kind(c.DataType))),
			}.RGBA()
		}
	case 2:
		switch reflect.Kind(c.DataType) {
		case reflect.Uint8:
			return color.RGBA{
				R: c.Pix[0],
				G: c.Pix[1],
				B: 0xFF,
				A: 0xFF,
			}.RGBA()
		case reflect.Uint16:
			return color.RGBA64{
				R: c.Pix.Uint16s()[0],
				G: c.Pix.Uint16s()[1],
				B: 0xFFFF,
				A: 0xFFFF,
			}.RGBA()
		default:
			return color.RGBA64{
				R: uint16(c.Pix.Value(0, reflect.Kind(c.DataType))),
				G: uint16(c.Pix.Value(1, reflect.Kind(c.DataType))),
				B: 0xFFFF,
				A: 0xFFFF,
			}.RGBA()
		}
	case 3:
		switch reflect.Kind(c.DataType) {
		case reflect.Uint8:
			return color.RGBA{
				R: c.Pix[0],
				G: c.Pix[1],
				B: c.Pix[2],
				A: 0xFF,
			}.RGBA()
		case reflect.Uint16:
			return color.RGBA64{
				R: c.Pix.Uint16s()[0],
				G: c.Pix.Uint16s()[1],
				B: c.Pix.Uint16s()[2],
				A: 0xFFFF,
			}.RGBA()
		default:
			return color.RGBA64{
				R: uint16(c.Pix.Value(0, reflect.Kind(c.DataType))),
				G: uint16(c.Pix.Value(1, reflect.Kind(c.DataType))),
				B: uint16(c.Pix.Value(2, reflect.Kind(c.DataType))),
				A: 0xFFFF,
			}.RGBA()
		}
	case 4:
		switch reflect.Kind(c.DataType) {
		case reflect.Uint8:
			return color.RGBA{
				R: c.Pix[0],
				G: c.Pix[1],
				B: c.Pix[2],
				A: c.Pix[3],
			}.RGBA()
		case reflect.Uint16:
			return color.RGBA64{
				R: c.Pix.Uint16s()[0],
				G: c.Pix.Uint16s()[1],
				B: c.Pix.Uint16s()[2],
				A: c.Pix.Uint16s()[3],
			}.RGBA()
		default:
			return color.RGBA64{
				R: uint16(c.Pix.Value(0, reflect.Kind(c.DataType))),
				G: uint16(c.Pix.Value(1, reflect.Kind(c.DataType))),
				B: uint16(c.Pix.Value(2, reflect.Kind(c.DataType))),
				A: uint16(c.Pix.Value(3, reflect.Kind(c.DataType))),
			}.RGBA()
		}
	}
	// Unsupported channel counts return all zeros.
	return
}
// ColorModelInterface describes a color model that exposes its channel
// count and per-channel data type.
type ColorModelInterface interface {
	Channels() int
	DataType() reflect.Kind
}
// _ColorModelT is the concrete color.Model implementation returned by
// ColorModel.
type _ColorModelT struct {
	XChannels int
	XDataType reflect.Kind
}
// Compile-time check that _ColorModelT satisfies ColorModelInterface.
var (
	_ ColorModelInterface = _ColorModelT{1, reflect.Uint8}
)
// Convert converts c to this model's channel count and data type.
func (m _ColorModelT) Convert(c color.Color) color.Color {
	return colorModelConvert(m.XChannels, m.XDataType, c)
}
// Channels returns the model's channel count.
func (m _ColorModelT) Channels() int {
	return m.XChannels
}
// DataType returns the model's per-channel element kind.
func (m _ColorModelT) DataType() reflect.Kind {
	return m.XDataType
}
// ColorModel returns a color.Model with the given channel count and
// per-channel data type.
func ColorModel(channels int, dataType reflect.Kind) color.Model {
	return _ColorModelT{
		XChannels: channels,
		XDataType: dataType,
	}
}
// colorModelConvert converts c into a MemPColor with the given channel
// count and data type. MemPColor inputs get per-channel conversion;
// standard colors are dispatched on (channels, dataType) with a generic
// RGBA fallback.
// NOTE(review): the first MemPColor branch (channels AND dtype equal) is
// subsumed by the second (dtype equal). The dtype-equal path also raw-copies
// bytes when channel counts differ (truncating/zero-padding) — confirm that
// is intended.
func colorModelConvert(channels int, dataType reflect.Kind, c color.Color) color.Color {
	c2 := MemPColor{
		Channels: channels,
		DataType: dataType,
		Pix:      make(PixSlice, channels*SizeofKind(dataType)),
	}
	if c1, ok := c.(MemPColor); ok {
		if c1.Channels == c2.Channels && c1.DataType == c2.DataType {
			copy(c2.Pix, c1.Pix)
			return c2
		}
		if c1.DataType == c2.DataType {
			copy(c2.Pix, c1.Pix)
			return c2
		}
		// Different data types: convert channel-by-channel up to the
		// smaller channel count.
		for i := 0; i < c1.Channels && i < c2.Channels; i++ {
			c2.Pix.SetValue(i, reflect.Kind(c2.DataType), c1.Pix.Value(i, reflect.Kind(c1.DataType)))
		}
		return c2
	}
	switch {
	case channels == 1 && reflect.Kind(dataType) == reflect.Uint8:
		v := color.GrayModel.Convert(c).(color.Gray)
		c2.Pix[0] = v.Y
		return c2
	case channels == 1 && reflect.Kind(dataType) == reflect.Uint16:
		v := color.Gray16Model.Convert(c).(color.Gray16)
		// Big-endian byte order.
		c2.Pix[0] = uint8(v.Y >> 8)
		c2.Pix[1] = uint8(v.Y)
		return c2
	case channels == 3 && reflect.Kind(dataType) == reflect.Uint8:
		r, g, b, _ := c.RGBA()
		c2.Pix[0] = uint8(r >> 8)
		c2.Pix[1] = uint8(g >> 8)
		c2.Pix[2] = uint8(b >> 8)
		return c2
	case channels == 3 && reflect.Kind(dataType) == reflect.Uint16:
		r, g, b, _ := c.RGBA()
		c2.Pix[0] = uint8(r >> 8)
		c2.Pix[1] = uint8(r)
		c2.Pix[2] = uint8(g >> 8)
		c2.Pix[3] = uint8(g)
		c2.Pix[4] = uint8(b >> 8)
		c2.Pix[5] = uint8(b)
		return c2
	case channels == 4 && reflect.Kind(dataType) == reflect.Uint8:
		r, g, b, a := c.RGBA()
		c2.Pix[0] = uint8(r >> 8)
		c2.Pix[1] = uint8(g >> 8)
		c2.Pix[2] = uint8(b >> 8)
		c2.Pix[3] = uint8(a >> 8)
		return c2
	case channels == 4 && reflect.Kind(dataType) == reflect.Uint16:
		r, g, b, a := c.RGBA()
		c2.Pix[0] = uint8(r >> 8)
		c2.Pix[1] = uint8(r)
		c2.Pix[2] = uint8(g >> 8)
		c2.Pix[3] = uint8(g)
		c2.Pix[4] = uint8(b >> 8)
		c2.Pix[5] = uint8(b)
		c2.Pix[6] = uint8(a >> 8)
		c2.Pix[7] = uint8(a)
		return c2
	}
	// Generic fallback: store raw 16-bit RGBA components per channel.
	r, g, b, a := c.RGBA()
	rgba := []uint32{r, g, b, a}
	for i := 0; i < c2.Channels && i < len(rgba); i++ {
		c2.Pix.SetValue(i, reflect.Kind(c2.DataType), float64(rgba[i]))
	}
	return c2
}
func SizeofKind(dataType reflect.Kind) int {
switch dataType {
case reflect.Int8:
return 1
case reflect.Int16:
return 2
case reflect.Int32:
return 4
case reflect.Int64:
return 8
case reflect.Uint8:
return 1
case reflect.Uint16:
return 2
case reflect.Uint32:
return 4
case reflect.Uint64:
return 8
case reflect.Float32:
return 4
case reflect.Float64:
return 8
case reflect.Complex64:
return 8
case reflect.Complex128:
return 16
}
return 0
}
func SizeofPixel(channels int, dataType reflect.Kind) int {
return channels * SizeofKind(dataType)
} | image_color.go | 0.550607 | 0.575111 | image_color.go | starcoder |
package redux
// Prerequisite from a source to a target: the path back to the target plus
// a snapshot of the target's metadata taken when the record was created.
type Prerequisite struct {
	Path string // path back to target of prerequisite.
	*Metadata   // target's metadata upon record creation.
}
// PutPrerequisite stores the given prerequisite using a key based on the
// event and hash.
func (f *File) PutPrerequisite(event Event, hash Hash, prereq Prerequisite) error {
	return f.Put(f.makeKey(REQUIRES, event, hash), prereq)
}
// GetPrerequisite returns the prerequisite for the event and hash.
// If the record does not exist, found is false and err is nil; prereq is
// the zero value in that case.
func (f *File) GetPrerequisite(event Event, hash Hash) (prereq Prerequisite, found bool, err error) {
	found, err = f.Get(f.makeKey(REQUIRES, event, hash), &prereq)
	return
}
// record pairs a stored database key with its decoded Prerequisite.
type record struct {
	key string
	*Prerequisite
}
// prefixed returns all prerequisite records whose keys start with prefix,
// decoding each raw row value into a Prerequisite. The first decode or
// database error aborts the scan.
func prefixed(f *File, prefix string) ([]*record, error) {
	rows, err := f.db.GetRecords(prefix)
	if err != nil {
		return nil, err
	}
	out := make([]*record, len(rows))
	for i, row := range rows {
		if decoded, err := decodePrerequisite(row.Value); err != nil {
			return nil, err
		} else {
			out[i] = &record{row.Key, &decoded}
		}
	}
	return out, nil
}
// eventRecords returns the prerequisite records for the given events; with
// no events, all prerequisite records for the file are returned.
func (f *File) eventRecords(events ...Event) ([]*record, error) {
	if len(events) == 0 {
		return prefixed(f, f.makeKey(REQUIRES))
	}
	var all []*record
	for _, ev := range events {
		recs, err := prefixed(f, f.makeKey(REQUIRES, ev))
		if err != nil {
			return nil, err
		}
		all = append(all, recs...)
	}
	return all, nil
}
// Prerequisites returns a slice of prerequisites for the file, optionally
// restricted to the given events.
func (f *File) Prerequisites(events ...Event) (out []*Prerequisite, err error) {
	var records []*record
	if records, err = f.eventRecords(events...); err != nil {
		return
	}
	out = make([]*Prerequisite, len(records))
	for i := range records {
		out[i] = records[i].Prerequisite
	}
	return
}
// PrerequisiteFiles returns a slice of *File objects for the file's
// prerequisites for the list of events.
func (f *File) PrerequisiteFiles(events ...Event) ([]*File, error) {
	records, err := f.eventRecords(events...)
	if err != nil {
		return nil, err
	}
	files := make([]*File, len(records))
	for i, rec := range records {
		file, err := rec.File(f.RootDir)
		if err != nil {
			return nil, err
		}
		files[i] = file
	}
	return files, nil
}
// DeletePrerequisite removes a single prerequisite.
func (f *File) DeletePrerequisite(event Event, hash Hash) error {
	return f.Delete(f.makeKey(REQUIRES, event, hash))
}
// visitor is a callback applied to each prerequisite record in turn.
type visitor func(*record) error
// visit applies fn to every record under prefix, stopping at the first error.
func visit(f *File, prefix string, fn visitor) error {
	records, err := prefixed(f, prefix)
	if err != nil {
		return err
	}
	for _, rec := range records {
		if err := fn(rec); err != nil {
			return err
		}
	}
	return nil
}
// destroy deletes every record under prefix.
func destroy(f *File, prefix string) error {
	return visit(f, prefix, func(rec *record) error {
		return f.Delete(rec.key)
	})
}
// DeleteAutoPrerequisites removes all of the file's system generated prerequisites.
func (f *File) DeleteAutoPrerequisites() error {
	return destroy(f, f.makeKey(REQUIRES, AUTO))
}
// DeleteAllPrerequisites removes all of the file's prerequisites.
func (f *File) DeleteAllPrerequisites() error {
	return destroy(f, f.makeKey(REQUIRES))
}
func (p *Prerequisite) IsCurrent(rootDir string) (isCurrent bool, err error) {
f, err := p.File(rootDir)
if err != nil {
return
}
m, err := f.NewMetadata()
if err != nil {
return
}
isCurrent = p.Equal(m)
if !isCurrent {
return
}
return f.IsCurrent()
} | prerequisite.go | 0.775265 | 0.540318 | prerequisite.go | starcoder |
package bindings
import "strconv"
// tFloat32 is the base observable float32 value: the current value, an
// optional filter applied on Set, and the listeners notified on change.
type tFloat32 struct {
	listeners []Float32Listener
	value float32
	filter Float32Filter
}
// tFloat32AB is a derived float32 computed from two parent Float32s.
type tFloat32AB struct {
	tFloat32
	parentA Float32
	parentB Float32
}
// tFloat32BooleanAB is a derived boolean computed from two parent Float32s.
type tFloat32BooleanAB struct {
	tBoolean
	parentA Float32
	parentB Float32
}
// tFloat32Divide tracks parentA / parentB.
type tFloat32Divide struct {
	tFloat32AB
}
// tFloat32Equal tracks parentA == parentB.
type tFloat32Equal struct {
	tFloat32BooleanAB
}
// tFloat32Greater tracks parentA > parentB.
type tFloat32Greater struct {
	tFloat32BooleanAB
}
// tFloat32GreaterOrEqual tracks parentA >= parentB.
type tFloat32GreaterOrEqual struct {
	tFloat32BooleanAB
}
// tFloat32Less tracks parentA < parentB.
type tFloat32Less struct {
	tFloat32BooleanAB
}
// tFloat32LessOrEqual tracks parentA <= parentB.
type tFloat32LessOrEqual struct {
	tFloat32BooleanAB
}
// tFloat32Minus tracks parentA - parentB.
type tFloat32Minus struct {
	tFloat32AB
}
// tFloat32Multiply tracks parentA * parentB.
type tFloat32Multiply struct {
	tFloat32AB
}
// tFloat32NotEqual tracks parentA != parentB.
type tFloat32NotEqual struct {
	tFloat32BooleanAB
}
// tFloat32Plus tracks parentA + parentB.
type tFloat32Plus struct {
	tFloat32AB
}
// AddListener registers listener for change notifications; duplicate
// registrations are ignored.
func (float32Value *tFloat32) AddListener(listener Float32Listener) {
	if !containsFloat32Listener(float32Value.listeners, listener) {
		float32Value.listeners = append(float32Value.listeners, listener)
	}
}
// Divide returns a derived Float32 tracking this value / float32ValueB.
// NOTE(review): like all factory methods below, the derived value is not
// seeded with the current result; it first updates when a parent changes.
func (float32Value *tFloat32) Divide(float32ValueB Float32) Float32 {
	float32Divide := new(tFloat32Divide)
	float32Divide.parentA = float32Value
	float32Divide.parentB = float32ValueB
	float32Value.AddListener(float32Divide)
	float32ValueB.AddListener(float32Divide)
	return float32Divide
}
// EqualTo returns a derived Boolean tracking this value == float32ValueB.
func (float32Value *tFloat32) EqualTo(float32ValueB Float32) Boolean {
	float32Equal := new(tFloat32Equal)
	float32Equal.parentA = float32Value
	float32Equal.parentB = float32ValueB
	float32Value.AddListener(float32Equal)
	float32ValueB.AddListener(float32Equal)
	return float32Equal
}
// Float64 returns a derived Float64 mirroring this value.
func (float32Value *tFloat32) Float64() Float64 {
	float64Value := new(tFloat64)
	float32Value.AddListener(float64Value)
	return float64Value
}
// GreaterThan returns a derived Boolean tracking this value > float32ValueB.
func (float32Value *tFloat32) GreaterThan(float32ValueB Float32) Boolean {
	float32Greater := new(tFloat32Greater)
	float32Greater.parentA = float32Value
	float32Greater.parentB = float32ValueB
	float32Value.AddListener(float32Greater)
	float32ValueB.AddListener(float32Greater)
	return float32Greater
}
// GreaterThanOrEqualTo returns a derived Boolean tracking this value >= float32ValueB.
func (float32Value *tFloat32) GreaterThanOrEqualTo(float32ValueB Float32) Boolean {
	float32GreaterOrEqual := new(tFloat32GreaterOrEqual)
	float32GreaterOrEqual.parentA = float32Value
	float32GreaterOrEqual.parentB = float32ValueB
	float32Value.AddListener(float32GreaterOrEqual)
	float32ValueB.AddListener(float32GreaterOrEqual)
	return float32GreaterOrEqual
}
// Int returns a derived Int mirroring this value.
func (float32Value *tFloat32) Int() Int {
	intValue := new(tInt)
	float32Value.AddListener(intValue)
	return intValue
}
// LessThan returns a derived Boolean tracking this value < float32ValueB.
func (float32Value *tFloat32) LessThan(float32ValueB Float32) Boolean {
	float32Less := new(tFloat32Less)
	float32Less.parentA = float32Value
	float32Less.parentB = float32ValueB
	float32Value.AddListener(float32Less)
	float32ValueB.AddListener(float32Less)
	return float32Less
}
// LessThanOrEqualTo returns a derived Boolean tracking this value <= float32ValueB.
func (float32Value *tFloat32) LessThanOrEqualTo(float32ValueB Float32) Boolean {
	float32LessOrEqual := new(tFloat32LessOrEqual)
	float32LessOrEqual.parentA = float32Value
	float32LessOrEqual.parentB = float32ValueB
	float32Value.AddListener(float32LessOrEqual)
	float32ValueB.AddListener(float32LessOrEqual)
	return float32LessOrEqual
}
// Minus returns a derived Float32 tracking this value - float32ValueB.
func (float32Value *tFloat32) Minus(float32ValueB Float32) Float32 {
	float32Minus := new(tFloat32Minus)
	float32Minus.parentA = float32Value
	float32Minus.parentB = float32ValueB
	float32Value.AddListener(float32Minus)
	float32ValueB.AddListener(float32Minus)
	return float32Minus
}
// Multiply returns a derived Float32 tracking this value * float32ValueB.
func (float32Value *tFloat32) Multiply(float32ValueB Float32) Float32 {
	float32Multiply := new(tFloat32Multiply)
	float32Multiply.parentA = float32Value
	float32Multiply.parentB = float32ValueB
	float32Value.AddListener(float32Multiply)
	float32ValueB.AddListener(float32Multiply)
	return float32Multiply
}
// NotEqualTo returns a derived Boolean tracking this value != float32ValueB.
func (float32Value *tFloat32) NotEqualTo(float32ValueB Float32) Boolean {
	float32ValueNotEqual := new(tFloat32NotEqual)
	float32ValueNotEqual.parentA = float32Value
	float32ValueNotEqual.parentB = float32ValueB
	float32Value.AddListener(float32ValueNotEqual)
	float32ValueB.AddListener(float32ValueNotEqual)
	return float32ValueNotEqual
}
// Plus returns a derived Float32 tracking this value + float32ValueB.
func (float32Value *tFloat32) Plus(float32ValueB Float32) Float32 {
	float32Plus := new(tFloat32Plus)
	float32Plus.parentA = float32Value
	float32Plus.parentB = float32ValueB
	float32Value.AddListener(float32Plus)
	float32ValueB.AddListener(float32Plus)
	return float32Plus
}
// RemoveListener unregisters listener; unknown listeners are ignored.
func (float32Value *tFloat32) RemoveListener(listener Float32Listener) {
	i := indexFloat32Listener(float32Value.listeners, listener)
	if i >= 0 {
		float32Value.listeners = removeFloat32Listener(float32Value.listeners, i)
	}
}
// Set applies the filter (if any) to newValue and, when the filtered value
// differs from the current one, stores it and notifies all listeners with
// the old and new values.
func (float32Value *tFloat32) Set(newValue float32) {
	oldValue := float32Value.value
	if float32Value.filter != nil {
		newValue = float32Value.filter.FilterFloat32(float32Value, oldValue, newValue)
	}
	if float32Value.value != newValue {
		observable := Float32(float32Value)
		float32Value.value = newValue
		for _, listener := range float32Value.listeners {
			listener.Float32Changed(observable, oldValue, newValue)
		}
	}
}
// SetFilter installs a filter applied to every subsequent Set.
func (float32Value *tFloat32) SetFilter(filter Float32Filter) {
	float32Value.filter = filter
}
// String returns a derived String tracking this value's textual form.
func (float32Value *tFloat32) String() String {
	stringValue := new(tString)
	float32Value.AddListener(stringValue)
	return stringValue
}
// Value returns the current float32 value.
func (float32Value *tFloat32) Value() float32 {
	return float32Value.value
}
// Float64Changed mirrors an observed Float64 into this value (narrowing).
func (float32Value *tFloat32) Float64Changed(observable Float64, oldValue, newValue float64) {
	float32Value.Set(float32(newValue))
}
// IntChanged mirrors an observed Int into this value.
func (float32Value *tFloat32) IntChanged(observable Int, oldValue, newValue int) {
	float32Value.Set(float32(newValue))
}
// StringChanged mirrors an observed String into this value; strings that do
// not parse as float are silently ignored.
func (float32Value *tFloat32) StringChanged(observable String, oldValue, newValue string) {
	if val, err := strconv.ParseFloat(newValue, 32); err == nil {
		float32Value.Set(float32(val))
	}
}
// Float32Changed recomputes the quotient when either parent changes; the
// freshly delivered value is used for the changed side.
func (float32Value *tFloat32Divide) Float32Changed(observable Float32, oldValue, newValue float32) {
	if float32Value.parentA == observable {
		float32Value.Set(newValue / float32Value.parentB.Value())
	} else {
		float32Value.Set(float32Value.parentA.Value() / newValue)
	}
}
// Float32Changed recomputes the equality when either parent changes.
func (float32Value *tFloat32Equal) Float32Changed(observable Float32, oldValue, newValue float32) {
	if float32Value.parentA == observable {
		float32Value.Set(float32Value.parentB.Value() == newValue)
	} else {
		float32Value.Set(float32Value.parentA.Value() == newValue)
	}
}
// Float32Changed recomputes parentA > parentB when either parent changes
// (comparison is flipped when parentA is the changed side).
func (float32Value *tFloat32Greater) Float32Changed(observable Float32, oldValue, newValue float32) {
	if float32Value.parentA == observable {
		float32Value.Set(float32Value.parentB.Value() < newValue)
	} else {
		float32Value.Set(float32Value.parentA.Value() > newValue)
	}
}
// Float32Changed recomputes parentA >= parentB when either parent changes.
func (float32Value *tFloat32GreaterOrEqual) Float32Changed(observable Float32, oldValue, newValue float32) {
	if float32Value.parentA == observable {
		float32Value.Set(float32Value.parentB.Value() <= newValue)
	} else {
		float32Value.Set(float32Value.parentA.Value() >= newValue)
	}
}
// Float32Changed recomputes parentA < parentB when either parent changes.
func (float32Value *tFloat32Less) Float32Changed(observable Float32, oldValue, newValue float32) {
	if float32Value.parentA == observable {
		float32Value.Set(float32Value.parentB.Value() > newValue)
	} else {
		float32Value.Set(float32Value.parentA.Value() < newValue)
	}
}
// Float32Changed recomputes parentA <= parentB when either parent changes.
func (float32Value *tFloat32LessOrEqual) Float32Changed(observable Float32, oldValue, newValue float32) {
	if float32Value.parentA == observable {
		float32Value.Set(float32Value.parentB.Value() >= newValue)
	} else {
		float32Value.Set(float32Value.parentA.Value() <= newValue)
	}
}
// Float32Changed recomputes the difference when either parent changes.
func (float32Value *tFloat32Minus) Float32Changed(observable Float32, oldValue, newValue float32) {
	if float32Value.parentA == observable {
		float32Value.Set(newValue - float32Value.parentB.Value())
	} else {
		float32Value.Set(float32Value.parentA.Value() - newValue)
	}
}
// Float32Changed recomputes the product when either parent changes
// (multiplication is commutative, so both branches have the same shape).
func (float32Value *tFloat32Multiply) Float32Changed(observable Float32, oldValue, newValue float32) {
	if float32Value.parentA == observable {
		float32Value.Set(float32Value.parentB.Value() * newValue)
	} else {
		float32Value.Set(float32Value.parentA.Value() * newValue)
	}
}
// Float32Changed recomputes the inequality when either parent changes.
func (float32Value *tFloat32NotEqual) Float32Changed(observable Float32, oldValue, newValue float32) {
	if float32Value.parentA == observable {
		float32Value.Set(float32Value.parentB.Value() != newValue)
	} else {
		float32Value.Set(float32Value.parentA.Value() != newValue)
	}
}
func (float32Value *tFloat32Plus) Float32Changed(observable Float32, oldValue, newValue float32) {
if float32Value.parentA == observable {
float32Value.Set(float32Value.parentB.Value() + newValue)
} else {
float32Value.Set(float32Value.parentA.Value() + newValue)
}
} | float32.go | 0.760473 | 0.504883 | float32.go | starcoder |
package proto
import "github.com/go-faster/errors"
// Compile-time assertions for ColNullableOf.
var (
_ ColInput = (*ColNullableOf[string])(nil)
_ ColResult = (*ColNullableOf[string])(nil)
_ Column = (*ColNullableOf[string])(nil)
_ ColumnOf[Nullable[string]] = (*ColNullableOf[string])(nil)
_ StateEncoder = (*ColNullableOf[string])(nil)
_ StateDecoder = (*ColNullableOf[string])(nil)
_ = ColNullableOf[string]{
Values: new(ColStr),
}
)
// Nullable is a value of type T that can be null (unset).
type Nullable[T any] struct {
	Set   bool // true when Value is set, i.e. the value is not null
	Value T    // the wrapped value; meaningful only when Set is true
}

// NewNullable returns a set Nullable[T] wrapping v.
func NewNullable[T any](v T) Nullable[T] {
	return Nullable[T]{Set: true, Value: v}
}

// Null returns the null (unset) value for Nullable[T].
func Null[T any]() Nullable[T] {
	return Nullable[T]{}
}

// IsSet reports whether the value is set (not null).
func (n Nullable[T]) IsSet() bool { return n.Set }

// Or returns the wrapped value when set, or v otherwise.
// BUG FIX: the previous implementation had the branches inverted — it
// returned the fallback when the value was set and the (zero) Value when
// it was null.
func (n Nullable[T]) Or(v T) T {
	if !n.Set {
		return v
	}
	return n.Value
}
// ColNullableOf is Nullable(T) column: Nulls holds the per-row null flags
// (boolTrue = null, boolFalse = set) and Values holds the per-row values
// (the zero value at null positions), kept row-aligned.
type ColNullableOf[T any] struct {
	Nulls  ColUInt8
	Values ColumnOf[T]
}
// DecodeState reads the value column's serialization state, when the value
// column implements StateDecoder; otherwise it is a no-op.
func (c *ColNullableOf[T]) DecodeState(r *Reader) error {
	if s, ok := c.Values.(StateDecoder); ok {
		if err := s.DecodeState(r); err != nil {
			return errors.Wrap(err, "values state")
		}
	}
	return nil
}
// EncodeState writes the value column's serialization state, when the value
// column implements StateEncoder; otherwise it is a no-op.
func (c ColNullableOf[T]) EncodeState(b *Buffer) {
	if s, ok := c.Values.(StateEncoder); ok {
		s.EncodeState(b)
	}
}
// Type returns Nullable(T) for the wrapped column's type.
func (c ColNullableOf[T]) Type() ColumnType {
	return ColumnTypeNullable.Sub(c.Values.Type())
}
// DecodeColumn reads rows null flags followed by rows values, matching the
// order EncodeColumn writes them.
func (c *ColNullableOf[T]) DecodeColumn(r *Reader, rows int) error {
	if err := c.Nulls.DecodeColumn(r, rows); err != nil {
		return errors.Wrap(err, "nulls")
	}
	if err := c.Values.DecodeColumn(r, rows); err != nil {
		return errors.Wrap(err, "values")
	}
	return nil
}
// Rows returns the number of rows in the column.
func (c ColNullableOf[T]) Rows() int {
	return c.Nulls.Rows()
}
func (c *ColNullableOf[T]) Append(v Nullable[T]) {
null := boolTrue
if v.Set {
null = boolFalse
}
c.Nulls.Append(null)
c.Values.Append(v.Value)
}
func (c *ColNullableOf[T]) AppendArr(v []Nullable[T]) {
for _, vv := range v {
c.Append(vv)
}
}
// Row returns row i as a Nullable[T]. Set is derived from the null mask:
// the row is present when its mask byte equals boolFalse.
func (c ColNullableOf[T]) Row(i int) Nullable[T] {
	return Nullable[T]{
		Value: c.Values.Row(i),
		Set: c.Nulls.Row(i) == boolFalse,
	}
}
func (c *ColNullableOf[T]) Reset() {
c.Nulls.Reset()
c.Values.Reset()
}
func (c ColNullableOf[T]) EncodeColumn(b *Buffer) {
c.Nulls.EncodeColumn(b)
c.Values.EncodeColumn(b)
}
func (c ColNullableOf[T]) IsElemNull(i int) bool {
if i < c.Rows() {
return c.Nulls[i] == boolTrue
}
return false
} | proto/col_nullable_of.go | 0.665519 | 0.423995 | col_nullable_of.go | starcoder |
package dhcpv6
// This module defines the OptIAForPrefixDelegation structure.
// https://www.ietf.org/rfc/rfc3633.txt
import (
"encoding/binary"
"fmt"
)
type OptIAForPrefixDelegation struct {
iaId [4]byte
t1 uint32
t2 uint32
options []byte
}
func (op *OptIAForPrefixDelegation) Code() OptionCode {
return OPTION_IA_PD
}
// ToBytes serializes the option including its 4-byte option header:
// code (2) | length (2) | IAID (4) | T1 (4) | T2 (4) | options (variable).
// The encoded length covers everything after the header (12 + len(options)),
// which is what Length() returns.
func (op *OptIAForPrefixDelegation) ToBytes() []byte {
	buf := make([]byte, 16)
	binary.BigEndian.PutUint16(buf[0:2], uint16(OPTION_IA_PD))
	binary.BigEndian.PutUint16(buf[2:4], uint16(op.Length()))
	copy(buf[4:8], op.iaId[:])
	binary.BigEndian.PutUint32(buf[8:12], op.t1)
	binary.BigEndian.PutUint32(buf[12:16], op.t2)
	buf = append(buf, op.options...)
	return buf
}
func (op *OptIAForPrefixDelegation) IAID() []byte {
return op.iaId[:]
}
func (op *OptIAForPrefixDelegation) SetIAID(iaId [4]byte) {
op.iaId = iaId
}
func (op *OptIAForPrefixDelegation) T1() uint32 {
return op.t1
}
func (op *OptIAForPrefixDelegation) SetT1(t1 uint32) {
op.t1 = t1
}
func (op *OptIAForPrefixDelegation) T2() uint32 {
return op.t2
}
func (op *OptIAForPrefixDelegation) SetT2(t2 uint32) {
op.t2 = t2
}
func (op *OptIAForPrefixDelegation) Options() []byte {
return op.options
}
func (op *OptIAForPrefixDelegation) SetOptions(options []byte) {
op.options = options
}
func (op *OptIAForPrefixDelegation) Length() int {
return 12 + len(op.options)
}
func (op *OptIAForPrefixDelegation) String() string {
return fmt.Sprintf("OptIAForPrefixDelegation{IAID=%v, t1=%v, t2=%v, options=%v}",
op.iaId, op.t1, op.t2, op.options)
}
// build an OptIAForPrefixDelegation structure from a sequence of bytes.
// The input data does not include option code and length bytes.
func ParseOptIAForPrefixDelegation(data []byte) (*OptIAForPrefixDelegation, error) {
opt := OptIAForPrefixDelegation{}
if len(data) < 12 {
return nil, fmt.Errorf("Invalid IA for Prefix Delegation data length. Expected at least 12 bytes, got %v", len(data))
}
copy(opt.iaId[:], data[:4])
opt.t1 = binary.BigEndian.Uint32(data[4:8])
opt.t2 = binary.BigEndian.Uint32(data[8:12])
opt.options = append(opt.options, data[12:]...)
return &opt, nil
} | dhcpv6/option_prefixdelegation.go | 0.698741 | 0.424293 | option_prefixdelegation.go | starcoder |
package origins
// Comparator is a function type that compares two facts for the purposes of sorting.
// It returns true if the first fact should come before the second fact.
type Comparator func(f1, f2 *Fact) bool
const (
compTrue int8 = iota - 1
compEqual
compFalse
)
// identComparator orders two Ident values by Domain first, then by Name.
// It returns compTrue when i1 sorts strictly before i2, compFalse when it
// sorts strictly after, and compEqual otherwise. Other identity-based
// comparators are built on top of this result.
func identComparator(i1, i2 *Ident) int8 {
	// Identical pointers are trivially equal.
	if i1 == i2 {
		return compEqual
	}
	switch {
	case i1.Domain < i2.Domain:
		return compTrue
	case i1.Domain > i2.Domain:
		return compFalse
	case i1.Name < i2.Name:
		return compTrue
	case i1.Name > i2.Name:
		return compFalse
	}
	return compEqual
}
// IdentComparator reports whether id1 sorts strictly before id2
// (domain first, then local name).
func IdentComparator(id1, id2 *Ident) bool {
	return identComparator(id1, id2) == compTrue
}
// EntityComparator compares two entity identities.
func EntityComparator(f1, f2 *Fact) bool {
return IdentComparator(f1.Entity, f2.Entity)
}
// AttributeComparator compares two attribute identities.
func AttributeComparator(f1, f2 *Fact) bool {
return IdentComparator(f1.Attribute, f2.Attribute)
}
// ValueComparator compares two value identities.
func ValueComparator(f1, f2 *Fact) bool {
return IdentComparator(f1.Value, f2.Value)
}
// TransactionComparator compares two value identities.
func TransactionComparator(f1, f2 *Fact) bool {
return f1.Transaction < f2.Transaction
}
// TimeComparator compares two times.
func TimeComparator(f1, f2 *Fact) bool {
return f1.Time.Before(f2.Time)
}
// EAVTComparator compares two facts using an entity-attribute-value-time sort.
func EAVTComparator(f1, f2 *Fact) bool {
switch identComparator(f1.Entity, f2.Entity) {
case compTrue:
return true
case compFalse:
return false
}
switch identComparator(f1.Attribute, f2.Attribute) {
case compTrue:
return true
case compFalse:
return false
}
switch identComparator(f1.Value, f2.Value) {
case compTrue:
return true
case compFalse:
return false
}
return TransactionComparator(f1, f2)
}
// AEVTComparator compares two facts using an attribute-entity-value-time sort.
func AEVTComparator(f1, f2 *Fact) bool {
switch identComparator(f1.Attribute, f2.Attribute) {
case compTrue:
return true
case compFalse:
return false
}
switch identComparator(f1.Entity, f2.Entity) {
case compTrue:
return true
case compFalse:
return false
}
switch identComparator(f1.Value, f2.Value) {
case compTrue:
return true
case compFalse:
return false
}
return TransactionComparator(f1, f2)
}
// AVETComparator compares two facts using an attribute-value-entity-time sort.
func AVETComparator(f1, f2 *Fact) bool {
switch identComparator(f1.Attribute, f2.Attribute) {
case compTrue:
return true
case compFalse:
return false
}
switch identComparator(f1.Value, f2.Value) {
case compTrue:
return true
case compFalse:
return false
}
switch identComparator(f1.Entity, f2.Entity) {
case compTrue:
return true
case compFalse:
return false
}
return TransactionComparator(f1, f2)
}
// VAETComparator compares two facts using an value-attribute-entity-time sort.
func VAETComparator(f1, f2 *Fact) bool {
switch identComparator(f1.Value, f2.Value) {
case compTrue:
return true
case compFalse:
return false
}
switch identComparator(f1.Attribute, f2.Attribute) {
case compTrue:
return true
case compFalse:
return false
}
switch identComparator(f1.Entity, f2.Entity) {
case compTrue:
return true
case compFalse:
return false
}
return TransactionComparator(f1, f2)
} | comparator.go | 0.837587 | 0.552238 | comparator.go | starcoder |
package biooperators
import (
"fmt"
"math"
"sort"
"github.com/CRAB-LAB-NTNU/PPS-BS/types"
)
/*CalculateIdealPoints calculates the ideal point in a population,
IE. the point in the search space by picking the best function value for all objective functions.
*/
func CalculateIdealPoints(population []types.Individual) []float64 {
	// Assumes a non-empty population and minimization semantics
	// ("best" = smallest objective value).
	oc := population[0].Fitness().ObjectiveCount
	point := make([]float64, oc)
	// Start from +max so any real objective value replaces it.
	for i := 0; i < oc; i++ {
		point[i] = math.MaxFloat64
	}
	for _, ind := range population {
		fitness := ind.Fitness()
		for j := 0; j < fitness.ObjectiveCount; j++ {
			// Keep the per-objective minimum across the population.
			if fitness.ObjectiveValues[j] < point[j] {
				point[j] = fitness.ObjectiveValues[j]
			}
		}
	}
	return point
}
// CalculateNadirPoints calculates the nadir point of a population: for every
// objective, the worst (largest, under minimization) value seen across all
// individuals. Assumes a non-empty population.
func CalculateNadirPoints(population []types.Individual) []float64 {
	oc := population[0].Fitness().ObjectiveCount
	point := make([]float64, oc)
	for i := 0; i < oc; i++ {
		// BUG FIX: this was math.SmallestNonzeroFloat64, which is the
		// smallest POSITIVE float (~4.9e-324), not the most negative value.
		// With all-negative objective values the maximum was never updated.
		point[i] = -math.MaxFloat64
	}
	for _, ind := range population {
		fitness := ind.Fitness()
		for j := 0; j < fitness.ObjectiveCount; j++ {
			// Keep the per-objective maximum across the population.
			if fitness.ObjectiveValues[j] > point[j] {
				point[j] = fitness.ObjectiveValues[j]
			}
		}
	}
	return point
}
func CalculateNadirAndIdealPoints(population []types.Individual) ([]float64, []float64) {
return CalculateIdealPoints(population), CalculateNadirPoints(population)
}
// FastNonDominatedSort partitions the population into Pareto fronts using
// the NSGA-II fast non-dominated sort: fronts[0] holds individuals dominated
// by nobody, fronts[1] those dominated only by fronts[0], and so on.
// Trailing empty fronts are trimmed before returning.
// NOTE(review): the result depends on how Dominates treats equal fitness
// vectors — verify duplicated fitness values are assigned as intended.
func FastNonDominatedSort(population []types.Individual) [][]types.Individual {
	fronts := make([][]types.Individual, len(population))
	// sets[p]: individuals dominated by population[p].
	// n[p]: number of individuals that dominate population[p].
	sets := make([][]types.Individual, len(population))
	n := make([]int, len(population))
	// Maps an individual back to its index in population.
	indexLookup := make(map[types.Individual]int)
	for p := range population {
		indexLookup[population[p]] = p
		for q := range population {
			if Dominates(population[p], population[q]) {
				sets[p] = append(sets[p], population[q])
			} else if Dominates(population[q], population[p]) {
				n[p]++
			}
		}
		// Individuals dominated by nobody form the first front.
		if n[p] == 0 {
			fronts[0] = append(fronts[0], population[p])
		}
	}
	i := 0
	for len(fronts[i]) > 0 {
		var H []types.Individual // candidates for the next front
		for p := range fronts[i] {
			// Re-resolve p as an index into population (and thus sets/n).
			p = indexLookup[fronts[i][p]]
			for q := range sets[p] {
				q = indexLookup[sets[p][q]]
				n[q]--
				// q joins the next front once all its dominators are placed.
				if n[q] == 0 {
					H = append(H, population[q])
				}
			}
		}
		i++
		if i == len(fronts) {
			break
		}
		fronts[i] = H
	}
	// Trim trailing empty fronts.
	for j := len(fronts) - 1; j >= 0; j-- {
		if len(fronts[j]) > 0 {
			break
		}
		fronts = fronts[:j]
	}
	return fronts
}
// Dominates reports whether p Pareto-dominates q under minimization:
// p is no worse than q in every objective AND strictly better in at least one.
//
// BUG FIX: the original returned true whenever p was merely no worse
// everywhere, so two individuals with identical fitness "dominated" each
// other (and an individual dominated itself), which corrupts
// non-dominated sorting.
func Dominates(p, q types.Individual) bool {
	pf, qf := p.Fitness(), q.Fitness()
	strictlyBetter := false
	for i := range qf.ObjectiveValues {
		if pf.ObjectiveValues[i] > qf.ObjectiveValues[i] {
			// q is strictly better in this objective: no domination.
			return false
		}
		if pf.ObjectiveValues[i] < qf.ObjectiveValues[i] {
			strictlyBetter = true
		}
	}
	return strictlyBetter
}
// CrowdingDistance computes the NSGA-II crowding distance for each
// individual, keyed by its index in population. For every objective the
// boundary individuals get MaxFloat64; interior individuals accumulate the
// objective-value span between their two sorted neighbours.
func CrowdingDistance(population []types.Individual) map[int]float64 {
	lookup := make(map[types.Individual]int, len(population))
	for i, p := range population {
		lookup[p] = i
	}
	distances := make(map[int]float64)
	l := len(population) - 1 // index of the last individual
	for m := range population[0].Fitness().ObjectiveValues {
		sorted := SortByValue(population, m)
		distances[lookup[sorted[0]]], distances[lookup[sorted[l]]] = math.MaxFloat64, math.MaxFloat64
		// BUG FIX: the loop previously stopped at i < l-1, skipping the
		// interior individual at index l-1. Interior points are 1..l-1.
		for i := 1; i < l; i++ {
			distances[lookup[sorted[i]]] += sorted[i+1].Fitness().ObjectiveValues[m] - sorted[i-1].Fitness().ObjectiveValues[m]
		}
	}
	return distances
}
// SortByValue returns a copy of population ordered ascending by the
// objective value at the given index; the input slice is left untouched.
func SortByValue(population []types.Individual, index int) []types.Individual {
	ordered := append([]types.Individual(nil), population...)
	sort.Slice(ordered, func(a, b int) bool {
		return ordered[a].Fitness().ObjectiveValues[index] < ordered[b].Fitness().ObjectiveValues[index]
	})
	return ordered
}
func PrintIndividualToGeogebraPoint(ind types.Individual) {
fmt.Print("(")
for i, m := range ind.Fitness().ObjectiveValues {
fmt.Print(m)
if i != len(ind.Fitness().ObjectiveValues)-1 {
fmt.Print(",")
}
}
fmt.Println(")")
}
func UnionPopulations(a, b []types.Individual) []types.Individual {
check := make(map[types.Individual]bool)
var union []types.Individual
counter := 0
for _, ind := range a {
if _, ok := check[ind]; ok {
counter++
}
check[ind] = true
union = append(union, ind)
}
for _, ind := range b {
if _, ok := check[ind]; !ok {
union = append(union, ind)
}
}
return union
} | biooperators/populations.go | 0.607197 | 0.484441 | populations.go | starcoder |
package p352
/**
Given a data stream input of non-negative integers a1, a2, ..., an, ...,
summarize the numbers seen so far as a list of disjoint intervals.
For example, suppose the integers from the data stream are 1, 3, 7, 2, 6, ...,
then the summary will be:
[1, 1]
[1, 1], [3, 3]
[1, 1], [3, 3], [7, 7]
[1, 3], [7, 7]
[1, 3], [6, 7]
Follow up:
What if there are lots of merges and the number of disjoint intervals are small
compared to the data stream's size?
*/
/**
* Definition for an interval.
**/
type Interval struct {
Start int
End int
}
type BSTreeNode struct {
val Interval
left *BSTreeNode
right *BSTreeNode
}
func newNode(v int) *BSTreeNode {
node := BSTreeNode{
val: Interval{v, v},
}
return &node
}
// findMin returns the left-most (minimum-interval) node of the subtree
// rooted at node, or nil for an empty subtree.
func findMin(node *BSTreeNode) *BSTreeNode {
	if node == nil {
		return nil
	}
	for node.left != nil {
		node = node.left
	}
	return node
}
func addKey(val int, node *BSTreeNode) *BSTreeNode {
if node == nil {
node = newNode(val)
} else if node.val.Start > val {
node.left = addKey(val, node.left)
} else if node.val.End < val {
node.right = addKey(val, node.right)
}
return node
}
// remove deletes the node whose interval overlaps x from the subtree rooted
// at node and returns the (possibly new) subtree root. This is a standard
// recursive BST deletion keyed on interval position.
func remove(x *Interval, node *BSTreeNode) *BSTreeNode {
	if node == nil || x == nil {
		return node
	} else if x.Start > node.val.End {
		// x lies entirely to the right of this node's interval.
		node.right = remove(x, node.right)
	} else if x.End < node.val.Start {
		// x lies entirely to the left of this node's interval.
		node.left = remove(x, node.left)
	} else if node.left != nil && node.right != nil {
		// Two children: copy the in-order successor's interval into this
		// node, then delete that successor from the right subtree.
		node.val = findMin(node.right).val
		node.right = remove(&node.val, node.right)
	} else {
		// Zero or one child: splice the child (or nil) into place.
		if node.left != nil {
			node = node.left
		} else {
			node = node.right
		}
	}
	return node
}
type SummaryRanges struct {
size int
root *BSTreeNode
}
/** Initialize your data structure here. */
func Constructor() SummaryRanges {
return SummaryRanges{
size: 0,
root: nil,
}
}
// findKey returns the node whose interval contains val, or nil when no
// stored interval covers it.
func findKey(val int, node *BSTreeNode) *BSTreeNode {
	for node != nil {
		switch {
		case val < node.val.Start:
			node = node.left
		case val > node.val.End:
			node = node.right
		default:
			return node
		}
	}
	return nil
}
// Addnum inserts val into the interval summary, extending or merging
// neighbouring intervals so the stored intervals stay disjoint.
func (this *SummaryRanges) Addnum(val int) {
	if this.root == nil {
		this.size++
		this.root = newNode(val)
	} else {
		// Already covered by an existing interval: nothing to do.
		if findKey(val, this.root) != nil {
			return
		}
		// Intervals immediately adjacent to val on either side, if any.
		left := findKey(val-1, this.root)
		right := findKey(val+1, this.root)
		if left == nil && right == nil {
			// Isolated value: insert a fresh [val, val] interval.
			this.root = addKey(val, this.root)
			this.size++
		} else if left != nil && right == nil {
			// Extend the left neighbour forward by one.
			left.val.End++
		} else if left == nil && right != nil {
			// Extend the right neighbour backward by one.
			right.val.Start--
		} else {
			// val bridges two intervals: merge the right one into the left.
			end := right.val.End
			this.root = remove(&right.val, this.root)
			left.val.End = end
			this.size--
		}
	}
}
func (this *SummaryRanges) Getintervals() []Interval {
ans := make([]Interval, 0, this.size)
var inorder func(node *BSTreeNode)
inorder = func(node *BSTreeNode) {
if node != nil {
inorder(node.left)
ans = append(ans, node.val)
inorder(node.right)
}
}
inorder(this.root)
return ans
}
/**
* Your SummaryRanges object will be instantiated and called as such:
* obj := Constructor();
* obj.Addnum(val);
* param_2 := obj.Getintervals();
*/ | algorithms/p352/352.go | 0.91181 | 0.835316 | 352.go | starcoder |
package algo
import (
"github.com/puppetlabs/leg/datastructure"
"github.com/puppetlabs/leg/graph"
)
const (
PrimMinimumSpanningTreeSupportedFeatures = graph.DeterministicIteration
)
type PrimMinimumSpanningTree struct {
TotalWeight float64
features graph.GraphFeature
es graph.MutableEdgeSet
}
func (mst *PrimMinimumSpanningTree) Features() graph.GraphFeature {
return mst.features
}
func (mst *PrimMinimumSpanningTree) Edges() graph.EdgeSet {
return mst.es
}
func PrimMinimumSpanningTreeOf(g graph.UndirectedGraph) *PrimMinimumSpanningTree {
vs := g.Vertices()
var (
es graph.MutableEdgeSet
unspanned datastructure.Set
)
if g.Features()&graph.DeterministicIteration != 0 {
es = graph.NewMutableEdgeSet(datastructure.NewLinkedHashSet())
unspanned = datastructure.NewLinkedHashSetWithCapacity(int(vs.Count()))
} else {
es = graph.NewMutableEdgeSet(datastructure.NewHashSet())
unspanned = datastructure.NewHashSetWithCapacity(int(vs.Count()))
}
mst := &PrimMinimumSpanningTree{
features: g.Features() & PrimMinimumSpanningTreeSupportedFeatures,
es: es,
}
vs.ForEach(func(vertex graph.Vertex) error {
unspanned.Add(vertex)
return nil
})
for !unspanned.Empty() {
var root graph.Vertex
unspanned.ForEachInto(func(vertex graph.Vertex) error {
root = vertex
return datastructure.ErrStopIteration
})
unspanned.Remove(root)
dangling := datastructure.NewPriorityQueue()
edges, _ := g.EdgesOf(root)
edges.ForEach(func(edge graph.Edge) error {
weight, _ := g.WeightOf(edge)
dangling.Add(edge, -weight)
return nil
})
var next graph.Edge
for dangling.PollInto(&next) {
target, _ := g.SourceVertexOf(next)
if !unspanned.Contains(target) {
target, _ = g.TargetVertexOf(next)
if !unspanned.Contains(target) {
continue
}
}
mst.es.Add(next)
unspanned.Remove(target)
edges, _ := g.EdgesOf(target)
edges.ForEach(func(edge graph.Edge) error {
candidate, _ := graph.OppositeVertexOf(g, edge, target)
if !unspanned.Contains(candidate) {
return nil
}
weight, _ := g.WeightOf(edge)
dangling.Add(edge, -weight)
return nil
})
}
}
mst.es.ForEach(func(edge graph.Edge) error {
weight, _ := g.WeightOf(edge)
mst.TotalWeight += weight
return nil
})
return mst
} | graph/algo/prim.go | 0.653459 | 0.446917 | prim.go | starcoder |
package src
import (
"bytes"
"math/rand"
)
// Field represents a two-dimensional field of cells.
type Field struct {
	states [][]bool
	width  int
	height int
}

// NewField returns an empty field of the specified width and height.
func NewField(w, h int) *Field {
	rows := make([][]bool, h)
	for y := range rows {
		rows[y] = make([]bool, w)
	}
	return &Field{states: rows, width: w, height: h}
}

// Set sets the state of the specified cell to the given value.
func (field *Field) Set(x, y int, b bool) {
	field.states[y][x] = b
}

// Alive reports whether the specified cell is alive. Coordinates one field
// outside the boundaries wrap toroidally (e.g. x == -1 means width-1).
func (field *Field) Alive(x, y int) bool {
	wx := (x + field.width) % field.width
	wy := (y + field.height) % field.height
	return field.states[wy][wx]
}

// Next returns the state of the specified cell at the next time step, per
// Conway's rules: exactly 3 live neighbours -> alive, exactly 2 -> keep the
// current state, anything else -> dead.
func (field *Field) Next(x, y int) bool {
	neighbours := 0
	for dy := -1; dy <= 1; dy++ {
		for dx := -1; dx <= 1; dx++ {
			if dx == 0 && dy == 0 {
				continue
			}
			if field.Alive(x+dx, y+dy) {
				neighbours++
			}
		}
	}
	return neighbours == 3 || (neighbours == 2 && field.Alive(x, y))
}
// Life stores the state of a round of Conway's Game of Life.
type Life struct {
a, b *Field
width int
height int
}
// NewLife returns a new Life game state with a random initial state.
func NewLife(width, height int) *Life {
a := NewField(width, height)
for i := 0; i < (width * height / 4); i++ {
a.Set(rand.Intn(width), rand.Intn(height), true)
}
return &Life{
a: a, b: NewField(width, height),
width: width, height: height,
}
}
// Step advances the game by one instant, recomputing and updating all cells.
func (life *Life) Step() {
// Update the state of the next field (b) from the current field (a).
for y := 0; y < life.height; y++ {
for x := 0; x < life.width; x++ {
life.b.Set(x, y, life.a.Next(x, y))
}
}
// Swap fields a and b.
life.a, life.b = life.b, life.a
}
// String returns the game board as a string.
func (life *Life) String() string {
var buf bytes.Buffer
for y := 0; y < life.height; y++ {
for x := 0; x < life.width; x++ {
b := " "
if life.a.Alive(x, y) {
b = "\u2588\u2588"
}
buf.WriteString(b)
}
buf.WriteByte('\n')
}
return buf.String()
} | src/conways_game_of_life.go | 0.797281 | 0.451266 | conways_game_of_life.go | starcoder |
package yrsensor
import (
"fmt"
"github.com/perbu/yrpoller/timestream"
log "github.com/sirupsen/logrus"
"time"
)
// interpolateObservations linearly interpolates the numeric fields of two
// observations at time `when`, which is assumed to lie within
// [first.Time, last.Time]. factor is 0 at first.Time and 1 at last.Time.
// NOTE(review): WindFromDirection is interpolated linearly like the other
// fields, which is wrong across the 0/360-degree wrap (e.g. 350 -> 10
// passes through 180) — confirm whether the inputs make this impossible.
func interpolateObservations(first *Observation, last *Observation, when time.Time) Observation {
	var obs Observation
	timeDelta := last.Time.Sub(first.Time).Seconds() // Typically 60mins
	howFarInto := when.Sub(first.Time).Seconds()
	factor := float64(howFarInto) / float64(timeDelta)
	obs.Time = when
	// Interpolating here:
	obs.AirTemperature = last.AirTemperature*factor + first.AirTemperature*(1.0-factor)
	obs.AirPressureAtSeaLevel = last.AirPressureAtSeaLevel*factor + first.AirPressureAtSeaLevel*(1.0-factor)
	obs.RelativeHumidity = last.RelativeHumidity*factor + first.RelativeHumidity*(1.0-factor)
	obs.WindSpeed = last.WindSpeed*factor + first.WindSpeed*(1.0-factor)
	obs.WindFromDirection = last.WindFromDirection*factor + first.WindFromDirection*(1.0-factor)
	return obs
}
// Emit data
func emitLocation(tsconfig timestream.TimestreamState, location Location,
timeseries *ObservationTimeSeries, when time.Time) {
var obs Observation
firstAfter := 0
// Find out where we are in the time series.
for i := range timeseries.ts {
if timeseries.ts[i].Time.After(when) {
firstAfter = i
break
}
}
// First measurement is still in the future so we can't interpolate:
if firstAfter == 0 {
obs.Time = timeseries.ts[0].Time
obs.AirTemperature = timeseries.ts[0].AirTemperature
obs.AirPressureAtSeaLevel = timeseries.ts[0].AirPressureAtSeaLevel
obs.RelativeHumidity = timeseries.ts[0].RelativeHumidity
obs.WindSpeed = timeseries.ts[0].WindSpeed
obs.WindFromDirection = timeseries.ts[0].WindFromDirection
} else {
// Interpolate the two relevant measurements
last := timeseries.ts[firstAfter]
first := timeseries.ts[firstAfter-1]
obs = interpolateObservations(&first, &last, when)
}
// add the Id (place)
obs.Id = location.Id
// jsonData, err := json.MarshalIndent(obs, "TS: ", " ")
tsconfig.MakeEntry(timestream.TimestreamEntry{
Time: obs.Time,
SensorId: obs.Id,
TableName: "air_temperature",
Value: fmt.Sprintf("%v", obs.AirTemperature),
})
tsconfig.MakeEntry(timestream.TimestreamEntry{
Time: obs.Time,
SensorId: obs.Id,
TableName: "air_pressure_at_sealevel",
Value: fmt.Sprintf("%v", obs.AirPressureAtSeaLevel),
})
tsconfig.MakeEntry(timestream.TimestreamEntry{
Time: obs.Time,
SensorId: obs.Id,
TableName: "relative_humidity",
Value: fmt.Sprintf("%v", obs.RelativeHumidity),
})
tsconfig.MakeEntry(timestream.TimestreamEntry{
Time: obs.Time,
SensorId: obs.Id,
TableName: "wind_speed",
Value: fmt.Sprintf("%v", obs.WindSpeed),
})
tsconfig.MakeEntry(timestream.TimestreamEntry{
Time: obs.Time,
SensorId: obs.Id,
TableName: "wind_from_direction",
Value: fmt.Sprintf("%v", obs.WindFromDirection),
})
return
}
// waitForObservations reports whether the observation cache holds an entry
// for every configured location. The read is deliberately unsynchronized:
// a stale read can at worst delay startup by one poll interval.
func waitForObservations(fc *ObservationCache, locs *Locations) bool {
	ready := len(fc.observations) == len(locs.Locations)
	if ready {
		log.Debug("(emitter) Observations are present.")
	} else {
		log.Debug("(emitter) Observations are not yet present.")
	}
	return ready
}
func emitter(config *EmitterConfig) {
var previousEmit time.Time
log.Info("Starting emitter")
tsconfig := timestream.Factory(config.AwsRegion, config.AwsTimestreamDbname)
err := tsconfig.CheckAndCreateTables([]string{
"air_temperature", "air_pressure_at_sealevel", "relative_humidity",
"wind_speed", "wind_from_direction"})
if err != nil {
panic(err.Error())
}
for waitForObservations(config.ObservationCachePtr, &config.Locations) == false {
time.Sleep(100 * time.Millisecond)
}
// run until until the channel closes.
for {
select {
default:
emitNeeded := time.Now().UTC().Sub(previousEmit) > config.EmitterInterval
if emitNeeded {
log.Debug("(emitter) Emit triggered")
for _, loc := range config.Locations.Locations {
log.Debugf("(emitter) Requesting obs for loc %s", loc.Id)
resCh := make(chan ObservationTimeSeries)
config.TsRequestChannel <- TimeSeriesRequest{
Location: loc.Id,
ResponseChannel: resCh,
}
resTimeSeries := <-resCh
emitLocation(tsconfig, loc, &resTimeSeries, time.Now().UTC())
}
errs := tsconfig.FlushAwsTimestreamWrites()
if len(errs) > 0 {
for _, err := range errs {
if err != nil {
if config.DaemonStatusPtr != nil {
config.DaemonStatusPtr.IncEmitError(err.Error())
}
}
}
} else {
if config.DaemonStatusPtr != nil {
config.DaemonStatusPtr.IncEmit()
}
}
previousEmit = time.Now().UTC()
log.Debugf("(emitter) Emit done at %s", previousEmit)
} else {
log.Debugf("(emitter) No emit needed at this point (last emit: %s)",
previousEmit.Format(time.RFC3339))
time.Sleep(5 * time.Second)
}
case <-config.Finished:
log.Info("Emitter ending.")
config.Finished <- true
return
}
}
} | yrsensor/emitter.go | 0.668772 | 0.46393 | emitter.go | starcoder |
package scene
import (
"image/color"
"github.com/pankona/gomo-simra/simra"
"github.com/pankona/gomo-simra/simra/image"
)
const (
// ScreenWidth is screen width
ScreenWidth = 1080 / 2
// ScreenHeight is screen height
ScreenHeight = 1920 / 2
)
// Title represents a scene object for Title
type Title struct {
simra simra.Simraer
text simra.Spriter
isAnimating bool
}
// Initialize initializes title scene
// This is called from simra.
// simra.SetDesiredScreenSize should be called to determine
// screen size of this scene.
func (t *Title) Initialize(sim simra.Simraer) {
t.simra = sim
t.simra.SetDesiredScreenSize(ScreenWidth, ScreenHeight)
t.initialize()
}
func (t *Title) initialize() {
	sprite := t.simra.NewSprite()
	sprite.SetScale(ScreenWidth, 80)
	sprite.SetPosition(ScreenWidth/2, ScreenHeight/2)

	// One animation frame per letter of "animationtest", all styled
	// identically (red, 60pt, full sprite rectangle).
	animationSet := simra.NewAnimationSet()
	for _, letter := range "animationtest" {
		animationSet.AddTexture(t.simra.NewTextTexture(string(letter), 60,
			color.RGBA{255, 0, 0, 255},
			image.Rect(0, 0, sprite.GetScale().W, sprite.GetScale().H)))
	}
	animationSet.SetInterval(12) // frames
	sprite.AddAnimationSet("animation test", animationSet)
	t.simra.AddSprite(sprite)

	// Show a static label until the animation is started by a touch.
	tex := t.simra.NewTextTexture("animation test",
		60, color.RGBA{255, 0, 0, 255}, image.Rect(0, 0, sprite.GetScale().W, sprite.GetScale().H))
	sprite.ReplaceTexture(tex)
	t.simra.AddTouchListener(t)
	t.text = sprite
}
// Drive is called from simra.
// This is used to update sprites position.
// This will be called 60 times per sec.
func (t *Title) Drive() {
}
// OnTouchBegin is called when Title scene is Touched.
func (t *Title) OnTouchBegin(x, y float32) {
}
// OnTouchMove is called when Title scene is Touched and moved.
func (t *Title) OnTouchMove(x, y float32) {
}
// OnTouchEnd is called when Title scene is Touched and it is released.
func (t *Title) OnTouchEnd(x, y float32) {
if t.isAnimating {
t.text.StopAnimation()
t.isAnimating = false
} else {
t.text.StartAnimation("animation test", true, func() {})
t.isAnimating = true
}
} | examples/animation1/scene/title.go | 0.616936 | 0.437283 | title.go | starcoder |
package util
import (
"fmt"
"math"
"reflect"
)
// ReadPackedStruct decodes a packed (padding-free) binary representation
// from bytes into the struct pointed to by place, field by field in
// declaration order, using HostByteOrder for multi-byte values.
// place must be a non-nil pointer to a struct whose fields are all
// fixed-size primitives (u/int8..64, float32/64).
func ReadPackedStruct(bytes []byte, place interface{}) error {
	if place == nil {
		return fmt.Errorf("error: ReadPackedStruct(): place is nil")
	}
	rv := reflect.ValueOf(place)
	// Robustness fix: a non-pointer argument (or a typed nil pointer inside
	// the interface) used to panic later — either on Set() of an
	// unaddressable field or on .Type() of the zero Value produced by
	// Indirect(nil). Report both as errors instead.
	if rv.Kind() != reflect.Ptr || rv.IsNil() {
		return fmt.Errorf("error: ReadPackedStruct(): place must be a non-nil pointer to a struct")
	}
	val := reflect.Indirect(rv)
	t := val.Type()
	if t.Kind() != reflect.Struct {
		return fmt.Errorf("error: ReadPackedStruct(): type %s is not a struct", t.Name())
	}
	for fieldNo := 0; fieldNo < t.NumField(); fieldNo++ {
		field := t.Field(fieldNo)
		// Each case consumes exactly the field's packed width from bytes.
		switch field.Type.Kind() {
		case reflect.Uint8:
			if len(bytes) < 1 {
				return fmt.Errorf("error: not enough bytes to read uint8: %d", len(bytes))
			}
			u8 := uint8(bytes[0])
			val.Field(fieldNo).Set(reflect.ValueOf(u8))
			bytes = bytes[1:]
		case reflect.Int8:
			if len(bytes) < 1 {
				return fmt.Errorf("error: not enough bytes to read int8: %d", len(bytes))
			}
			i8 := int8(bytes[0])
			val.Field(fieldNo).Set(reflect.ValueOf(i8))
			bytes = bytes[1:]
		case reflect.Uint16:
			if len(bytes) < 2 {
				return fmt.Errorf("error: not enough bytes to read uint16: %d", len(bytes))
			}
			u16 := HostByteOrder.Uint16(bytes[0:2])
			val.Field(fieldNo).Set(reflect.ValueOf(u16))
			bytes = bytes[2:]
		case reflect.Int16:
			if len(bytes) < 2 {
				return fmt.Errorf("error: not enough bytes to read int16: %d", len(bytes))
			}
			i16 := int16(HostByteOrder.Uint16(bytes[0:2]))
			val.Field(fieldNo).Set(reflect.ValueOf(i16))
			bytes = bytes[2:]
		case reflect.Uint32:
			if len(bytes) < 4 {
				return fmt.Errorf("error: not enough bytes to read uint32: %d", len(bytes))
			}
			u32 := HostByteOrder.Uint32(bytes[0:4])
			val.Field(fieldNo).Set(reflect.ValueOf(u32))
			bytes = bytes[4:]
		case reflect.Int32:
			if len(bytes) < 4 {
				return fmt.Errorf("error: not enough bytes to read int32: %d", len(bytes))
			}
			i32 := int32(HostByteOrder.Uint32(bytes[0:4]))
			val.Field(fieldNo).Set(reflect.ValueOf(i32))
			bytes = bytes[4:]
		case reflect.Uint64:
			if len(bytes) < 8 {
				return fmt.Errorf("error: not enough bytes to read uint64: %d", len(bytes))
			}
			u64 := HostByteOrder.Uint64(bytes[0:8])
			val.Field(fieldNo).Set(reflect.ValueOf(u64))
			bytes = bytes[8:]
		case reflect.Int64:
			if len(bytes) < 8 {
				return fmt.Errorf("error: not enough bytes to read int64: %d", len(bytes))
			}
			i64 := int64(HostByteOrder.Uint64(bytes[0:8]))
			val.Field(fieldNo).Set(reflect.ValueOf(i64))
			bytes = bytes[8:]
		case reflect.Float32:
			if len(bytes) < 4 {
				return fmt.Errorf("error: not enough bytes to read float32: %d", len(bytes))
			}
			f32 := math.Float32frombits(HostByteOrder.Uint32(bytes[0:4]))
			val.Field(fieldNo).Set(reflect.ValueOf(f32))
			bytes = bytes[4:]
		case reflect.Float64:
			if len(bytes) < 8 {
				return fmt.Errorf("error: not enough bytes to read float64: %d", len(bytes))
			}
			f64 := math.Float64frombits(HostByteOrder.Uint64(bytes[0:8]))
			val.Field(fieldNo).Set(reflect.ValueOf(f64))
			bytes = bytes[8:]
		default:
			return fmt.Errorf("error: cannot read non-primitive type %s", field.Type.Name())
		}
	}
	return nil
}
// PackedStructSize returns the number of bytes theStruct occupies when
// packed field-by-field with no padding (the sum of the fields' own sizes).
// theStruct may be a struct value, a pointer to one, or nil (size 0);
// anything else is an error.
func PackedStructSize(theStruct interface{}) (int, error) {
	if theStruct == nil {
		return 0, nil
	}
	v := reflect.ValueOf(theStruct)
	if v.Kind() == reflect.Ptr {
		v = reflect.Indirect(v)
	}
	t := v.Type()
	if t.Kind() != reflect.Struct {
		return 0, fmt.Errorf("error: PackedStructSize() called on non-struct type %s", t.Name())
	}
	total := 0
	for i := 0; i < t.NumField(); i++ {
		total += int(t.Field(i).Type.Size())
	}
	return total, nil
}
// Converts theStruct into an array of bytes.
// It is an error to call this function on anything that is not a struct (or a pointer thereto) with primitive-only members.
// len(bytes) must be sufficient to store every member of theStruct.
func PackedStructToBytes(bytes []byte, theStruct interface{}) error {
t := reflect.TypeOf(theStruct)
v := reflect.ValueOf(theStruct)
if t.Kind() == reflect.Ptr {
v = reflect.Indirect(v)
t = v.Type()
}
if t.Kind() != reflect.Struct {
return fmt.Errorf("error: PackedStructToBytes() called on non-struct type %s", t.Name())
}
for fieldNo := 0; fieldNo < v.NumField(); fieldNo++ {
field := t.Field(fieldNo)
switch field.Type.Kind() {
case reflect.Uint8:
if len(bytes) < 1 {
return fmt.Errorf("error: not enough space to write uint8: %d", len(bytes))
}
bytes[0] = byte(v.Field(fieldNo).Uint())
bytes = bytes[1:]
case reflect.Int8:
if len(bytes) < 1 {
return fmt.Errorf("error: not enough space to write int8: %d", len(bytes))
}
bytes[0] = byte(v.Field(fieldNo).Int())
bytes = bytes[1:]
case reflect.Uint16:
if len(bytes) < 2 {
return fmt.Errorf("error: not enough space to write uint16: %d", len(bytes))
}
HostByteOrder.PutUint16(bytes[:2], uint16(v.Field(fieldNo).Uint()))
bytes = bytes[2:]
case reflect.Int16:
if len(bytes) < 2 {
return fmt.Errorf("error: not enough space to write int16: %d", len(bytes))
}
HostByteOrder.PutUint16(bytes[:2], uint16(v.Field(fieldNo).Int()))
bytes = bytes[2:]
case reflect.Uint32:
if len(bytes) < 4 {
return fmt.Errorf("error: not enough space to write uint32: %d", len(bytes))
}
HostByteOrder.PutUint32(bytes[:4], uint32(v.Field(fieldNo).Uint()))
bytes = bytes[4:]
case reflect.Int32:
if len(bytes) < 4 {
return fmt.Errorf("error: not enough space to write int32: %d", len(bytes))
}
HostByteOrder.PutUint32(bytes[:4], uint32(v.Field(fieldNo).Int()))
bytes = bytes[4:]
case reflect.Uint64:
if len(bytes) < 8 {
return fmt.Errorf("error: not enough space to write uint64: %d", len(bytes))
}
HostByteOrder.PutUint64(bytes[:8], v.Field(fieldNo).Uint())
bytes = bytes[8:]
case reflect.Int64:
if len(bytes) < 8 {
return fmt.Errorf("error: not enough space to write int64: %d", len(bytes))
}
HostByteOrder.PutUint64(bytes[:8], uint64(v.Field(fieldNo).Int()))
bytes = bytes[8:]
case reflect.Float32:
if len(bytes) < 4 {
return fmt.Errorf("error: not enough space to write float32: %d", len(bytes))
}
HostByteOrder.PutUint32(bytes[:4], math.Float32bits(float32(v.Field(fieldNo).Float())))
bytes = bytes[4:]
case reflect.Float64:
if len(bytes) < 8 {
return fmt.Errorf("error: not enough bytes to read float64: %d", len(bytes))
}
HostByteOrder.PutUint64(bytes[:8], math.Float64bits(v.Field(fieldNo).Float()))
bytes = bytes[8:]
default:
return fmt.Errorf("error: cannot read non-primitive type %s", field.Type.Name())
}
}
return nil
} | util/struct.go | 0.580709 | 0.409132 | struct.go | starcoder |
package ast
import (
"github.com/ajz01/calc/token"
)
// Node is the interface implemented by every AST node; it exposes the
// source span the node covers.
type Node interface {
	Pos() token.Pos // position of first character belonging to the node
	End() token.Pos // position of first character immediately after the node
}

// Expr is implemented by all expression nodes. The unexported exprNode
// method keeps the set of implementations closed to this package.
type Expr interface {
	Node
	exprNode()
}
// Field represents a single field declaration: one or more names, a type,
// and an optional tag literal.
type Field struct {
	Names []*Ident // field names; empty for an unnamed field
	Type Expr // field type
	Tag *BasicLit // field tag; nil when absent
}
// Pos returns the position of the first name when names are present,
// otherwise the position of the field's type.
func (f *Field) Pos() token.Pos {
	if len(f.Names) == 0 {
		return f.Type.Pos()
	}
	return f.Names[0].Pos()
}
// End returns the position just past the tag when one is present,
// otherwise just past the field's type.
func (f *Field) End() token.Pos {
	if tag := f.Tag; tag != nil {
		return tag.End()
	}
	return f.Type.End()
}
// FieldList represents a list of Fields enclosed by a pair of delimiters
// (e.g. parentheses).
type FieldList struct {
	Opening token.Pos // position of the opening delimiter, if any
	List []*Field // field declarations; may be empty
	Closing token.Pos // position of the closing delimiter, if any
}
// Pos returns the opening delimiter's position when valid, otherwise the
// position of the first field, or token.NoPos for an empty list.
func (f *FieldList) Pos() token.Pos {
	switch {
	case f.Opening.IsValid():
		return f.Opening
	case len(f.List) > 0:
		return f.List[0].Pos()
	default:
		return token.NoPos
	}
}
// End returns the position just past the closing delimiter when valid,
// otherwise the end of the last field, or token.NoPos for an empty list.
func (f *FieldList) End() token.Pos {
	switch {
	case f.Closing.IsValid():
		return f.Closing + 1
	case len(f.List) > 0:
		return f.List[len(f.List)-1].End()
	default:
		return token.NoPos
	}
}
// NumFields returns the number of declared fields, counting each name in a
// multi-name declaration separately and an unnamed field as one. A nil
// receiver yields 0.
func (f *FieldList) NumFields() int {
	if f == nil {
		return 0
	}
	total := 0
	for _, field := range f.List {
		if n := len(field.Names); n > 0 {
			total += n
		} else {
			total++ // unnamed field counts as a single field
		}
	}
	return total
}
// Expression node types.
type (
	// BadExpr is a placeholder for an expression containing a syntax error.
	BadExpr struct {
		From, To token.Pos // span of the unparsable text
	}
	// Ident represents an identifier.
	Ident struct {
		NamePos token.Pos // identifier position
		Name string // identifier name
	}
	// BasicLit represents a literal of basic type.
	BasicLit struct {
		ValuePos token.Pos // literal position
		Kind token.Token // literal kind
		Value string // literal text as it appears in the source
	}
	// ParenExpr represents a parenthesized expression.
	ParenExpr struct {
		Lparen token.Pos // position of "("
		X Expr // parenthesized expression
		Rparen token.Pos // position of ")"
	}
	// CallExpr represents an expression followed by an argument list.
	CallExpr struct {
		Fun Expr // function expression
		Lparen token.Pos // position of "("
		Args []Expr // function arguments; may be empty
		Rparen token.Pos // position of ")"
	}
	// UnaryExpr represents a unary (prefix) expression.
	UnaryExpr struct {
		OpPos token.Pos // position of the operator
		Op token.Token // operator token
		X Expr // operand
	}
	// BinaryExpr represents a binary (infix) expression.
	BinaryExpr struct {
		X Expr // left operand
		OpPos token.Pos // position of the operator
		Op token.Token // operator token
		Y Expr // right operand
	}
	/*FuncType struct {
		Func token.Pos
		Params *FieldList
	}*/
)
// Pos implementations: each returns the position of the first character of
// the node (for CallExpr/BinaryExpr, the start of the leftmost subexpression).
func (x *BadExpr) Pos() token.Pos { return x.From }
func (x *Ident) Pos() token.Pos { return x.NamePos }
func (x *BasicLit) Pos() token.Pos { return x.ValuePos }
func (x *ParenExpr) Pos() token.Pos { return x.Lparen }
func (x *CallExpr) Pos() token.Pos { return x.Fun.Pos() }
func (x *UnaryExpr) Pos() token.Pos { return x.OpPos }
func (x *BinaryExpr) Pos() token.Pos { return x.X.Pos() }
/*func (x *FuncType) Pos() token.Pos {
if x.Func.IsValid() || x.Params == nil {
return x.Func
}
return x.Params.Pos()
}*/
// End implementations: each returns the position of the first character
// immediately after the node. Ident/BasicLit derive it from the literal
// text length; delimiter-terminated nodes add 1 past the closing token.
func (x *BadExpr) End() token.Pos { return x.To }
func (x *Ident) End() token.Pos { return token.Pos(int(x.NamePos) + len(x.Name)) }
func (x *BasicLit) End() token.Pos { return token.Pos(int(x.ValuePos) + len(x.Value)) }
func (x *ParenExpr) End() token.Pos { return x.Rparen + 1 }
func (x *CallExpr) End() token.Pos { return x.Rparen + 1 }
func (x *UnaryExpr) End() token.Pos { return x.X.End() }
func (x *BinaryExpr) End() token.Pos { return x.Y.End() }
//func (x *FuncType) End() token.Pos { return x.Params.End() }
// exprNode implementations mark these node types as satisfying Expr; the
// empty unexported method prevents types outside this package from doing so.
func (*BadExpr) exprNode() {}
func (*Ident) exprNode() {}
func (*BasicLit) exprNode() {}
func (*ParenExpr) exprNode() {}
func (*CallExpr) exprNode() {}
func (*UnaryExpr) exprNode() {}
func (*BinaryExpr) exprNode() {}
//func (*FuncType) exprNode() {} | ast/ast.go | 0.628179 | 0.411998 | ast.go | starcoder |
package iso20022
// RedemptionOrder15 is an instruction from an investor to sell investment
// fund units back to the fund (ISO 20022 message component). All fields map
// to their ISO XML element names via struct tags; optional elements carry
// the omitempty option.
type RedemptionOrder15 struct {

	// Unique and unambiguous identifier for the order, as assigned by the instructing party.
	OrderReference *Max35Text `xml:"OrdrRef"`

	// Unique and unambiguous investor's identification of the order. This reference can typically be used in a hub scenario to give the reference of the order as assigned by the underlying client.
	ClientReference *Max35Text `xml:"ClntRef,omitempty"`

	// Account impacted by the investment fund order.
	InvestmentAccountDetails *InvestmentAccount58 `xml:"InvstmtAcctDtls"`

	// Category of the investment fund order.
	OrderType []*FundOrderType4Choice `xml:"OrdrTp,omitempty"`

	// Additional information about the investor.
	BeneficiaryDetails []*IndividualPerson32 `xml:"BnfcryDtls,omitempty"`

	// Amount of money or the number of units or percentage to be redeemed for the redemption order.
	AmountOrUnitsOrPercentage *FinancialInstrumentQuantity28Choice `xml:"AmtOrUnitsOrPctg"`

	// Indicates the rounding direction applied to nearest unit.
	Rounding *RoundingDirection2Code `xml:"Rndg,omitempty"`

	// Total amount of money paid /to be paid or received in exchange for the financial instrument in the individual order.
	SettlementAmount *ActiveCurrencyAndAmount `xml:"SttlmAmt,omitempty"`

	// Date on which cash is available.
	CashSettlementDate *ISODate `xml:"CshSttlmDt,omitempty"`

	// Method by which the transaction is settled.
	SettlementMethod *DeliveryReceiptType2Code `xml:"SttlmMtd,omitempty"`

	// Information needed to process a currency exchange or conversion.
	// How the exchange rate is expressed determines which currency is the Unit Currency and Quoted Currency. If the amounts concerned are EUR 1000 and USD 1300, the exchange rate may be expressed as per either of the following examples:
	// EXAMPLE 1
	// UnitCurrency  EUR
	// QuotedCurrency  USD
	// ExchangeRate  1.300
	// EXAMPLE 2
	// UnitCurrency  USD
	// QuotedCurrency  EUR
	// ExchangeRate  0.769
	ForeignExchangeDetails *ForeignExchangeTerms32 `xml:"FXDtls,omitempty"`

	// Dividend option chosen by the account owner based on the options offered in the prospectus.
	IncomePreference *IncomePreference1Code `xml:"IncmPref,omitempty"`

	// Tax group to which the purchased investment fund units belong. The investor indicates to the intermediary operating pooled nominees, which type of unit is to be sold.
	Group1Or2Units *UKTaxGroupUnit1Code `xml:"Grp1Or2Units,omitempty"`

	// Fees (charges/commission) and tax to be applied to the gross amount.
	TransactionOverhead *FeeAndTax1 `xml:"TxOvrhd,omitempty"`

	// Parameters used to execute the settlement of an investment fund order.
	SettlementAndCustodyDetails *FundSettlementParameters12 `xml:"SttlmAndCtdyDtls,omitempty"`

	// Indicates whether the financial instrument is to be physically delivered.
	PhysicalDeliveryIndicator *YesNoIndicator `xml:"PhysDlvryInd"`

	// Information related to the physical delivery of the securities.
	PhysicalDeliveryDetails *DeliveryParameters3 `xml:"PhysDlvryDtls,omitempty"`

	// Payment process for the transfer of cash from the debtor to the creditor.
	CashSettlementDetails *PaymentTransaction72 `xml:"CshSttlmDtls,omitempty"`

	// Additional specific settlement information for non-regulated traded funds.
	NonStandardSettlementInformation *Max350Text `xml:"NonStdSttlmInf,omitempty"`

	// Breakdown of the net amount per type of order.
	StaffClientBreakdown []*InvestmentFundsOrderBreakdown2 `xml:"StffClntBrkdwn,omitempty"`

	// Specifies if advice has been received from an independent financial advisor.
	FinancialAdvice *FinancialAdvice1Code `xml:"FinAdvc,omitempty"`

	// Specifies whether the trade is negotiated.
	NegotiatedTrade *NegotiatedTrade1Code `xml:"NgtdTrad,omitempty"`

	// Party related to the transaction.
	RelatedPartyDetails []*Intermediary40 `xml:"RltdPtyDtls,omitempty"`

	// Part of an investor's retained subscription amount that is returned by the fund in order to reimburse preliminary incentive/performance fees.
	Equalisation *Equalisation1 `xml:"Equlstn,omitempty"`

	// Assessment of the customer’s behaviour at the time of the account opening application.
	CustomerConductClassification *CustomerConductClassification1Choice `xml:"CstmrCndctClssfctn,omitempty"`

	// Means by which the investor or account owner submits the open account form.
	TransactionChannelType *TransactionChannelType1Choice `xml:"TxChanlTp,omitempty"`

	// Type of signature.
	SignatureType *SignatureType1Choice `xml:"SgntrTp,omitempty"`

	// Information about a non-standard order.
	OrderWaiverDetails *OrderWaiver1 `xml:"OrdrWvrDtls,omitempty"`
}
// Set* methods below assign scalar values by converting the plain Go value
// into the corresponding ISO pointer type; Add* methods allocate the nested
// component (appending to the slice for repeatable elements) and return it
// for further population.

// SetOrderReference records the instructing party's order reference.
func (r *RedemptionOrder15) SetOrderReference(value string) {
	r.OrderReference = (*Max35Text)(&value)
}

// SetClientReference records the underlying client's order reference.
func (r *RedemptionOrder15) SetClientReference(value string) {
	r.ClientReference = (*Max35Text)(&value)
}

// AddInvestmentAccountDetails allocates and returns the impacted account component.
func (r *RedemptionOrder15) AddInvestmentAccountDetails() *InvestmentAccount58 {
	r.InvestmentAccountDetails = new(InvestmentAccount58)
	return r.InvestmentAccountDetails
}

// AddOrderType appends and returns a new order type choice.
func (r *RedemptionOrder15) AddOrderType() *FundOrderType4Choice {
	newValue := new(FundOrderType4Choice)
	r.OrderType = append(r.OrderType, newValue)
	return newValue
}

// AddBeneficiaryDetails appends and returns a new beneficiary component.
func (r *RedemptionOrder15) AddBeneficiaryDetails() *IndividualPerson32 {
	newValue := new(IndividualPerson32)
	r.BeneficiaryDetails = append(r.BeneficiaryDetails, newValue)
	return newValue
}

// AddAmountOrUnitsOrPercentage allocates and returns the redeemed quantity choice.
func (r *RedemptionOrder15) AddAmountOrUnitsOrPercentage() *FinancialInstrumentQuantity28Choice {
	r.AmountOrUnitsOrPercentage = new(FinancialInstrumentQuantity28Choice)
	return r.AmountOrUnitsOrPercentage
}

// SetRounding records the rounding direction code.
func (r *RedemptionOrder15) SetRounding(value string) {
	r.Rounding = (*RoundingDirection2Code)(&value)
}

// SetSettlementAmount records the settlement amount with its currency.
func (r *RedemptionOrder15) SetSettlementAmount(value, currency string) {
	r.SettlementAmount = NewActiveCurrencyAndAmount(value, currency)
}

// SetCashSettlementDate records the date on which cash is available.
func (r *RedemptionOrder15) SetCashSettlementDate(value string) {
	r.CashSettlementDate = (*ISODate)(&value)
}

// SetSettlementMethod records the settlement method code.
func (r *RedemptionOrder15) SetSettlementMethod(value string) {
	r.SettlementMethod = (*DeliveryReceiptType2Code)(&value)
}

// AddForeignExchangeDetails allocates and returns the FX terms component.
func (r *RedemptionOrder15) AddForeignExchangeDetails() *ForeignExchangeTerms32 {
	r.ForeignExchangeDetails = new(ForeignExchangeTerms32)
	return r.ForeignExchangeDetails
}

// SetIncomePreference records the dividend option code.
func (r *RedemptionOrder15) SetIncomePreference(value string) {
	r.IncomePreference = (*IncomePreference1Code)(&value)
}

// SetGroup1Or2Units records the UK tax group unit code.
func (r *RedemptionOrder15) SetGroup1Or2Units(value string) {
	r.Group1Or2Units = (*UKTaxGroupUnit1Code)(&value)
}

// AddTransactionOverhead allocates and returns the fees-and-tax component.
func (r *RedemptionOrder15) AddTransactionOverhead() *FeeAndTax1 {
	r.TransactionOverhead = new(FeeAndTax1)
	return r.TransactionOverhead
}

// AddSettlementAndCustodyDetails allocates and returns the settlement parameters component.
func (r *RedemptionOrder15) AddSettlementAndCustodyDetails() *FundSettlementParameters12 {
	r.SettlementAndCustodyDetails = new(FundSettlementParameters12)
	return r.SettlementAndCustodyDetails
}

// SetPhysicalDeliveryIndicator records whether the instrument is physically delivered.
func (r *RedemptionOrder15) SetPhysicalDeliveryIndicator(value string) {
	r.PhysicalDeliveryIndicator = (*YesNoIndicator)(&value)
}

// AddPhysicalDeliveryDetails allocates and returns the physical delivery component.
func (r *RedemptionOrder15) AddPhysicalDeliveryDetails() *DeliveryParameters3 {
	r.PhysicalDeliveryDetails = new(DeliveryParameters3)
	return r.PhysicalDeliveryDetails
}

// AddCashSettlementDetails allocates and returns the cash settlement component.
func (r *RedemptionOrder15) AddCashSettlementDetails() *PaymentTransaction72 {
	r.CashSettlementDetails = new(PaymentTransaction72)
	return r.CashSettlementDetails
}

// SetNonStandardSettlementInformation records free-text settlement information.
func (r *RedemptionOrder15) SetNonStandardSettlementInformation(value string) {
	r.NonStandardSettlementInformation = (*Max350Text)(&value)
}

// AddStaffClientBreakdown appends and returns a new order breakdown component.
func (r *RedemptionOrder15) AddStaffClientBreakdown() *InvestmentFundsOrderBreakdown2 {
	newValue := new(InvestmentFundsOrderBreakdown2)
	r.StaffClientBreakdown = append(r.StaffClientBreakdown, newValue)
	return newValue
}

// SetFinancialAdvice records the financial advice code.
func (r *RedemptionOrder15) SetFinancialAdvice(value string) {
	r.FinancialAdvice = (*FinancialAdvice1Code)(&value)
}

// SetNegotiatedTrade records the negotiated trade code.
func (r *RedemptionOrder15) SetNegotiatedTrade(value string) {
	r.NegotiatedTrade = (*NegotiatedTrade1Code)(&value)
}

// AddRelatedPartyDetails appends and returns a new related party component.
func (r *RedemptionOrder15) AddRelatedPartyDetails() *Intermediary40 {
	newValue := new(Intermediary40)
	r.RelatedPartyDetails = append(r.RelatedPartyDetails, newValue)
	return newValue
}

// AddEqualisation allocates and returns the equalisation component.
func (r *RedemptionOrder15) AddEqualisation() *Equalisation1 {
	r.Equalisation = new(Equalisation1)
	return r.Equalisation
}

// AddCustomerConductClassification allocates and returns the conduct classification choice.
func (r *RedemptionOrder15) AddCustomerConductClassification() *CustomerConductClassification1Choice {
	r.CustomerConductClassification = new(CustomerConductClassification1Choice)
	return r.CustomerConductClassification
}

// AddTransactionChannelType allocates and returns the transaction channel choice.
func (r *RedemptionOrder15) AddTransactionChannelType() *TransactionChannelType1Choice {
	r.TransactionChannelType = new(TransactionChannelType1Choice)
	return r.TransactionChannelType
}

// AddSignatureType allocates and returns the signature type choice.
func (r *RedemptionOrder15) AddSignatureType() *SignatureType1Choice {
	r.SignatureType = new(SignatureType1Choice)
	return r.SignatureType
}

// AddOrderWaiverDetails allocates and returns the order waiver component.
func (r *RedemptionOrder15) AddOrderWaiverDetails() *OrderWaiver1 {
	r.OrderWaiverDetails = new(OrderWaiver1)
	return r.OrderWaiverDetails
}
package main
import (
"fmt"
"math"
"os"
yaml "github.com/goccy/go-yaml"
"github.com/soypat/godesim"
"github.com/soypat/godesim/state"
)
// Declare simulation constants: softening coefficient (avoids a singularity
// when two bodies get arbitrarily close) and big G (gravitational constant,
// m^3 kg^-1 s^-2).
const softening, G float64 = 1.0, 6.6743e-11

// Convenience aliases. NOTE(review): sin and pi are not referenced anywhere
// in this file — candidates for removal if unused elsewhere.
var sin, pi = math.Sin, math.Pi
// body describes a single gravitating point mass with its initial state.
type body struct {
	name string
	// [kg]
	mass float64
	// initial positions [m]
	x0, y0, z0 float64
	// initial velocities [m/s]
	u0, v0, w0 float64
}

// sym derives the per-body state symbol for axis s: e.g. sym("x") on the
// body named "moon" yields the symbol "x_moon".
func (b body) sym(s string) state.Symbol { return state.Symbol(fmt.Sprintf("%s_%s", s, b.name)) }
// bodies is the collection of point masses in the simulation.
type bodies []body

// DiffMap builds the ODE right-hand side for godesim. For every body and
// every axis it registers two derivatives:
//   - the position symbol (e.g. "x_moon"), whose derivative is the matching
//     velocity symbol ("Dx_moon");
//   - the velocity symbol, whose derivative is the gravitational
//     acceleration summed over all other bodies.
func (bds bodies) DiffMap() map[state.Symbol]state.Diff {
	m := make(map[state.Symbol]state.Diff)
	for i := range bds {
		bd1 := bds[i] // define new variable so closure escapes looping variable
		for _, x := range []string{"x", "y", "z"} {
			vars := x // escape looping variable
			sym1 := bd1.sym(vars)
			Dsym := "D" + sym1
			// d(position)/dt = velocity
			m[sym1] = func(s state.State) float64 { return s.X(Dsym) }
			// d(velocity)/dt = G * sum_j m_j * dx / (|dx| + softening)^3
			// NOTE(review): the acceleration is computed per axis from the
			// 1-D separation only, not the full 3-D distance — confirm this
			// decoupled approximation is intentional.
			m[Dsym] = func(s state.State) float64 {
				sum := 0.0
				for _, bd2 := range bds {
					if bd1.name == bd2.name {
						continue
					}
					diff := s.X(bd2.sym(vars)) - s.X(sym1)
					sum += bd2.mass * diff * math.Pow(math.Abs(diff)+softening, -3.0)
				}
				return G * sum
			}
		}
	}
	return m
}
// X0Map builds the initial-condition map for the simulation: for every body
// and every axis it sets the position symbol from the body's initial
// position and the derivative symbol ("D"+symbol) from its initial velocity.
func (bds bodies) X0Map() map[state.Symbol]float64 {
	x0 := make(map[state.Symbol]float64)
	for _, bd := range bds {
		axes := []struct {
			label    string
			pos, vel float64
		}{
			{"x", bd.x0, bd.u0},
			{"y", bd.y0, bd.v0},
			{"z", bd.z0, bd.w0},
		}
		for _, a := range axes {
			sym := bd.sym(a.label)
			x0[sym] = a.pos
			x0["D"+sym] = a.vel
		}
	}
	return x0
}
// main wires up a three-body (Earth/Moon/ISS) gravity simulation, reads the
// solver configuration from cfg.yml, integrates 28 days with RKF45, and
// streams the results to output.csv.
func main() {
	system := bodies{
		body{name: "earth", mass: 5.972e24}, // what do you mean geocentric model not true?
		body{name: "moon", mass: 7.3477e22, x0: 384e6, v0: 1.022e3},
		body{name: "iss", mass: 420e3, x0: 408e3, v0: 7.66e3},
	}

	cfgFile, err := os.Open("cfg.yml")
	if err != nil {
		panic(err)
	}
	defer cfgFile.Close()

	cfg := new(godesim.Config)
	// BUG FIX: the Decode error was previously ignored, so a malformed
	// cfg.yml silently ran the simulation with a zero-value configuration.
	if err := yaml.NewDecoder(cfgFile).Decode(cfg); err != nil {
		panic(err)
	}

	sim := godesim.New().SetConfig(*cfg)
	sim.SetDiffFromMap(system.DiffMap())
	sim.SetX0FromMap(system.X0Map())
	sim.SetTimespan(0., daysToSeconds(28.), 1000)

	// BUG FIX: the output file was never closed, risking unflushed results
	// on exit.
	out, err := os.Create("output.csv")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	sim.Logger.Output = out
	sim.Solver = godesim.RKF45Solver
	sim.Begin()
}
// daysToSeconds converts a duration expressed in days to seconds.
func daysToSeconds(d float64) float64 {
	const secondsPerDay = 24 * 60 * 60
	return d * secondsPerDay
}
package ast
import (
"fmt"
"strings"
"github.com/huandu/go-clone"
"github.com/stackoverflow/novah-go/data"
)
type KindType = int
const (
STAR KindType = iota
CTOR
)
type Kind struct {
Type KindType
Arity int
}
func (k Kind) String() string {
if k.Type == STAR {
return "Type"
}
return data.JoinToStringFunc(data.Range(0, k.Arity), " -> ", func(_ int) string { return "Type" })
}
type TypeVarTag = int
const (
UNBOUND TypeVarTag = iota
LINK
GENERIC
)
type Id = int
type Level = int
type TypeVar struct {
Tag TypeVarTag
Id Id
Level Level
Type Type
}
type Type interface {
sType()
Clone() Type
GetSpan() data.Span
WithSpan(data.Span) Type
GetKind() Kind
Equals(Type) bool
fmt.Stringer
}
type TConst struct {
Name string
Kind Kind
Span data.Span
}
type TApp struct {
Type Type
Types []Type
Span data.Span
}
type TArrow struct {
Args []Type
Ret Type
Span data.Span
}
type TImplicit struct {
Type Type
Span data.Span
}
type TRecord struct {
Row Type
Span data.Span
}
type TRowEmpty struct {
Span data.Span
}
type TRowExtend struct {
Labels data.LabelMap[Type]
Row Type
Span data.Span
}
type TVar struct {
Tvar *TypeVar
Span data.Span
}
func (_ TConst) sType() {}
func (_ TApp) sType() {}
func (_ TArrow) sType() {}
func (_ TImplicit) sType() {}
func (_ TRecord) sType() {}
func (_ TRowEmpty) sType() {}
func (_ TRowExtend) sType() {}
func (_ TVar) sType() {}
func (t TConst) GetSpan() data.Span {
return t.Span
}
func (t TApp) GetSpan() data.Span {
return t.Span
}
func (t TArrow) GetSpan() data.Span {
return t.Span
}
func (t TImplicit) GetSpan() data.Span {
return t.Span
}
func (t TRecord) GetSpan() data.Span {
return t.Span
}
func (t TRowEmpty) GetSpan() data.Span {
return t.Span
}
func (t TRowExtend) GetSpan() data.Span {
return t.Span
}
func (t TVar) GetSpan() data.Span {
return t.Span
}
func (t TConst) WithSpan(span data.Span) Type {
t.Span = span
return t
}
func (t TApp) WithSpan(span data.Span) Type {
t.Span = span
return t
}
func (t TArrow) WithSpan(span data.Span) Type {
t.Span = span
return t
}
func (t TImplicit) WithSpan(span data.Span) Type {
t.Span = span
return t
}
func (t TRecord) WithSpan(span data.Span) Type {
t.Span = span
return t
}
func (t TRowEmpty) WithSpan(span data.Span) Type {
t.Span = span
return t
}
func (t TRowExtend) WithSpan(span data.Span) Type {
t.Span = span
return t
}
func (t TVar) WithSpan(span data.Span) Type {
t.Span = span
return t
}
func (t TConst) Clone() Type {
return clone.Clone(t).(TConst)
}
func (t TApp) Clone() Type {
return clone.Clone(t).(TApp)
}
func (t TArrow) Clone() Type {
return clone.Clone(t).(TArrow)
}
func (t TImplicit) Clone() Type {
return clone.Clone(t).(TImplicit)
}
func (t TRecord) Clone() Type {
return clone.Clone(t).(TRecord)
}
func (t TRowEmpty) Clone() Type {
return clone.Clone(t).(TRowEmpty)
}
func (t TRowExtend) Clone() Type {
return clone.Clone(t).(TRowExtend)
}
func (t TVar) Clone() Type {
return clone.Clone(t).(TVar)
}
func (t TConst) String() string {
return ShowType(t)
}
func (t TApp) String() string {
return ShowType(t)
}
func (t TArrow) String() string {
return ShowType(t)
}
func (t TImplicit) String() string {
return ShowType(t)
}
func (t TRecord) String() string {
return ShowType(t)
}
func (t TRowEmpty) String() string {
return ShowType(t)
}
func (t TRowExtend) String() string {
return ShowType(t)
}
func (t TVar) String() string {
return ShowType(t)
}
func (t TConst) GetKind() Kind {
return t.Kind
}
func (t TArrow) GetKind() Kind {
return Kind{Type: CTOR, Arity: 1}
}
func (t TApp) GetKind() Kind {
return t.Type.GetKind()
}
func (t TVar) GetKind() Kind {
tv := t.Tvar
if tv.Tag == LINK {
return tv.Type.GetKind()
}
return Kind{Type: STAR}
}
// this is not right, but I'll postpone adding real row kinds for now
func (t TRowEmpty) GetKind() Kind {
return Kind{Type: STAR}
}
func (t TRecord) GetKind() Kind {
return t.Row.GetKind()
}
func (t TRowExtend) GetKind() Kind {
return t.Row.GetKind()
}
func (t TImplicit) GetKind() Kind {
return t.Type.GetKind()
}
func (t TConst) Equals(other Type) bool {
tc, isTc := other.(TConst)
if isTc {
return t.Name == tc.Name
}
return false
}
func (t TApp) Equals(other Type) bool {
tapp, isTapp := other.(TApp)
if !isTapp {
return false
}
if !t.Type.Equals(tapp.Type) || len(t.Types) != len(tapp.Types) {
return false
}
for i := 0; i < len(t.Types); i++ {
if !t.Types[i].Equals(tapp.Types[i]) {
return false
}
}
return true
}
func (t TArrow) Equals(other Type) bool {
tarr, isTaarr := other.(TArrow)
if !isTaarr {
return false
}
if !t.Ret.Equals(tarr.Ret) || len(t.Args) != len(tarr.Args) {
return false
}
for i := 0; i < len(t.Args); i++ {
if !t.Args[i].Equals(tarr.Args[i]) {
return false
}
}
return true
}
func (t TVar) Equals(other Type) bool {
tv, isTvar := other.(TVar)
if !isTvar {
return false
}
tvar := t.Tvar
if tvar.Tag == LINK {
return tv.Tvar.Tag == LINK && tvar.Type.Equals(tv.Tvar.Type)
}
if tvar.Tag == UNBOUND {
return tv.Tvar.Tag == UNBOUND && tvar.Id == tv.Tvar.Id && tvar.Level == tv.Tvar.Level
}
return tv.Tvar.Tag == GENERIC && tvar.Id == tv.Tvar.Id
}
func (t TRowEmpty) Equals(other Type) bool {
_, isTre := other.(TRowEmpty)
return isTre
}
func (t TRecord) Equals(other Type) bool {
rec, isTre := other.(TRecord)
if !isTre {
return false
}
return t.Row.Equals(rec.Row)
}
func (t TRowExtend) Equals(other Type) bool {
rec, isTre := other.(TRowExtend)
if !isTre {
return false
}
if !t.Row.Equals(rec.Row) {
return false
}
ents1, ents2 := t.Labels.Entries(), rec.Labels.Entries()
if len(ents1) != len(ents2) {
return false
}
for i := 0; i < len(ents1); i++ {
e1, e2 := ents1[i], ents2[i]
if e1.Label != e2.Label || !e1.Val.Equals(e2.Val) {
return false
}
}
return true
}
func (t TImplicit) Equals(other Type) bool {
tim, isTim := other.(TImplicit)
if !isTim {
return false
}
return t.Type.Equals(tim.Type)
}
func RealType(ty Type) Type {
if tv, isTvar := ty.(TVar); isTvar && tv.Tvar.Tag == LINK {
return RealType(tv.Tvar.Type)
}
return ty
}
// pretty print this type
func ShowType(typ Type) string {
return ShowTypeInner(typ, true, make(map[int]string))
}
func ShowTypeInner(typ Type, qualified bool, tvarsMap map[int]string) string {
showId := func(id Id) string {
if id, has := tvarsMap[id]; has {
return id
}
if id >= 0 {
return fmt.Sprintf("t%d", id)
} else {
return fmt.Sprintf("u%d", -id)
}
}
var run func(Type, bool, bool) string
run = func(ty Type, nested bool, topLevel bool) string {
switch t := ty.(type) {
case TConst:
if qualified {
return t.Name
} else {
name := strings.Split(t.Name, ".")
return name[len(name)-1]
}
case TApp:
{
sname := run(t.Type, nested, false)
if len(t.Types) == 0 {
return sname
}
sname = fmt.Sprintf("%s %s", sname, data.JoinToStringFunc(t.Types, " ", func(t Type) string { return run(t, true, false) }))
if nested {
return fmt.Sprintf("(%s)", sname)
}
return sname
}
case TArrow:
{
arg := t.Args[0]
_, isArr := arg.(TArrow)
args := run(arg, isArr, false)
if nested {
return fmt.Sprintf("(%s -> %s)", args, run(t.Ret, false, false))
}
return fmt.Sprintf("%s -> %s", args, run(t.Ret, nested, false))
}
case TVar:
{
tv := t.Tvar
if tv.Tag == LINK {
return run(tv.Type, nested, topLevel)
}
return showId(tv.Id)
}
case TRowEmpty:
return "[]"
case TRecord:
{
switch r := RealType(t.Row).(type) {
case TRowEmpty:
return "{}"
case TRowExtend:
{
rows := run(r, false, true)
return fmt.Sprintf("{%s}", rows[1:len(rows)-1])
}
default:
return fmt.Sprintf("{ | %s }", run(r, false, true))
}
}
case TRowExtend:
{
labels := t.Labels.Show(func(k string, v Type) string { return fmt.Sprintf("%s : %s", k, run(v, false, true)) })
var str string
switch r := RealType(t.Row).(type) {
case TRowEmpty:
str = labels
case TRowExtend:
{
rows := run(r, false, true)
if labels == "" {
str = rows[2 : len(rows)-2]
} else {
str = fmt.Sprintf("%s, %s", labels, rows[2:len(rows)-2])
}
}
default:
if labels == "" {
str = fmt.Sprintf("| %s", run(r, false, true))
} else {
str = fmt.Sprintf("%s | %s", labels, run(r, false, true))
}
}
return fmt.Sprintf("[ %s ]", str)
}
case TImplicit:
return fmt.Sprintf("{{ %s }}", run(t.Type, false, false))
default:
panic("got unknow type in ShowType")
}
}
return run(typ, false, true)
}
func SubstConst(typ Type, m map[string]Type) Type {
switch t := typ.(type) {
case TConst:
if ty, has := m[t.Name]; has {
return ty
} else {
return typ
}
case TApp:
return TApp{
Type: SubstConst(t.Type, m),
Types: data.MapSlice(t.Types, func(t Type) Type { return SubstConst(t, m) }),
Span: t.Span,
}
case TArrow:
return TArrow{
Args: data.MapSlice(t.Args, func(t Type) Type { return SubstConst(t, m) }),
Ret: SubstConst(t.Ret, m),
Span: t.Span,
}
case TVar:
{
if t.Tvar.Tag == LINK {
return TVar{Tvar: &TypeVar{Tag: LINK, Type: SubstConst(t.Tvar.Type, m)}, Span: t.Span}
}
return typ
}
case TRowEmpty:
return typ
case TRecord:
return TRecord{Row: SubstConst(t.Row, m), Span: t.Span}
case TRowExtend:
return TRowExtend{
Labels: data.LabelMapValues(t.Labels, func(t Type) Type { return SubstConst(t, m) }),
Row: SubstConst(t.Row, m),
Span: t.Span,
}
case TImplicit:
return TImplicit{Type: SubstConst(t.Type, m), Span: t.Span}
default:
panic("Got unknow type in SubstConst")
}
}
// Recursively walks this type up->bottom
func EverywhereTypeUnit(this Type, f func(Type)) {
var run func(Type)
run = func(typ Type) {
f(typ)
switch t := typ.(type) {
case TApp:
{
run(t.Type)
for _, ty := range t.Types {
run(ty)
}
}
case TArrow:
{
for _, ty := range t.Args {
run(ty)
}
run(t.Ret)
}
case TVar:
if t.Tvar.Tag == LINK {
run(t.Tvar.Type)
}
case TRecord:
run(t.Row)
case TRowExtend:
{
run(t.Row)
for _, ty := range t.Labels.Values() {
run(ty)
}
}
case TImplicit:
run(t.Type)
}
}
run(this)
}
func NestArrows(args []Type, ret Type) Type {
if len(args) == 0 {
return ret
}
return TArrow{Args: []Type{args[0]}, Ret: NestArrows(args[1:], ret)}
} | compiler/ast/type.go | 0.572723 | 0.451568 | type.go | starcoder |
package successor
// feasible returns the domain nodes that can legally follow index1 directly.
// A candidate index2 is rejected when linking (index1 -> index2) would close
// a subtour (both nodes already share the same partial-fragment set) or
// would contradict an ordering constraint recorded in the pred/succ sets.
// NOTE(review): partial/pred/succ are assumed to be shared boolean
// membership sets per node — confirm against the State definition.
func (s *State) feasible(index1 int) []int {
	f := make([]int, 0, len(s.domain))
	for _, index2 := range s.domain {
		// (i j) would create a cycle
		if s.partial[index1] == s.partial[index2] {
			continue
		}
		// index2 already ordered before index1's fragment, or index1's
		// predecessors overlap index2's fragment
		if intersect(s.partial[index1], s.succ[index2]) || intersect(s.pred[index1], s.partial[index2]) {
			continue
		}
		// mutual precedence conflict between the two nodes' pred/succ sets
		if intersect(s.pred[index1], s.succ[index2]) || intersect(s.succ[index1], s.pred[index2]) {
			continue
		}
		f = append(f, index2)
	}
	return f
}
// nextCost returns the accumulated cost after appending the arc
// (index1 -> index2) to the current state.
// NOTE(review): the error from Problem.Cost is intentionally discarded —
// confirm Cost cannot fail for pairs that passed the feasibility check.
func (s *State) nextCost(index1, index2 int) int64 {
	node1 := s.problem.Nodes[index1]
	node2 := s.problem.Nodes[index2]
	cost, _ := s.problem.Cost(node1, node2)
	return s.cost + cost
}
// nextDomain returns a copy of the current domain with index2 removed.
func (s *State) nextDomain(index2 int) []int {
	next := make([]int, 0, len(s.domain)-1)
	for _, candidate := range s.domain {
		if candidate == index2 {
			continue
		}
		next = append(next, candidate)
	}
	return next
}
// nextPartial returns the fragment-membership table after merging the
// fragments containing index1 and index2: every member of either fragment
// is pointed at one shared, newly allocated membership set, while all other
// nodes keep their existing (possibly shared) set pointers. The pointer
// sharing is what makes the cycle test in feasible a simple == comparison.
func (s *State) nextPartial(index1, index2 int) []*[]bool {
	u := union(s.partial[index1], s.partial[index2])
	partial := make([]*[]bool, len(s.partial))
	for index := range s.partial {
		if (*u)[index] {
			partial[index] = u
		} else {
			partial[index] = s.partial[index]
		}
	}
	return partial
}
// nextPrev returns a copy of the predecessor table in which index2's
// immediate predecessor is set to index1.
func (s *State) nextPrev(index1, index2 int) []int {
	prev := make([]int, len(s.prev))
	copy(prev, s.prev)
	prev[index2] = index1
	return prev
}
// nextNext returns a copy of the successor table in which index1's
// immediate successor is set to index2.
func (s *State) nextNext(index1, index2 int) []int {
	next := make([]int, len(s.next))
	copy(next, s.next)
	next[index1] = index2
	return next
}
// nextPred returns the predecessor-set table after the fragments of index1
// and index2 are merged: every node of the merged fragment (given by
// partial) shares one combined predecessor set with the fragment's own
// members excluded; all other entries keep their existing pointers.
func (s *State) nextPred(index1, index2 int, partial *[]bool) []*[]bool {
	merged := unionMinus(s.pred[index1], s.pred[index2], partial)
	pred := make([]*[]bool, len(s.pred))
	copy(pred, s.pred)
	for index, inFragment := range *partial {
		if inFragment {
			pred[index] = merged
		}
	}
	return pred
}
// nextSucc returns the successor-set table after the fragments of index1
// and index2 are merged: every node of the merged fragment (given by
// partial) shares one combined successor set with the fragment's own
// members excluded; all other entries keep their existing pointers.
func (s *State) nextSucc(index1, index2 int, partial *[]bool) []*[]bool {
	merged := unionMinus(s.succ[index1], s.succ[index2], partial)
	succ := make([]*[]bool, len(s.succ))
	copy(succ, s.succ)
	for index, inFragment := range *partial {
		if inFragment {
			succ[index] = merged
		}
	}
	return succ
}
// inferPred propagates predecessor information IN PLACE: every known
// successor of index inherits index's predecessors plus index's own
// fragment, minus the successor's own fragment members.
// NOTE(review): unlike the next* methods, this mutates s.pred directly —
// confirm callers expect in-place mutation.
// NOTE(review): oldToNew is indexed by successor, but each successor index
// is visited at most once in this loop, so the cache branch never hits;
// possibly it was meant to be keyed on the old shared set pointer instead.
func (s *State) inferPred(index int) {
	inferredPred := union(s.pred[index], s.partial[index])
	oldToNew := make([]*[]bool, len(s.succ))
	for successor, v := range *s.succ[index] {
		if !v {
			continue
		}
		if oldToNew[successor] != nil {
			s.pred[successor] = oldToNew[successor]
		} else {
			oldToNew[successor] = unionMinus(inferredPred, s.pred[successor], s.partial[successor])
			s.pred[successor] = oldToNew[successor]
		}
	}
}
// inferSucc is the mirror of inferPred: every known predecessor of index
// inherits index's successors plus index's own fragment, minus the
// predecessor's own fragment members. Mutates s.succ in place.
// NOTE(review): as in inferPred, each predecessor index is visited at most
// once, so the oldToNew cache branch is never taken.
func (s *State) inferSucc(index int) {
	inferredSucc := union(s.succ[index], s.partial[index])
	oldToNew := make([]*[]bool, len(s.succ))
	for predecessor, v := range *s.pred[index] {
		if !v {
			continue
		}
		if oldToNew[predecessor] != nil {
			s.succ[predecessor] = oldToNew[predecessor]
		} else {
			oldToNew[predecessor] = unionMinus(inferredSucc, s.succ[predecessor], s.partial[predecessor])
			s.succ[predecessor] = oldToNew[predecessor]
		}
	}
}
// intersect reports whether the two boolean membership sets share at least
// one member. Both sets are assumed to have the same length.
func intersect(set1, set2 *[]bool) bool {
	a, b := *set1, *set2
	for i := range a {
		if a[i] && b[i] {
			return true
		}
	}
	return false
}
// union returns a newly allocated set containing every member of either
// input set. Both sets are assumed to have the same length.
func union(set1, set2 *[]bool) *[]bool {
	out := make([]bool, len(*set1))
	for i, in1 := range *set1 {
		out[i] = in1 || (*set2)[i]
	}
	return &out
}
// unionMinus returns (set1 ∪ set2) \ out as a newly allocated set. All
// three sets are assumed to have the same length.
func unionMinus(set1, set2, out *[]bool) *[]bool {
	res := make([]bool, len(*set1))
	for i := range res {
		res[i] = !(*out)[i] && ((*set1)[i] || (*set2)[i])
	}
	return &res
}
// the fastcheck.OppoBloomFilter provides two methods instead of one, a contains and an add. This makes it possible to
// check and then optionally add the value. It is possible that two threads may race and add it multiple times
package fastcheck
import (
"bytes"
"context"
"crypto/md5" //nolint:gosec
"errors"
"hash"
"math"
"sync/atomic"
"unsafe"
"github.com/flyteorg/flytestdlib/promutils"
)
// Sentinel errors returned by NewOppoBloomFilter for out-of-range sizes.
var ErrSizeTooLarge = errors.New("oppobloom: size given too large to round to a power of 2")
var ErrSizeTooSmall = errors.New("oppobloom: filter cannot have a zero or negative size")

// MaxFilterSize is the largest permitted filter size (2^30 slots).
var MaxFilterSize = 1 << 30

// validate that it conforms to the interface
var _ Filter = &OppoBloomFilter{}
// md5UintHash adapts an md5 hash.Hash so it can produce a uint32 index.
type md5UintHash struct {
	hash.Hash // a hack with knowledge of how md5 works
}

// Sum32 folds the current digest into a uint32 by mixing its first three
// bytes (sum[0..2]) with shift-add steps.
// NOTE(review): with only 3 bytes and 3-bit shifts the result is bounded by
// ~18615, so filters larger than that many slots can never use their upper
// slots — confirm whether this matches the upstream oppobloom behavior
// intentionally.
func (m md5UintHash) Sum32() uint32 {
	sum := m.Sum(nil)
	x := uint32(sum[0])
	for _, val := range sum[1:3] {
		x = x << 3
		x += uint32(val)
	}
	return x
}
// Implementation of the oppoFilter proposed in https://github.com/jmhodges/opposite_of_a_bloom_filter/ and
// the related blog https://www.somethingsimilar.com/2012/05/21/the-opposite-of-a-bloom-filter/
// Each slot remembers the LAST id hashed to it, so Contains==true is exact
// while Contains==false may be a previously-added id that was overwritten.
type OppoBloomFilter struct {
	array    []*[]byte // slot table; length is a power of two
	sizeMask uint32    // len(array)-1, used to mask the hash into an index
	metrics  Metrics   // hit/miss counters
}
// getIndex calculates the hash index of the given id: md5 of the id, folded
// to a uint32 via md5UintHash, masked to the table size. The error return
// of Write is ignored; hash.Hash.Write is documented never to fail.
func (f *OppoBloomFilter) getIndex(id []byte) int32 {
	//nolint:gosec
	h := md5UintHash{md5.New()}
	h.Write(id)
	uindex := h.Sum32() & f.sizeMask
	return int32(uindex)
}
// Add atomically stores id in its hash slot and reports whether the slot
// previously held something other than id (true = id was not the most
// recent occupant). Two racing callers may both observe true for the same id.
func (f *OppoBloomFilter) Add(_ context.Context, id []byte) bool {
	oldID := getAndSet(f.array, f.getIndex(id), id)
	return !bytes.Equal(oldID, id)
}
// Contains reports whether id currently occupies its hash slot (exact byte
// comparison), incrementing the hit or miss metric accordingly. A true
// result means id was definitely added; a false result may still be an id
// that was added and later overwritten by a colliding entry.
func (f *OppoBloomFilter) Contains(_ context.Context, id []byte) bool {
	curr := get(f.array, f.getIndex(id))
	if curr != nil {
		if bytes.Equal(id, *curr) {
			f.metrics.Hit.Inc()
			return true
		}
	}
	f.metrics.Miss.Inc()
	return false
}
// Helper methods

// get atomically loads the slot pointer at the given index; nil when the
// slot has never been written.
func get(arr []*[]byte, index int32) *[]byte {
	indexPtr := (*unsafe.Pointer)(unsafe.Pointer(&arr[index]))
	return (*[]byte)(atomic.LoadPointer(indexPtr))
}
// getAndSet Returns the id that was in the slice at the given index after putting the
// new id in the slice at that index, atomically. The CAS loop retries until
// the swap succeeds against concurrent writers; a nil previous slot yields a
// nil-slice return.
func getAndSet(arr []*[]byte, index int32, id []byte) []byte {
	indexPtr := (*unsafe.Pointer)(unsafe.Pointer(&arr[index]))
	idUnsafe := unsafe.Pointer(&id)
	var oldID []byte
	for {
		// load-compare-swap: on CAS failure another writer won; reload and retry
		oldIDUnsafe := atomic.LoadPointer(indexPtr)
		if atomic.CompareAndSwapPointer(indexPtr, oldIDUnsafe, idUnsafe) {
			oldIDPtr := (*[]byte)(oldIDUnsafe)
			if oldIDPtr != nil {
				oldID = *oldIDPtr
			}
			break
		}
	}
	return oldID
}
// NewOppoBloomFilter creates a new Opposite of Bloom filter proposed in https://github.com/jmhodges/opposite_of_a_bloom_filter/ and
// the related blog https://www.somethingsimilar.com/2012/05/21/the-opposite-of-a-bloom-filter/
// size must be in (0, MaxFilterSize]; it is rounded UP to the next power of
// two so that sizeMask (size-1) can mask hashes into valid indices.
func NewOppoBloomFilter(size int, scope promutils.Scope) (*OppoBloomFilter, error) {
	if size > MaxFilterSize {
		return nil, ErrSizeTooLarge
	}
	if size <= 0 {
		return nil, ErrSizeTooSmall
	}
	// round to the next largest power of two (exact for float64 since
	// size <= 2^30; MaxFilterSize itself is a power of two, so the result
	// never exceeds the limit checked above)
	size = int(math.Pow(2, math.Ceil(math.Log2(float64(size)))))
	slice := make([]*[]byte, size)
	sizeMask := uint32(size - 1)
	return &OppoBloomFilter{slice, sizeMask, newMetrics(scope)}, nil
}
package lexer
import (
"regexp"
"token"
)
// Pattern binds a compiled regular expression to the token type it produces.
type Pattern struct {
	expr *regexp.Regexp // anchored expression matched at the current input position
	kind token.TokenType // token type emitted on a match
}
// Error records a lexing error: the source position at which unrecognized
// input was found and the text that was skipped.
type Error struct {
	RowIndex int
	ColumnIndex int
	Value string
}
// Lexer scans an input expression into tokens, accumulating any
// unrecognized spans as errors. Patterns are tried in registration order.
// NOTE: "columtIndex" is a long-standing typo for columnIndex kept for
// compatibility with the rest of the package.
type Lexer struct {
	patterns []Pattern // token patterns, tried in order
	whiteSpacesReg *regexp.Regexp // matches leading whitespace to skip
	absorbErrorReg *regexp.Regexp // matches the span to consume after a lex failure
	rowIndex int // current row in the input
	columtIndex int // current column in the input
	tokens []token.Token
	errors []Error
}
// trim strips a leading whitespace match from expression, advancing the
// column index by the length consumed. The input is returned unchanged when
// nothing matches. NOTE(review): assumes whiteSpacesReg is anchored at the
// start of the string — confirm where the regex is initialized.
func (self *Lexer) trim(expression string) string {
	loc := self.whiteSpacesReg.FindStringIndex(expression)
	if loc == nil {
		return expression
	}
	self.columtIndex += loc[1]
	return expression[loc[1]:]
}
// absorbError consumes the erroneous span matched by absorbErrorReg,
// advancing the column index. It returns the remaining input and the
// consumed text; when nothing matches, the input is returned unchanged with
// an empty error value.
func (self *Lexer) absorbError(expression string) (string, string) {
	loc := self.absorbErrorReg.FindStringIndex(expression)
	if loc == nil {
		return expression, ""
	}
	self.columtIndex += loc[1]
	return expression[loc[1]:], expression[loc[0]:loc[1]]
}
// appendError records a lexing error at the current position, consuming the
// unrecognized span via absorbError, and returns the remaining input.
func (self *Lexer) appendError(expression string) string {
	var errorElem Error
	// capture the position BEFORE absorbing, so the error points at the
	// start of the bad input
	errorElem.ColumnIndex = self.columtIndex
	errorElem.RowIndex = self.rowIndex
	expression, errorElem.Value = self.absorbError(expression)
	self.errors = append(self.errors, errorElem)
	return expression
}
// parseTokenByExp tries a single pattern against the head of expressionSrc.
// On a match it consumes the lexeme, advances the column counter and returns
// the populated token; otherwise the input is returned untouched together
// with a zero-valued (ILLEGAL) token.
func (self *Lexer) parseTokenByExp(expressionSrc string, re regexp.Regexp, tokenType token.TokenType) (expression string, tok token.Token) {
	expression = expressionSrc
	slice := re.FindStringIndex(expression)
	if slice == nil {
		return expression, tok
	}
	// Result renamed tok so the token package is not shadowed here.
	tok = token.Token{
		ColumnIndex: self.columtIndex,
		RowIndex:    self.rowIndex,
		TokenType:   tokenType,
		Value:       expression[slice[0]:slice[1]],
	}
	self.columtIndex += slice[1]
	return expression[slice[1]:], tok
}
// parseToken strips leading whitespace and then tries every configured
// pattern in order, returning the first successful token. When nothing
// matches, the offending text is recorded as an error and skipped.
func (self *Lexer) parseToken(expressionSrc string) (expression string, tokenElem token.Token) {
	expression = self.trim(expressionSrc)
	for _, p := range self.patterns {
		expression, tokenElem = self.parseTokenByExp(expression, *p.expr, p.kind)
		if tokenElem.TokenType != token.ILLEGAL {
			// First matching pattern wins.
			return expression, tokenElem
		}
	}
	// No pattern matched: absorb the bad text as a lexing error.
	expression = self.appendError(expression)
	return expression, tokenElem
}
func (self *Lexer) ParseTokens(expression string) ([]token.Token, []Error) {
self.rowIndex = 0
self.columtIndex = 0
pattern := Pattern {
expr: regexp.MustCompile(`^((\/\*).*(\*\/))`),
kind: token.COMMENT,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^((\").*(\"))`),
kind: token.STRING_VALUE,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(int)`),
kind: token.INT,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(float)`),
kind: token.FLOAT,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(string)`),
kind: token.STRING,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\+)`),
kind: token.ADD,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\-)`),
kind: token.SUB,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\*)`),
kind: token.MUL,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\/)`),
kind: token.QUO,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\%)`),
kind: token.REM,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\()`),
kind: token.LPAREN,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\[)`),
kind: token.LBRACK,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\{)`),
kind: token.LBRACE,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\))`),
kind: token.RPAREN,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\])`),
kind: token.RBRACK,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\})`),
kind: token.RBRACE,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(;)`),
kind: token.SEMICOLON,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(:)`),
kind: token.COLON,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(,)`),
kind: token.COMMA,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(break)`),
kind: token.BREAK,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(continue)`),
kind: token.CONTINUE,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(if)`),
kind: token.IF,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(else)`),
kind: token.ELSE,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(for)`),
kind: token.FOR,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(func)`),
kind: token.FUNC,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(return)`),
kind: token.RETURN,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(type)`),
kind: token.TYPE,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(var)`),
kind: token.VAR,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(sin)`),
kind: token.SIN,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(cos)`),
kind: token.COS,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(sqrt)`),
kind: token.SQRT,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\=\=)`),
kind: token.EQUAL,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\=)`),
kind: token.ASSIGNED,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(([0-9]*\.[0-9]+)|([0-9]+\.[0-9]*)|([0-9]+))f`),
kind: token.FLOAT_NUMBER,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^([0-9]+)`),
kind: token.INT_NUMBER,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^([a-zA-Z]+[a-zA-Z0-9]*)`),
kind: token.IDENTIFIER,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\!\=)`),
kind: token.NOT_EQUAL,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\!)`),
kind: token.NOT,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\>)`),
kind: token.GREATE,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\&\&)`),
kind: token.AND,
}
self.patterns = append(self.patterns, pattern)
pattern = Pattern {
expr: regexp.MustCompile(`^(\|\|)`),
kind: token.OR,
}
self.patterns = append(self.patterns, pattern)
self.whiteSpacesReg = regexp.MustCompile(`^(\s+)`)
self.absorbErrorReg = regexp.MustCompile(`^(.[^\s]+)`)
var t token.Token
for expression != "" {
expression, t = self.parseToken(expression)
self.tokens = append(self.tokens, t)
expression = self.trim(expression)
}
self.tokens = append(self.tokens, token.Token{
TokenType: token.EOF,
})
return self.tokens, self.errors
} | src/lexer/lexer.go | 0.661923 | 0.448426 | lexer.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.