// NOTE(review): dataset header row removed — extraction artifact, not Go source.
package movers
import (
"github.com/wieku/danser-go/app/beatmap/objects"
"github.com/wieku/danser-go/app/bmath"
"github.com/wieku/danser-go/app/settings"
"github.com/wieku/danser-go/framework/math/curves"
"github.com/wieku/danser-go/framework/math/math32"
"github.com/wieku/danser-go/framework/math/vector"
"math"
)
// AngleOffsetMover moves the cursor between beatmap objects along a Bezier
// curve whose control points are pushed out at a configurable angle offset,
// producing "flower"-style cursor motion.
type AngleOffsetMover struct {
	lastAngle          float32         // angle (radians) used by the previous transition
	lastPoint          vector.Vector2f // end position of the previous transition
	bz                 *curves.Bezier  // curve currently being followed by Update
	startTime, endTime int64           // time window of the current transition
	invert             float32         // +1 or -1; flips which side the angle offset bends toward
}
// NewAngleOffsetMover creates an AngleOffsetMover in its initial state
// (no accumulated angle, offset direction not yet inverted).
func NewAngleOffsetMover() MultiPointMover {
	mover := &AngleOffsetMover{}
	mover.invert = 1
	return mover
}
// Reset returns the mover to its initial state, as produced by NewAngleOffsetMover.
func (bm *AngleOffsetMover) Reset() {
	bm.lastPoint = vector.NewVec2f(0, 0)
	bm.lastAngle = 0
	bm.invert = 1
}
// SetObjects prepares a Bezier transition from objs[0] (the object being left)
// to objs[1] (the object being approached) and returns the number of objects
// consumed (always 2). The control points depend on whether either endpoint is
// a slider, on the configured Flower settings, and on state accumulated from
// previous transitions (lastAngle, lastPoint, invert).
func (bm *AngleOffsetMover) SetObjects(objs []objects.BaseObject) int {
	end := objs[0]
	start := objs[1]
	endPos := end.GetBasicData().EndPos
	endTime := end.GetBasicData().EndTime
	startPos := start.GetBasicData().StartPos
	startTime := start.GetBasicData().StartTime
	distance := endPos.Dst(startPos)
	// Sliders expose entry/exit angles used to align the curve's control points.
	s1, ok1 := end.(*objects.Slider)
	s2, ok2 := start.(*objects.Slider)
	var points []vector.Vector2f
	scaledDistance := distance * float32(settings.Dance.Flower.DistanceMult)
	newAngle := float32(settings.Dance.Flower.AngleOffset) * math32.Pi / 180.0
	// Long jumps: scale control-point distance by the time gap instead of the
	// spatial distance. The StartTime > 0 guard presumably skips the very first
	// object of the map — TODO confirm against caller behavior.
	if end.GetBasicData().StartTime > 0 && settings.Dance.Flower.LongJump >= 0 && (startTime-endTime) > settings.Dance.Flower.LongJump {
		scaledDistance = float32(startTime-endTime) * float32(settings.Dance.Flower.LongJumpMult)
	}
	if endPos == startPos {
		// Both objects share a position (e.g. stacked notes).
		if settings.Dance.Flower.LongJumpOnEqualPos {
			scaledDistance = float32(startTime-endTime) * float32(settings.Dance.Flower.LongJumpMult)
			// For non-simultaneous objects, swing back the opposite way.
			if math.Abs(float64(startTime-endTime)) > 1 {
				bm.lastAngle += math.Pi
			}
			pt1 := vector.NewVec2fRad(bm.lastAngle, scaledDistance).Add(endPos)
			if ok1 {
				// Leaving a slider: align the first control point with its exit angle.
				pt1 = vector.NewVec2fRad(s1.GetEndAngle(), scaledDistance).Add(endPos)
			}
			if !ok2 {
				angle := bm.lastAngle - newAngle*bm.invert
				pt2 := vector.NewVec2fRad(angle, scaledDistance).Add(startPos)
				// Only persist the new angle when the objects are not simultaneous.
				if math.Abs(float64(startTime-endTime)) > 1 {
					bm.lastAngle = angle
				}
				points = []vector.Vector2f{endPos, pt1, pt2, startPos}
			} else {
				// Entering a slider: align the second control point with its entry angle.
				pt2 := vector.NewVec2fRad(s2.GetStartAngle(), scaledDistance).Add(startPos)
				points = []vector.Vector2f{endPos, pt1, pt2, startPos}
			}
		} else {
			// Degenerate straight "curve" between the two identical positions.
			points = []vector.Vector2f{endPos, startPos}
		}
	} else if ok1 && ok2 {
		// Slider to slider: both control points follow the sliders' angles;
		// alternate the bend side each time.
		bm.invert = -1 * bm.invert
		pt1 := vector.NewVec2fRad(s1.GetEndAngle(), scaledDistance).Add(endPos)
		pt2 := vector.NewVec2fRad(s2.GetStartAngle(), scaledDistance).Add(startPos)
		points = []vector.Vector2f{endPos, pt1, pt2, startPos}
	} else if ok1 {
		// Slider to circle.
		bm.invert = -1 * bm.invert
		if math.Abs(float64(startTime-endTime)) > 1 {
			bm.lastAngle = endPos.AngleRV(startPos) - newAngle*bm.invert
		} else {
			// Simultaneous: mirror the slider's exit direction.
			bm.lastAngle = s1.GetEndAngle() + math.Pi
		}
		pt1 := vector.NewVec2fRad(s1.GetEndAngle(), scaledDistance).Add(endPos)
		pt2 := vector.NewVec2fRad(bm.lastAngle, scaledDistance).Add(startPos)
		points = []vector.Vector2f{endPos, pt1, pt2, startPos}
	} else if ok2 {
		// Circle to slider.
		if math.Abs(float64(startTime-endTime)) > 1 {
			bm.lastAngle += math.Pi
		}
		pt1 := vector.NewVec2fRad(bm.lastAngle, scaledDistance).Add(endPos)
		pt2 := vector.NewVec2fRad(s2.GetStartAngle(), scaledDistance).Add(startPos)
		points = []vector.Vector2f{endPos, pt1, pt2, startPos}
	} else {
		// Circle to circle. If the stream bends sharply enough, flip the bend
		// side and use the (smaller) stream angle offset instead.
		if math.Abs(float64(startTime-endTime)) > 1 && bmath.AngleBetween32(endPos, bm.lastPoint, startPos) >= float32(settings.Dance.Flower.AngleOffset)*math32.Pi/180.0 {
			bm.invert = -1 * bm.invert
			newAngle = float32(settings.Dance.Flower.StreamAngleOffset) * math32.Pi / 180.0
		}
		angle := endPos.AngleRV(startPos) - newAngle*bm.invert
		if math.Abs(float64(startTime-endTime)) <= 1 {
			angle = bm.lastAngle
		}
		pt1 := vector.NewVec2fRad(bm.lastAngle+math.Pi, scaledDistance).Add(endPos)
		pt2 := vector.NewVec2fRad(angle, scaledDistance).Add(startPos)
		// Only remember the angle for meaningful jumps (tiny distances would
		// make the stored direction noisy).
		if scaledDistance > 2 {
			bm.lastAngle = angle
		}
		points = []vector.Vector2f{endPos, pt1, pt2, startPos}
	}
	bm.bz = curves.NewBezierNA(points)
	bm.endTime = endTime
	bm.startTime = startTime
	bm.lastPoint = endPos
	return 2
}
// Update returns the cursor position at the given time, interpolating along
// the current Bezier curve. Time is clamped to the [endTime, startTime] window.
func (bm *AngleOffsetMover) Update(time int64) vector.Vector2f {
	progress := float32(time-bm.endTime) / float32(bm.startTime-bm.endTime)
	return bm.bz.PointAt(bmath.ClampF32(progress, 0, 1))
}
// GetEndTime returns the time at which the current movement finishes, i.e.
// the start time of the object being moved to.
func (bm *AngleOffsetMover) GetEndTime() int64 {
	return bm.startTime
}
package util
// PacketHeader is the header sent with every telemetry packet.
// Wire size: 24 bytes.
type PacketHeader struct {
	PacketFormat            uint16  // 2020
	GameMajorVersion        uint8   // Game major version - "X.00"
	GameMinorVersion        uint8   // Game minor version - "1.XX"
	PacketVersion           uint8   // Version of this packet type, all start from 1
	PacketID                uint8   // Identifier for the packet type, see appendix
	SessionUID              uint64  // Unique identifier for the session
	SessionTime             float32 // Session timestamp
	FrameIdentifier         uint32  // Identifier for the frame the data was retrieved on
	PlayerCarIndex          uint8   // Index of player's car in the array
	SecondaryPlayerCarIndex uint8   // Index of secondary player's car in the array
}
// PacketMotionData gives physics data for all the cars being driven.
// The extra player-only fields (suspension, wheel data, local velocity) refer
// to the player's car.
// Note: All wheel arrays have the following order:
// RL, RR, FL, FR
// Wire size: 1440 bytes.
type PacketMotionData struct {
	CarMotionData          [22]CarMotionData // Data for all cars on track
	SuspensionPosition     [4]float32        // Suspension position of each wheel (RL, RR, FL, FR)
	SuspensionVelocity     [4]float32        // Suspension velocity of each wheel
	SuspensionAcceleration [4]float32        // Suspension acceleration of each wheel
	WheelSpeed             [4]float32        // Speed of each wheel
	WheelSlip              [4]float32        // Slip ratio for each wheel
	LocalVelocityX         float32           // Velocity in local space, x-component
	LocalVelocityY         float32           // Velocity in local space, y-component
	LocalVelocityZ         float32           // Velocity in local space, z-component
	AngularVelocityX       float32           // Angular velocity x-component
	AngularVelocityY       float32           // Angular velocity y-component
	AngularVelocityZ       float32           // Angular velocity z-component
	AngularAccelerationX   float32           // Angular acceleration x-component
	AngularAccelerationY   float32           // Angular acceleration y-component
	AngularAccelerationZ   float32           // Angular acceleration z-component
	FrontWheelsAngle       float32           // Current front wheels angle in radians
}
// CarMotionData gives physics data for a single car on track.
type CarMotionData struct {
	WorldPositionX     float32 // World space X position
	WorldPositionY     float32 // World space Y position
	WorldPositionZ     float32 // World space Z position
	WorldVelocityX     float32 // Velocity in world space X
	WorldVelocityY     float32 // Velocity in world space Y
	WorldVelocityZ     float32 // Velocity in world space Z
	WorldForwardDirX   int16   // World space forward X direction (normalised)
	WorldForwardDirY   int16   // World space forward Y direction (normalised)
	WorldForwardDirZ   int16   // World space forward Z direction (normalised)
	WorldRightDirX     int16   // World space right X direction (normalised)
	WorldRightDirY     int16   // World space right Y direction (normalised)
	WorldRightDirZ     int16   // World space right Z direction (normalised)
	GForceLateral      float32 // Lateral G-Force component
	GForceLongitudinal float32 // Longitudinal G-Force component
	GForceVertical     float32 // Vertical G-Force component
	Yaw                float32 // Yaw angle in radians
	Pitch              float32 // Pitch angle in radians
	Roll               float32 // Roll angle in radians
}
// PacketSessionData includes details about the current session in progress.
// Wire size: 227 bytes.
type PacketSessionData struct {
	Weather                  uint8                     // Weather - 0 = clear, 1 = light cloud, 2 = overcast, 3 = light rain, 4 = heavy rain, 5 = storm
	TrackTemperature         int8                      // Track temp. in degrees celsius
	AirTemperature           int8                      // Air temp. in degrees celsius
	TotalLaps                uint8                     // Total number of laps in this race
	TrackLength              uint16                    // Track length in metres
	SessionType              uint8                     // 0 = unknown, 1 = P1, 2 = P2, 3 = P3, 4 = Short P, 5 = Q1, 6 = Q2, 7 = Q3, 8 = Short Q, 9 = OSQ, 10 = R, 11 = R2, 12 = Time Trial
	TrackID                  int8                      // -1 for unknown, 0-21 for tracks, see appendix
	Formula                  uint8                     // Formula, 0 = F1 Modern, 1 = F1 Classic, 2 = F2, 3 = F1 Generic
	SessionTimeLeft          uint16                    // Time left in session in seconds
	SessionDuration          uint16                    // Session duration in seconds
	PitSpeedLimit            uint8                     // Pit speed limit in kilometres per hour
	GamePaused               uint8                     // Whether the game is paused
	IsSpectating             uint8                     // Whether the player is spectating
	SpectatorCarIndex        uint8                     // Index of the car being spectated
	SLIProNativeSupport      uint8                     // SLI Pro support, 0 = inactive, 1 = active
	NumMarshalZones          uint8                     // Number of marshal zones to follow
	MarshalZones             [21]MarshalZone           // List of marshal zones – max 21
	SafetyCarStatus          uint8                     // 0 = no safety car, 1 = full safety car, 2 = virtual safety car
	NetworkGame              uint8                     // 0 = offline, 1 = online
	NumWeatherForecastSample uint8                     // Number of weather forecast samples to follow
	WeatherForecastSamples   [20]WeatherForecastSample // Array of weather forecast samples
}
// MarshalZone describes one marshal zone on the track and the flag currently
// being shown in it.
type MarshalZone struct {
	ZoneStart float32 // Fraction (0..1) of way through the lap the marshal zone starts
	ZoneFlag  int8    // -1 = invalid/unknown, 0 = none, 1 = green, 2 = blue, 3 = yellow, 4 = red
}

// WeatherForecastSample is a single weather forecast entry for an upcoming
// point in a session (TimeOffset minutes ahead).
type WeatherForecastSample struct {
	SessionType      uint8 // 0 = unknown, 1 = P1, 2 = P2, 3 = P3, 4 = Short P, 5 = Q1, 6 = Q2, 7 = Q3, 8 = Short Q, 9 = OSQ, 10 = R, 11 = R2, 12 = Time Trial
	TimeOffset       uint8 // Time in minutes the forecast is for
	Weather          uint8 // 0 = clear, 1 = light cloud, 2 = overcast, 3 = light rain, 4 = heavy rain, 5 = storm
	TrackTemperature int8  // Track temp. in degrees celsius
	AirTemperature   int8  // Air temp. in degrees celsius
}
// PacketLapData gives lap details of all the cars in the session.
// Wire size: 1166 bytes.
type PacketLapData struct {
	LapData [22]LapData
}

// LapData gives lap details of a single car in the session.
type LapData struct {
	LastLapTime                float32 // Last lap time in seconds
	CurrentLapTime             float32 // Current time around the lap in seconds
	Sector1TimeInMS            uint16  // Sector 1 time in milliseconds
	Sector2TimeInMS            uint16  // Sector 2 time in milliseconds
	BestLapTime                float32 // Best lap time of the session in seconds
	BestLapNum                 uint8   // Lap number best time achieved on
	BestLapSector1TimeInMS     uint16  // Sector 1 time of best lap in the session in milliseconds
	BestLapSector2TimeInMS     uint16  // Sector 2 time of best lap in the session in milliseconds
	BestLapSector3TimeInMS     uint16  // Sector 3 time of best lap in the session in milliseconds
	BestOverallSector1TimeInMS uint16  // Best overall sector 1 time of the session in milliseconds
	BestOverallSector1LapNum   uint8   // Lap number best overall sector 1 time achieved on
	BestOverallSector2TimeInMS uint16  // Best overall sector 2 time of the session in milliseconds
	BestOverallSector2LapNum   uint8   // Lap number best overall sector 2 time achieved on
	BestOverallSector3TimeInMS uint16  // Best overall sector 3 time of the session in milliseconds
	BestOverallSector3LapNum   uint8   // Lap number best overall sector 3 time achieved on
	LapDistance                float32 // Distance vehicle is around current lap in metres – could be negative if line hasn’t been crossed yet
	TotalDistance              float32 // Total distance travelled in session in metres – could be negative if line hasn’t been crossed yet
	SafetyCarDelta             float32 // Delta in seconds for safety car
	CarPosition                uint8   // Car race position
	CurrentLapNum              uint8   // Current lap number
	PitStatus                  uint8   // 0 = none, 1 = pitting, 2 = in pit area
	Sector                     uint8   // 0 = sector1, 1 = sector2, 2 = sector3
	CurrentLapInvalid          uint8   // Current lap invalid - 0 = valid, 1 = invalid
	Penalties                  uint8   // Accumulated time penalties in seconds to be added
	GridPosition               uint8   // Grid position the vehicle started the race in
	DriverStatus               uint8   // Status of driver - 0 = in garage, 1 = flying lap, 2 = in lap, 3 = out lap, 4 = on track
	ResultStatus               uint8   // Result status - 0 = invalid, 1 = inactive, 2 = active, 3 = finished, 4 = disqualified, 5 = not classified, 6 = retired
}
// PacketParticipantsData contains the list of participants in the race.
// Wire size: 1189 bytes.
type PacketParticipantsData struct {
	NumActiveCars uint8 // Number of active cars in the data – should match number of cars on HUD
	Participants  [22]ParticipantData
}

// ParticipantData contains details about a single participant.
type ParticipantData struct {
	AiControlled  uint8    // Whether the vehicle is AI (1) or Human (0) controlled
	DriverID      uint8    // Driver id - see appendix
	TeamID        uint8    // Team id - see appendix
	RaceNumber    uint8    // Race number of the car
	Nationality   uint8    // Nationality of the driver
	Name          [48]byte // Name of participant in UTF-8 format – null terminated, will be truncated with … (U+2026) if too long
	YourTelemetry uint8    // The player's UDP setting, 0 = restricted, 1 = public
}
// PacketCarSetupData details the car setups for each vehicle in the session.
// Wire size: 1078 bytes.
type PacketCarSetupData struct {
	CarSetups [22]CarSetupData
}

// CarSetupData details the setup of a single vehicle.
type CarSetupData struct {
	FrontWing              uint8   // Front wing aero
	RearWing               uint8   // Rear wing aero
	OnThrottle             uint8   // Differential adjustment on throttle (percentage)
	OffThrottle            uint8   // Differential adjustment off throttle (percentage)
	FrontCamber            float32 // Front camber angle (suspension geometry)
	RearCamber             float32 // Rear camber angle (suspension geometry)
	FrontToe               float32 // Front toe angle (suspension geometry)
	RearToe                float32 // Rear toe angle (suspension geometry)
	FrontSuspension        uint8   // Front suspension
	RearSuspension         uint8   // Rear suspension
	FrontAntiRollBar       uint8   // Front anti-roll bar
	RearAntiRollBar        uint8   // Rear anti-roll bar
	FrontSuspensionHeight  uint8   // Front ride height
	RearSuspensionHeight   uint8   // Rear ride height
	BrakePressure          uint8   // Brake pressure (percentage)
	BrakeBias              uint8   // Brake bias (percentage)
	RearLeftTyrePressure   float32 // Rear left tyre pressure (PSI)
	RearRightTyrePressure  float32 // Rear right tyre pressure (PSI)
	FrontLeftTyrePressure  float32 // Front left tyre pressure (PSI)
	FrontRightTyrePressure float32 // Front right tyre pressure (PSI)
	Ballast                uint8   // Ballast
	FuelLoad               float32 // Fuel load
}
// PacketCarTelemetryData details telemetry for all the cars in the race.
// Wire size: 1283 bytes.
type PacketCarTelemetryData struct {
	CarTelemetryData             [22]CarTelemetryData
	ButtonStatus                 uint32 // Bit flags specifying which buttons are being pressed currently - see appendices
	MFDPanelIndex                uint8  // Index of MFD panel open - 255 = MFD closed, Single player, race – 0 = Car setup, 1 = Pits, 2 = Damage, 3 = Engine, 4 = Temperatures
	MFDPanelIndexSecondaryPlayer uint8  // See above
	SuggestedGear                int8   // Suggested gear for the player (1-8), 0 if no gear suggested
}

// CarTelemetryData details telemetry for a single car in the race.
// Note: All wheel arrays have the following order:
// RL, RR, FL, FR
type CarTelemetryData struct {
	Speed                   uint16     // Speed of car in kilometres per hour
	Throttle                float32    // Amount of throttle applied (0.0 to 1.0)
	Steer                   float32    // Steering (-1.0 (full lock left) to 1.0 (full lock right))
	Brake                   float32    // Amount of brake applied (0.0 to 1.0)
	Clutch                  uint8      // Amount of clutch applied (0 to 100)
	Gear                    int8       // Gear selected (1-8, N=0, R=-1)
	EngineRPM               uint16     // Engine RPM
	DRS                     uint8      // 0 = off, 1 = on
	RevLightsPercent        uint8      // Rev lights indicator (percentage)
	BrakesTemperature       [4]uint16  // Brakes temperature (celsius)
	TyresSurfaceTemperature [4]uint8   // Tyres surface temperature (celsius)
	TyresInnerTemperature   [4]uint8   // Tyres inner temperature (celsius)
	EngineTemperature       uint16     // Engine temperature (celsius)
	TyresPressure           [4]float32 // Tyres pressure (PSI)
	SurfaceType             [4]uint8   // Driving surface, see appendices
}
// PacketCarStatusData details car statuses for all the cars in the race.
// Wire size: 1320 bytes.
type PacketCarStatusData struct {
	CarStatusData [22]CarStatusData
}

// CarStatusData details the status of a single car in the race.
// Note: All wheel arrays have the following order:
// RL, RR, FL, FR
type CarStatusData struct {
	TractionControl         uint8   // 0 (off) - 2 (high)
	AntiLockBrakes          uint8   // 0 (off) - 1 (on)
	FuelMix                 uint8   // Fuel mix - 0 = lean, 1 = standard, 2 = rich, 3 = max
	FrontBrakeBias          uint8   // Front brake bias (percentage)
	PitLimiterStatus        uint8   // Pit limiter status - 0 = off, 1 = on
	FuelInTank              float32 // Current fuel mass
	FuelCapacity            float32 // Fuel capacity
	FuelRemainingLaps       float32 // Fuel remaining in terms of laps (value on MFD)
	MaxRPM                  uint16  // Cars max RPM, point of rev limiter
	IdleRPM                 uint16  // Cars idle RPM
	MaxGears                uint8   // Maximum number of gears
	DRSAllowed              uint8   // 0 = not allowed, 1 = allowed (spec also lists -1 = unknown, which uint8 cannot represent — TODO confirm decoded values)
	DRSActivationDistance   uint16  // 0 = DRS not available, non-zero - DRS will be available in [X] metres
	TyresWear               [4]uint8 // Tyre wear percentage
	ActualTyreCompound      uint8   // Technical tyre compound name (different by Formula)
	VisualTyreCompound      uint8   // Tyre compound name in everyday language
	TyresAgeLaps            uint8   // Age in laps of the current set of tyres
	TyresDamage             [4]uint8 // Tyre damage (percentage)
	FrontLeftWingDamage     uint8   // Front left wing damage (percentage)
	FrontRightWingDamage    uint8   // Front right wing damage (percentage)
	RearWingDamage          uint8   // Rear wing damage (percentage)
	DRSFault                uint8   // Indicator for DRS fault, 0 = OK, 1 = fault
	EngineDamage            uint8   // Engine damage (percentage)
	GearBoxDamage           uint8   // Gear box damage (percentage)
	VehicleFIAFlags         int8    // -1 = invalid/unknown, 0 = none, 1 = green, 2 = blue, 3 = yellow, 4 = red
	ERSStoreEnergy          float32 // ERS energy store in Joules
	ERSDeployMode           uint8   // ERS deployment mode, 0 = none, 1 = low, 2 = medium, 3 = high, 4 = overtake, 5 = hotlap
	ERSHarvestedThisLapMGUK float32 // ERS energy harvested this lap by MGU-K
	ERSHarvestedThisLapMGUH float32 // ERS energy harvested this lap by MGU-H
	ERSDeployedThisLap      float32 // ERS energy deployed this lap
}
// PacketFinalClassificationData details the final classification at the end of the race.
// Wire size: 815 bytes.
type PacketFinalClassificationData struct {
	NumCars            uint8                       // Number of cars in the final classification
	ClassificationData [22]FinalClassificationData // Data for every car
}

// FinalClassificationData details the final classification result for a single car.
type FinalClassificationData struct {
	Position         uint8    // Finishing position
	NumLaps          uint8    // Number of laps completed
	GridPosition     uint8    // Grid position of the car
	Points           uint8    // Number of points scored
	NumPitStops      uint8    // Number of pit stops made
	ResultStatus     uint8    // Result status - 0 = invalid, 1 = inactive, 2 = active, 3 = finished, 4 = disqualified, 5 = not classified, 6 = retired
	BestLapTime      float32  // Best lap time of the session in seconds
	TotalRaceTime    float64  // Total race time in seconds without penalties
	PenaltiesTime    uint8    // Total penalties accumulated in seconds
	NumPenalties     uint8    // Number of penalties applied to this driver
	NumTyreStints    uint8    // Number of tyres stints up to maximum
	TyreStintsActual [8]uint8 // Actual tyres used by this driver
	TyreStintsVisual [8]uint8 // Visual tyres used by this driver
}
// PacketLobbyInfoData details the players currently in a multiplayer lobby.
// Wire size: 1145 bytes.
type PacketLobbyInfoData struct {
	NumPlayers   uint8             // Number of players in the lobby data
	LobbyPlayers [22]LobbyInfoData // Data for each player
}

// LobbyInfoData details a single player currently in a multiplayer lobby.
type LobbyInfoData struct {
	AIControlled uint8    // Whether the vehicle is AI (1) or Human (0) controlled
	TeamID       uint8    // Team id - see appendix (255 if no team currently selected)
	Nationality  uint8    // Nationality of the driver
	Name         [48]byte // Name of participant in UTF-8 format – null terminated
	ReadyStatus  uint8    // 0 = not ready, 1 = ready, 2 = spectating
}
package plot
import (
"math"
)
// Axis defines how values along one dimension are mapped to canvas space.
type Axis struct {
	// Min value of the axis (in value space)
	Min float64
	// Max value of the axis (in value space)
	Max float64
	// Flip reverses the axis direction (Max maps to the low screen edge).
	Flip bool
	// Ticks generates the tick marks drawn for this axis.
	Ticks Ticks
	// MajorTicks is the number of major tick divisions.
	MajorTicks int
	// MinorTicks is the number of minor divisions per major division.
	MinorTicks int
	// Transform, when non-nil, overrides the default linear mapping.
	Transform AxisTransform
}

// AxisTransform transforms values between canvas space and value space.
type AxisTransform interface {
	// ToCanvas maps a value-space v onto [screenMin, screenMax].
	ToCanvas(axis *Axis, v float64, screenMin, screenMax Length) Length
	// FromCanvas maps a canvas coordinate s back into value space.
	FromCanvas(axis *Axis, s Length, screenMin, screenMax Length) float64
}
// NewAxis creates a new axis with undefined (NaN) bounds, automatic tick
// generation, and a default 5x5 major/minor tick layout.
func NewAxis() *Axis {
	axis := &Axis{}
	axis.Min, axis.Max = math.NaN(), math.NaN()
	axis.Ticks = AutomaticTicks{}
	axis.MajorTicks = 5
	axis.MinorTicks = 5
	return axis
}
// project maps every point in data to canvas space using the given axes and
// returns the projected points as a new slice; data itself is not modified.
func project(data []Point, x, y *Axis, bounds Rect) []Point {
	size := bounds.Size()
	points := make([]Point, len(data))
	for i, p := range data {
		p.X = x.ToCanvas(p.X, 0, size.X)
		p.Y = y.ToCanvas(p.Y, 0, size.Y)
		points[i] = p
	}
	return points
}
// projectcb maps every point in data to canvas space using the given axes and
// invokes fn with each projected point instead of allocating a result slice.
func projectcb(data []Point, x, y *Axis, bounds Rect, fn func(p Point)) {
	size := bounds.Size()
	for i := range data {
		p := data[i]
		p.X = x.ToCanvas(p.X, 0, size.X)
		p.Y = y.ToCanvas(p.Y, 0, size.Y)
		fn(p)
	}
}
// IsValid reports whether both axis bounds have been defined (neither is NaN).
func (axis *Axis) IsValid() bool {
	return !(math.IsNaN(axis.Min) || math.IsNaN(axis.Max))
}
// fixNaN replaces any undefined (NaN) bound with the default range [0, 1].
func (axis *Axis) fixNaN() {
	if math.IsNaN(axis.Min) {
		axis.Min = 0
	}
	if math.IsNaN(axis.Max) {
		axis.Max = 1
	}
}
// lowhigh returns the bounds in screen order, swapping Min and Max when the
// axis is flipped.
func (axis *Axis) lowhigh() (low, high float64) {
	low, high = axis.Min, axis.Max
	if axis.Flip {
		low, high = high, low
	}
	return low, high
}
// ToCanvas converts a value-space v to canvas space within [screenMin, screenMax].
// When a custom Transform is set, the mapping is delegated to it entirely.
func (axis *Axis) ToCanvas(v float64, screenMin, screenMax Length) Length {
	if axis.Transform != nil {
		return axis.Transform.ToCanvas(axis, v, screenMin, screenMax)
	}
	low, high := axis.lowhigh()
	// n is the normalized position of v in [0, 1] (may fall outside when v is
	// outside the axis bounds — no clamping is done here).
	n := (v - low) / (high - low)
	return screenMin + n*(screenMax-screenMin)
}

// FromCanvas converts a canvas coordinate s back to value space.
// When a custom Transform is set, the mapping is delegated to it entirely.
func (axis *Axis) FromCanvas(s Length, screenMin, screenMax Length) float64 {
	if axis.Transform != nil {
		return axis.Transform.FromCanvas(axis, s, screenMin, screenMax)
	}
	low, high := axis.lowhigh()
	n := (s - screenMin) / (screenMax - screenMin)
	return low + n*(high-low)
}
// Include widens the axis bounds so that min and max can be displayed.
// Undefined (NaN) bounds are simply replaced by the given values.
func (axis *Axis) Include(min, max float64) {
	newMin := min
	if !math.IsNaN(axis.Min) {
		newMin = math.Min(axis.Min, min)
	}
	axis.Min = newMin

	newMax := max
	if !math.IsNaN(axis.Max) {
		newMax = math.Max(axis.Max, max)
	}
	axis.Max = newMax
}
// MakeNice adjusts Min and Max to rounded values that look pleasant given the
// configured MajorTicks and MinorTicks, then replaces any still-undefined
// bound with the default range.
func (axis *Axis) MakeNice() {
	axis.Min, axis.Max = niceAxis(axis.Min, axis.Max, axis.MajorTicks, axis.MinorTicks)
	axis.fixNaN()
}
// detectAxis derives display axes from element statistics. It starts from
// copies of x and y, grows them to include every element's bounds, rounds the
// result to nice values, and finally restores any bound the caller had set
// explicitly (explicit bounds always win over detected ones).
func detectAxis(x, y *Axis, elements []Element) (X, Y *Axis) {
	// Work on copies so the caller's axes are never mutated.
	tx, ty := NewAxis(), NewAxis()
	*tx, *ty = *x, *y
	for _, element := range elements {
		if stats, ok := tryGetStats(element); ok {
			tx.Include(stats.Min.X, stats.Max.X)
			ty.Include(stats.Min.Y, stats.Max.Y)
		}
	}
	tx.Min, tx.Max = niceAxis(tx.Min, tx.Max, tx.MajorTicks, tx.MinorTicks)
	ty.Min, ty.Max = niceAxis(ty.Min, ty.Max, ty.MajorTicks, ty.MinorTicks)
	// Explicitly-set bounds on the input axes override the detected values.
	if !math.IsNaN(x.Min) {
		tx.Min = x.Min
	}
	if !math.IsNaN(x.Max) {
		tx.Max = x.Max
	}
	if !math.IsNaN(y.Min) {
		ty.Min = y.Min
	}
	if !math.IsNaN(y.Max) {
		ty.Max = y.Max
	}
	tx.fixNaN()
	ty.fixNaN()
	return tx, ty
}
// niceAxis expands [min, max] outward to the nearest multiples of a "nice"
// tick spacing derived from the requested major/minor tick counts.
// niceNumber is defined elsewhere in this package; presumably it rounds to
// 1/2/5×10^k style values — TODO confirm.
func niceAxis(min, max float64, major, minor int) (nicemin, nicemax float64) {
	span := niceNumber(max-min, false)
	tickSpacing := niceNumber(span/(float64(major*minor)-1), true)
	nicemin = math.Floor(min/tickSpacing) * tickSpacing
	nicemax = math.Ceil(max/tickSpacing) * tickSpacing
	return nicemin, nicemax
}
// ScreenSpaceTransform is an AxisTransform that applies a custom function to
// the normalized [0, 1] position. Transform and Inverse should be inverses of
// each other for round-tripping to work; either may be nil (identity).
type ScreenSpaceTransform struct {
	Transform func(v float64) float64
	Inverse   func(v float64) float64
}
// ToCanvas converts a value to canvas space, applying the custom screen-space
// transform (if any) to the normalized position.
func (tx *ScreenSpaceTransform) ToCanvas(axis *Axis, v float64, screenMin, screenMax Length) Length {
	low, high := axis.lowhigh()
	t := (v - low) / (high - low)
	if tx.Transform != nil {
		t = tx.Transform(t)
	}
	return screenMin + t*(screenMax-screenMin)
}
// FromCanvas converts canvas point to value point.
func (tx *ScreenSpaceTransform) FromCanvas(axis *Axis, s Length, screenMin, screenMax Length) float64 {
low, high := axis.lowhigh()
n := (s - screenMin) / (screenMax - screenMin)
if tx.Inverse != nil {
n = tx.Inverse(n)
}
return low + n*(high-low)
} | axis.go | 0.875188 | 0.641128 | axis.go | starcoder |
package resp
import (
"math"
"strconv"
)
// Buffer is a utility buffer for serializing RESP values.
type Buffer struct {
	B       []byte // accumulated RESP output
	scratch []byte // reusable scratch space for rendering numeric arguments
}
// Reset truncates the output buffer, retaining its capacity for reuse.
func (b *Buffer) Reset() {
	b.B = b.B[:0]
}

// SimpleString writes a RESP simple string to the buffer.
func (b *Buffer) SimpleString(s string) {
	b.B = appendSimpleString(b.B, s)
}

// BulkString writes a RESP bulk string to the buffer.
func (b *Buffer) BulkString(s string) {
	b.B = appendBulkString(b.B, s)
}

// BulkStringBytes writes a raw RESP bulk string to the buffer.
func (b *Buffer) BulkStringBytes(data []byte) {
	b.B = appendBulkStringRaw(b.B, data)
}

// Error writes a RESP error to the buffer.
func (b *Buffer) Error(err string) {
	b.B = appendError(b.B, err)
}

// Int writes a RESP integer to the buffer.
func (b *Buffer) Int(n int64) {
	b.B = appendInt(b.B, n)
}

// Array writes a RESP array header for the given element count to the buffer.
func (b *Buffer) Array(size int) {
	b.B = appendArray(b.B, size)
}

// NullArray writes a null RESP array to the buffer.
func (b *Buffer) NullArray() {
	b.B = appendNullArray(b.B)
}

// NullString writes a null RESP bulk string to the buffer.
func (b *Buffer) NullString() {
	b.B = appendNullBulkString(b.B)
}

// BulkStringArray writes an array of RESP bulk strings to the buffer.
func (b *Buffer) BulkStringArray(values ...string) {
	b.B = appendBulkStringArray(b.B, values...)
}

// IntArray writes an array of RESP integers to the buffer.
func (b *Buffer) IntArray(values ...int64) {
	b.B = appendIntArray(b.B, values...)
}

// Arg writes RESP command arguments to the buffer.
func (b *Buffer) Arg(args ...Arg) {
	for i := range args {
		a := &args[i]
		b.write(a)
	}
}
// write appends an arg to the buffer
// We can't use AppendRESP because numeric types need scratch buffer to append as bulk string
func (b *Buffer) write(a *Arg) {
switch a.typ {
case typString, typKey:
b.B = appendBulkString(b.B, a.str)
case typBuffer:
b.B = appendBulkStringRaw(b.B, a.buf)
case typInt:
b.scratch = strconv.AppendInt(b.scratch[:0], int64(a.num), 10)
b.B = appendBulkStringRaw(b.B, b.scratch)
case typFloat:
b.scratch = strconv.AppendFloat(b.scratch, math.Float64frombits(a.num), 'f', -1, 64)
b.B = appendBulkStringRaw(b.B, b.scratch)
case typUint:
b.scratch = strconv.AppendUint(b.scratch, a.num, 10)
b.B = appendBulkStringRaw(b.B, b.scratch)
case typTrue:
b.B = appendBulkString(b.B, "true")
case typFalse:
b.B = appendBulkString(b.B, "false")
default:
b.B = appendNullBulkString(b.B)
}
} | resp/buffer.go | 0.591841 | 0.522385 | buffer.go | starcoder |
package genex
import (
"math"
"regexp/syntax"
)
// Count computes the total number of matches the `input` regex would generate after whitelisting `charset`.
// The `infinite` argument caps the maximum boundary of repetition operators.
func Count(input, charset *syntax.Regexp, infinite int) float64 {
var count func(input, charset *syntax.Regexp, infinite int) float64
count = func(input, charset *syntax.Regexp, infinite int) float64 {
result := float64(0)
switch input.Op {
case syntax.OpStar, syntax.OpPlus, syntax.OpQuest, syntax.OpRepeat:
value := float64(1)
for _, sub := range input.Sub {
value *= count(sub, charset, infinite)
}
switch input.Op {
case syntax.OpStar:
input.Min = 0
input.Max = -1
case syntax.OpPlus:
input.Min = 1
input.Max = -1
case syntax.OpQuest:
input.Min = 0
input.Max = 1
}
if input.Max == -1 && infinite >= 0 {
input.Max = input.Min + infinite
}
if input.Max == -1 {
result = math.Inf(1)
} else if value > 1 {
if input.Min == input.Max {
result = math.Pow(value, float64(input.Min))
} else {
result = (math.Pow(value, float64(input.Max)+1) - 1) / (value - 1)
if input.Min > 0 {
result -= (math.Pow(value, float64(input.Min)+0) - 1) / (value - 1)
}
}
} else {
result = float64(input.Max-input.Min) + 1
}
case syntax.OpCharClass, syntax.OpAnyCharNotNL, syntax.OpAnyChar:
if input.Op != syntax.OpCharClass {
input = charset
}
for i := 0; i < len(input.Rune); i += 2 {
for j := 0; j < len(charset.Rune); j += 2 {
bounds := []float64{
math.Max(float64(input.Rune[i]), float64(charset.Rune[j])),
math.Min(float64(input.Rune[i+1]), float64(charset.Rune[j+1])),
}
if bounds[0] <= bounds[1] {
result += bounds[1] - bounds[0] + 1
}
}
}
case syntax.OpCapture, syntax.OpConcat:
result = 1
for _, sub := range input.Sub {
result *= count(sub, charset, infinite)
}
case syntax.OpAlternate:
for _, sub := range input.Sub {
result += count(sub, charset, infinite)
}
default:
result = 1
}
if math.IsNaN(result) {
result = math.Inf(1)
}
return math.Max(1, result)
}
if charset.Op != syntax.OpCharClass {
charset, _ = syntax.Parse(`[[:print:]]`, syntax.Perl)
}
return count(input, charset, infinite)
} | count.go | 0.76934 | 0.41117 | count.go | starcoder |
package main
import (
"math"
"github.com/jakoblorz/sdfx/render"
"github.com/jakoblorz/sdfx/sdf"
)
var (
	// Bailout is the escape radius: iteration stops once |z| exceeds it.
	Bailout = 2.0
	// Power is the exponent of the mandelbulb formula (z -> z^Power + c).
	Power = 10.0
	// Iterations is the maximum number of iterations per distance estimate.
	Iterations = 15
	// Epsilon is the offset used for normal estimation via central differences.
	Epsilon = 0.01
)
// Mandelbulb is a ray-marched mandelbulb fractal with an attached material.
type Mandelbulb struct {
	render.Material
}
// Evaluate returns the mandelbulb's distance estimate at pos: an approximate
// signed distance from pos to the fractal surface, derived from the escape
// radius and the running derivative dr.
func (m Mandelbulb) Evaluate(pos sdf.V3) float64 {
	var (
		z  = pos // current iterate
		dr = 1.0 // running magnitude of the derivative
		r  = 0.0 // |z|
	)
	for i := 0; i < Iterations; i++ {
		r = z.Length()
		if r > Bailout {
			break
		}
		// convert to polar coordinates
		var (
			theta = math.Acos(z.Z / r)
			phi   = math.Atan2(z.Y, z.X)
		)
		dr = math.Pow(r, Power-1.0)*Power*dr + 1.0
		// scale and rotate the point
		var (
			zr = math.Pow(r, Power)
		)
		theta = theta * Power
		phi = phi * Power
		// convert back to cartesian coordinates
		z = sdf.V3{
			X: (math.Sin(theta) * math.Cos(phi)) * zr,
			Y: (math.Sin(phi) * math.Sin(theta)) * zr,
			Z: (math.Cos(theta)) * zr,
		}.Add(pos)
	}
	// Standard distance-estimator formula for escape-time fractals.
	return 0.5 * math.Log(r) * r / dr
}
// EstimateNormal approximates the surface normal at pos using central
// differences of the distance estimate with step Epsilon along each axis.
func (m Mandelbulb) EstimateNormal(pos sdf.V3) sdf.V3 {
	return sdf.V3{
		X: m.Evaluate(pos.Add(sdf.V3{X: Epsilon})) - m.Evaluate(pos.Sub(sdf.V3{X: Epsilon})),
		Y: m.Evaluate(pos.Add(sdf.V3{Y: Epsilon})) - m.Evaluate(pos.Sub(sdf.V3{Y: Epsilon})),
		Z: m.Evaluate(pos.Add(sdf.V3{Z: Epsilon})) - m.Evaluate(pos.Sub(sdf.V3{Z: Epsilon})),
	}.Normalize()
}
// Hit first intersects the ray with a bounding sphere (solving the quadratic
// for a sphere centered at the origin; the `2*2` term is a hard-coded R*R with
// R=2, matching Bailout — TODO confirm this is intentional), then ray-marches
// the fractal from the sphere entry point via hit.
func (m Mandelbulb) Hit(ray *render.Ray3, tMin float64, tMax float64) (bool, *render.HitRecord) {
	oc := ray.Origin                      // A-C (sphere center is the origin)
	a := ray.Direction.Dot(ray.Direction) // dot(B, B)
	b := oc.Dot(ray.Direction)            // dot(A-C, B)
	c := oc.Dot(oc) - 2*2                 // dot(A-C, A-C) - R*R
	discriminant := b*b - a*c
	if discriminant > 0 {
		discriminantSquareRoot := math.Sqrt(discriminant)
		// Prefer the nearer intersection; fall back to the farther one if the
		// nearer lies outside (tMin, tMax).
		temp := (-b - discriminantSquareRoot) / a
		if !(temp < tMax && temp > tMin) {
			temp = (-b + discriminantSquareRoot) / a
		}
		if temp < tMax && temp > tMin {
			return m.hit(&render.Ray3{
				Origin:    ray.PointAt(temp),
				Direction: ray.Direction,
			}, tMin, tMax, 0)
		}
	}
	return false, nil
}
// hit recursively ray-marches along ray using the distance estimate as the
// step size (sphere tracing), giving up after 1000 steps. A hit is recorded
// once the distance estimate drops below 0.01.
func (m Mandelbulb) hit(ray *render.Ray3, tMin, tMax float64, depth int) (bool, *render.HitRecord) {
	if depth >= 1000 {
		return false, nil
	}
	t := m.Evaluate(ray.Origin)
	if t < tMax && t > tMin {
		p := ray.PointAt(t)
		if t < 0.01 {
			// Close enough to the surface: record the hit at the current point.
			return true, render.RecordHit(t, ray.PointAt(t), m.EstimateNormal(ray.Origin), m)
		}
		// Step forward by the (safe) estimated distance and continue marching.
		return m.hit(&render.Ray3{
			Origin:    p,
			Direction: ray.Direction,
		}, tMin, tMax, depth+1)
	}
	return false, nil
}
package fp
// FoldLeftBool folds the list from the left into a bool accumulator, starting from z.
func (l BoolList) FoldLeftBool(z bool, f func(bool, bool) bool) bool {
	acc := z
	l.Foreach(func(e bool) { acc = f(acc, e) })
	return acc
}

// FoldLeftString folds the list from the left into a string accumulator, starting from z.
func (l BoolList) FoldLeftString(z string, f func(string, bool) string) string {
	acc := z
	l.Foreach(func(e bool) { acc = f(acc, e) })
	return acc
}

// FoldLeftInt folds the list from the left into an int accumulator, starting from z.
func (l BoolList) FoldLeftInt(z int, f func(int, bool) int) int {
	acc := z
	l.Foreach(func(e bool) { acc = f(acc, e) })
	return acc
}

// FoldLeftInt64 folds the list from the left into an int64 accumulator, starting from z.
func (l BoolList) FoldLeftInt64(z int64, f func(int64, bool) int64) int64 {
	acc := z
	l.Foreach(func(e bool) { acc = f(acc, e) })
	return acc
}

// FoldLeftByte folds the list from the left into a byte accumulator, starting from z.
func (l BoolList) FoldLeftByte(z byte, f func(byte, bool) byte) byte {
	acc := z
	l.Foreach(func(e bool) { acc = f(acc, e) })
	return acc
}

// FoldLeftRune folds the list from the left into a rune accumulator, starting from z.
func (l BoolList) FoldLeftRune(z rune, f func(rune, bool) rune) rune {
	acc := z
	l.Foreach(func(e bool) { acc = f(acc, e) })
	return acc
}

// FoldLeftFloat32 folds the list from the left into a float32 accumulator, starting from z.
func (l BoolList) FoldLeftFloat32(z float32, f func(float32, bool) float32) float32 {
	acc := z
	l.Foreach(func(e bool) { acc = f(acc, e) })
	return acc
}

// FoldLeftFloat64 folds the list from the left into a float64 accumulator, starting from z.
func (l BoolList) FoldLeftFloat64(z float64, f func(float64, bool) float64) float64 {
	acc := z
	l.Foreach(func(e bool) { acc = f(acc, e) })
	return acc
}

// FoldLeftAny folds the list from the left into an Any accumulator, starting from z.
func (l BoolList) FoldLeftAny(z Any, f func(Any, bool) Any) Any {
	acc := z
	l.Foreach(func(e bool) { acc = f(acc, e) })
	return acc
}

// FoldLeftTuple2 folds the list from the left into a Tuple2 accumulator, starting from z.
func (l BoolList) FoldLeftTuple2(z Tuple2, f func(Tuple2, bool) Tuple2) Tuple2 {
	acc := z
	l.Foreach(func(e bool) { acc = f(acc, e) })
	return acc
}

// FoldLeftBoolList folds the list from the left into a BoolList accumulator, starting from z.
func (l BoolList) FoldLeftBoolList(z BoolList, f func(BoolList, bool) BoolList) BoolList {
	acc := z
	l.Foreach(func(e bool) { acc = f(acc, e) })
	return acc
}

// FoldLeftStringList folds the list from the left into a StringList accumulator, starting from z.
func (l BoolList) FoldLeftStringList(z StringList, f func(StringList, bool) StringList) StringList {
	acc := z
	l.Foreach(func(e bool) { acc = f(acc, e) })
	return acc
}

// FoldLeftIntList folds the list from the left into an IntList accumulator, starting from z.
func (l BoolList) FoldLeftIntList(z IntList, f func(IntList, bool) IntList) IntList {
	acc := z
	l.Foreach(func(e bool) { acc = f(acc, e) })
	return acc
}

// FoldLeftInt64List folds the list from the left into an Int64List accumulator, starting from z.
func (l BoolList) FoldLeftInt64List(z Int64List, f func(Int64List, bool) Int64List) Int64List {
	acc := z
	l.Foreach(func(e bool) { acc = f(acc, e) })
	return acc
}

// FoldLeftByteList folds the list from the left into a ByteList accumulator, starting from z.
func (l BoolList) FoldLeftByteList(z ByteList, f func(ByteList, bool) ByteList) ByteList {
	acc := z
	l.Foreach(func(e bool) { acc = f(acc, e) })
	return acc
}

// FoldLeftRuneList folds the list from the left into a RuneList accumulator, starting from z.
func (l BoolList) FoldLeftRuneList(z RuneList, f func(RuneList, bool) RuneList) RuneList {
	acc := z
	l.Foreach(func(e bool) { acc = f(acc, e) })
	return acc
}

// FoldLeftFloat32List folds the list from the left into a Float32List accumulator, starting from z.
func (l BoolList) FoldLeftFloat32List(z Float32List, f func(Float32List, bool) Float32List) Float32List {
	acc := z
	l.Foreach(func(e bool) { acc = f(acc, e) })
	return acc
}
func (l BoolList) FoldLeftFloat64List(z Float64List, f func(Float64List, bool) Float64List) Float64List {
acc := z
l.Foreach(func (e bool) { acc = f(acc, e) })
return acc}
func (l BoolList) FoldLeftAnyList(z AnyList, f func(AnyList, bool) AnyList) AnyList {
acc := z
l.Foreach(func (e bool) { acc = f(acc, e) })
return acc}
func (l BoolList) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, bool) Tuple2List) Tuple2List {
acc := z
l.Foreach(func (e bool) { acc = f(acc, e) })
return acc}
// FoldLeft specializations for StringList, one per accumulator type: acc
// starts at z and becomes f(acc, e) for each element e, visited via Foreach.
// Likely generated code — prefer changing the generator over hand edits.
func (l StringList) FoldLeftBool(z bool, f func(bool, string) bool) bool {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftString(z string, f func(string, string) string) string {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftInt(z int, f func(int, string) int) int {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftInt64(z int64, f func(int64, string) int64) int64 {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftByte(z byte, f func(byte, string) byte) byte {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftRune(z rune, f func(rune, string) rune) rune {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftFloat32(z float32, f func(float32, string) float32) float32 {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftFloat64(z float64, f func(float64, string) float64) float64 {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftAny(z Any, f func(Any, string) Any) Any {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftTuple2(z Tuple2, f func(Tuple2, string) Tuple2) Tuple2 {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftBoolList(z BoolList, f func(BoolList, string) BoolList) BoolList {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftStringList(z StringList, f func(StringList, string) StringList) StringList {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftIntList(z IntList, f func(IntList, string) IntList) IntList {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftInt64List(z Int64List, f func(Int64List, string) Int64List) Int64List {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftByteList(z ByteList, f func(ByteList, string) ByteList) ByteList {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftRuneList(z RuneList, f func(RuneList, string) RuneList) RuneList {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftFloat32List(z Float32List, f func(Float32List, string) Float32List) Float32List {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftFloat64List(z Float64List, f func(Float64List, string) Float64List) Float64List {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftAnyList(z AnyList, f func(AnyList, string) AnyList) AnyList {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
func (l StringList) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, string) Tuple2List) Tuple2List {
acc := z
l.Foreach(func (e string) { acc = f(acc, e) })
return acc}
// FoldLeft specializations for IntList, one per accumulator type: acc starts
// at z and becomes f(acc, e) for each element e, visited via Foreach.
// Likely generated code — prefer changing the generator over hand edits.
func (l IntList) FoldLeftBool(z bool, f func(bool, int) bool) bool {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftString(z string, f func(string, int) string) string {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftInt(z int, f func(int, int) int) int {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftInt64(z int64, f func(int64, int) int64) int64 {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftByte(z byte, f func(byte, int) byte) byte {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftRune(z rune, f func(rune, int) rune) rune {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftFloat32(z float32, f func(float32, int) float32) float32 {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftFloat64(z float64, f func(float64, int) float64) float64 {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftAny(z Any, f func(Any, int) Any) Any {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftTuple2(z Tuple2, f func(Tuple2, int) Tuple2) Tuple2 {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftBoolList(z BoolList, f func(BoolList, int) BoolList) BoolList {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftStringList(z StringList, f func(StringList, int) StringList) StringList {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftIntList(z IntList, f func(IntList, int) IntList) IntList {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftInt64List(z Int64List, f func(Int64List, int) Int64List) Int64List {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftByteList(z ByteList, f func(ByteList, int) ByteList) ByteList {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftRuneList(z RuneList, f func(RuneList, int) RuneList) RuneList {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftFloat32List(z Float32List, f func(Float32List, int) Float32List) Float32List {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftFloat64List(z Float64List, f func(Float64List, int) Float64List) Float64List {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftAnyList(z AnyList, f func(AnyList, int) AnyList) AnyList {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
func (l IntList) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, int) Tuple2List) Tuple2List {
acc := z
l.Foreach(func (e int) { acc = f(acc, e) })
return acc}
// FoldLeft specializations for Int64List, one per accumulator type: acc
// starts at z and becomes f(acc, e) for each element e, visited via Foreach.
// Likely generated code — prefer changing the generator over hand edits.
func (l Int64List) FoldLeftBool(z bool, f func(bool, int64) bool) bool {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftString(z string, f func(string, int64) string) string {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftInt(z int, f func(int, int64) int) int {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftInt64(z int64, f func(int64, int64) int64) int64 {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftByte(z byte, f func(byte, int64) byte) byte {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftRune(z rune, f func(rune, int64) rune) rune {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftFloat32(z float32, f func(float32, int64) float32) float32 {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftFloat64(z float64, f func(float64, int64) float64) float64 {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftAny(z Any, f func(Any, int64) Any) Any {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftTuple2(z Tuple2, f func(Tuple2, int64) Tuple2) Tuple2 {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftBoolList(z BoolList, f func(BoolList, int64) BoolList) BoolList {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftStringList(z StringList, f func(StringList, int64) StringList) StringList {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftIntList(z IntList, f func(IntList, int64) IntList) IntList {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftInt64List(z Int64List, f func(Int64List, int64) Int64List) Int64List {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftByteList(z ByteList, f func(ByteList, int64) ByteList) ByteList {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftRuneList(z RuneList, f func(RuneList, int64) RuneList) RuneList {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftFloat32List(z Float32List, f func(Float32List, int64) Float32List) Float32List {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftFloat64List(z Float64List, f func(Float64List, int64) Float64List) Float64List {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftAnyList(z AnyList, f func(AnyList, int64) AnyList) AnyList {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
func (l Int64List) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, int64) Tuple2List) Tuple2List {
acc := z
l.Foreach(func (e int64) { acc = f(acc, e) })
return acc}
// FoldLeft specializations for ByteList, one per accumulator type: acc starts
// at z and becomes f(acc, e) for each element e, visited via Foreach.
// Likely generated code — prefer changing the generator over hand edits.
func (l ByteList) FoldLeftBool(z bool, f func(bool, byte) bool) bool {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftString(z string, f func(string, byte) string) string {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftInt(z int, f func(int, byte) int) int {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftInt64(z int64, f func(int64, byte) int64) int64 {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftByte(z byte, f func(byte, byte) byte) byte {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftRune(z rune, f func(rune, byte) rune) rune {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftFloat32(z float32, f func(float32, byte) float32) float32 {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftFloat64(z float64, f func(float64, byte) float64) float64 {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftAny(z Any, f func(Any, byte) Any) Any {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftTuple2(z Tuple2, f func(Tuple2, byte) Tuple2) Tuple2 {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftBoolList(z BoolList, f func(BoolList, byte) BoolList) BoolList {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftStringList(z StringList, f func(StringList, byte) StringList) StringList {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftIntList(z IntList, f func(IntList, byte) IntList) IntList {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftInt64List(z Int64List, f func(Int64List, byte) Int64List) Int64List {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftByteList(z ByteList, f func(ByteList, byte) ByteList) ByteList {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftRuneList(z RuneList, f func(RuneList, byte) RuneList) RuneList {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftFloat32List(z Float32List, f func(Float32List, byte) Float32List) Float32List {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftFloat64List(z Float64List, f func(Float64List, byte) Float64List) Float64List {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftAnyList(z AnyList, f func(AnyList, byte) AnyList) AnyList {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
func (l ByteList) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, byte) Tuple2List) Tuple2List {
acc := z
l.Foreach(func (e byte) { acc = f(acc, e) })
return acc}
// FoldLeft specializations for RuneList, one per accumulator type: acc starts
// at z and becomes f(acc, e) for each element e, visited via Foreach.
// Likely generated code — prefer changing the generator over hand edits.
func (l RuneList) FoldLeftBool(z bool, f func(bool, rune) bool) bool {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftString(z string, f func(string, rune) string) string {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftInt(z int, f func(int, rune) int) int {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftInt64(z int64, f func(int64, rune) int64) int64 {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftByte(z byte, f func(byte, rune) byte) byte {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftRune(z rune, f func(rune, rune) rune) rune {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftFloat32(z float32, f func(float32, rune) float32) float32 {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftFloat64(z float64, f func(float64, rune) float64) float64 {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftAny(z Any, f func(Any, rune) Any) Any {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftTuple2(z Tuple2, f func(Tuple2, rune) Tuple2) Tuple2 {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftBoolList(z BoolList, f func(BoolList, rune) BoolList) BoolList {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftStringList(z StringList, f func(StringList, rune) StringList) StringList {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftIntList(z IntList, f func(IntList, rune) IntList) IntList {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftInt64List(z Int64List, f func(Int64List, rune) Int64List) Int64List {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftByteList(z ByteList, f func(ByteList, rune) ByteList) ByteList {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftRuneList(z RuneList, f func(RuneList, rune) RuneList) RuneList {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftFloat32List(z Float32List, f func(Float32List, rune) Float32List) Float32List {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftFloat64List(z Float64List, f func(Float64List, rune) Float64List) Float64List {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftAnyList(z AnyList, f func(AnyList, rune) AnyList) AnyList {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
func (l RuneList) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, rune) Tuple2List) Tuple2List {
acc := z
l.Foreach(func (e rune) { acc = f(acc, e) })
return acc}
// FoldLeft specializations for Float32List, one per accumulator type: acc
// starts at z and becomes f(acc, e) for each element e, visited via Foreach.
// Likely generated code — prefer changing the generator over hand edits.
func (l Float32List) FoldLeftBool(z bool, f func(bool, float32) bool) bool {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftString(z string, f func(string, float32) string) string {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftInt(z int, f func(int, float32) int) int {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftInt64(z int64, f func(int64, float32) int64) int64 {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftByte(z byte, f func(byte, float32) byte) byte {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftRune(z rune, f func(rune, float32) rune) rune {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftFloat32(z float32, f func(float32, float32) float32) float32 {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftFloat64(z float64, f func(float64, float32) float64) float64 {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftAny(z Any, f func(Any, float32) Any) Any {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftTuple2(z Tuple2, f func(Tuple2, float32) Tuple2) Tuple2 {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftBoolList(z BoolList, f func(BoolList, float32) BoolList) BoolList {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftStringList(z StringList, f func(StringList, float32) StringList) StringList {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftIntList(z IntList, f func(IntList, float32) IntList) IntList {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftInt64List(z Int64List, f func(Int64List, float32) Int64List) Int64List {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftByteList(z ByteList, f func(ByteList, float32) ByteList) ByteList {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftRuneList(z RuneList, f func(RuneList, float32) RuneList) RuneList {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftFloat32List(z Float32List, f func(Float32List, float32) Float32List) Float32List {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftFloat64List(z Float64List, f func(Float64List, float32) Float64List) Float64List {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftAnyList(z AnyList, f func(AnyList, float32) AnyList) AnyList {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
func (l Float32List) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, float32) Tuple2List) Tuple2List {
acc := z
l.Foreach(func (e float32) { acc = f(acc, e) })
return acc}
// FoldLeft specializations for Float64List, one per accumulator type: acc
// starts at z and becomes f(acc, e) for each element e, visited via Foreach.
// Likely generated code — prefer changing the generator over hand edits.
func (l Float64List) FoldLeftBool(z bool, f func(bool, float64) bool) bool {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftString(z string, f func(string, float64) string) string {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftInt(z int, f func(int, float64) int) int {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftInt64(z int64, f func(int64, float64) int64) int64 {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftByte(z byte, f func(byte, float64) byte) byte {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftRune(z rune, f func(rune, float64) rune) rune {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftFloat32(z float32, f func(float32, float64) float32) float32 {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftFloat64(z float64, f func(float64, float64) float64) float64 {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftAny(z Any, f func(Any, float64) Any) Any {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftTuple2(z Tuple2, f func(Tuple2, float64) Tuple2) Tuple2 {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftBoolList(z BoolList, f func(BoolList, float64) BoolList) BoolList {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftStringList(z StringList, f func(StringList, float64) StringList) StringList {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftIntList(z IntList, f func(IntList, float64) IntList) IntList {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftInt64List(z Int64List, f func(Int64List, float64) Int64List) Int64List {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftByteList(z ByteList, f func(ByteList, float64) ByteList) ByteList {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftRuneList(z RuneList, f func(RuneList, float64) RuneList) RuneList {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftFloat32List(z Float32List, f func(Float32List, float64) Float32List) Float32List {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftFloat64List(z Float64List, f func(Float64List, float64) Float64List) Float64List {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftAnyList(z AnyList, f func(AnyList, float64) AnyList) AnyList {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
func (l Float64List) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, float64) Tuple2List) Tuple2List {
acc := z
l.Foreach(func (e float64) { acc = f(acc, e) })
return acc}
// FoldLeft specializations for AnyList, one per accumulator type: acc starts
// at z and becomes f(acc, e) for each element e, visited via Foreach.
// Likely generated code — prefer changing the generator over hand edits.
func (l AnyList) FoldLeftBool(z bool, f func(bool, Any) bool) bool {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftString(z string, f func(string, Any) string) string {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftInt(z int, f func(int, Any) int) int {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftInt64(z int64, f func(int64, Any) int64) int64 {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftByte(z byte, f func(byte, Any) byte) byte {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftRune(z rune, f func(rune, Any) rune) rune {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftFloat32(z float32, f func(float32, Any) float32) float32 {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftFloat64(z float64, f func(float64, Any) float64) float64 {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftAny(z Any, f func(Any, Any) Any) Any {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftTuple2(z Tuple2, f func(Tuple2, Any) Tuple2) Tuple2 {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftBoolList(z BoolList, f func(BoolList, Any) BoolList) BoolList {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftStringList(z StringList, f func(StringList, Any) StringList) StringList {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftIntList(z IntList, f func(IntList, Any) IntList) IntList {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftInt64List(z Int64List, f func(Int64List, Any) Int64List) Int64List {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftByteList(z ByteList, f func(ByteList, Any) ByteList) ByteList {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftRuneList(z RuneList, f func(RuneList, Any) RuneList) RuneList {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftFloat32List(z Float32List, f func(Float32List, Any) Float32List) Float32List {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftFloat64List(z Float64List, f func(Float64List, Any) Float64List) Float64List {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftAnyList(z AnyList, f func(AnyList, Any) AnyList) AnyList {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
func (l AnyList) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, Any) Tuple2List) Tuple2List {
acc := z
l.Foreach(func (e Any) { acc = f(acc, e) })
return acc}
// FoldLeft specializations for Tuple2List, one per accumulator type: acc
// starts at z and becomes f(acc, e) for each element e, visited via Foreach.
// Likely generated code — prefer changing the generator over hand edits.
func (l Tuple2List) FoldLeftBool(z bool, f func(bool, Tuple2) bool) bool {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftString(z string, f func(string, Tuple2) string) string {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftInt(z int, f func(int, Tuple2) int) int {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftInt64(z int64, f func(int64, Tuple2) int64) int64 {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftByte(z byte, f func(byte, Tuple2) byte) byte {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftRune(z rune, f func(rune, Tuple2) rune) rune {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftFloat32(z float32, f func(float32, Tuple2) float32) float32 {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftFloat64(z float64, f func(float64, Tuple2) float64) float64 {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftAny(z Any, f func(Any, Tuple2) Any) Any {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftTuple2(z Tuple2, f func(Tuple2, Tuple2) Tuple2) Tuple2 {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftBoolList(z BoolList, f func(BoolList, Tuple2) BoolList) BoolList {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftStringList(z StringList, f func(StringList, Tuple2) StringList) StringList {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftIntList(z IntList, f func(IntList, Tuple2) IntList) IntList {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftInt64List(z Int64List, f func(Int64List, Tuple2) Int64List) Int64List {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftByteList(z ByteList, f func(ByteList, Tuple2) ByteList) ByteList {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftRuneList(z RuneList, f func(RuneList, Tuple2) RuneList) RuneList {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftFloat32List(z Float32List, f func(Float32List, Tuple2) Float32List) Float32List {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftFloat64List(z Float64List, f func(Float64List, Tuple2) Float64List) Float64List {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftAnyList(z AnyList, f func(AnyList, Tuple2) AnyList) AnyList {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
func (l Tuple2List) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, Tuple2) Tuple2List) Tuple2List {
acc := z
l.Foreach(func (e Tuple2) { acc = f(acc, e) })
return acc}
// FoldLeft specializations for BoolOptionList, one per accumulator type: acc
// starts at z and becomes f(acc, e) for each element e, visited via Foreach.
// Likely generated code — prefer changing the generator over hand edits.
func (l BoolOptionList) FoldLeftBool(z bool, f func(bool, BoolOption) bool) bool {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftString(z string, f func(string, BoolOption) string) string {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftInt(z int, f func(int, BoolOption) int) int {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftInt64(z int64, f func(int64, BoolOption) int64) int64 {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftByte(z byte, f func(byte, BoolOption) byte) byte {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftRune(z rune, f func(rune, BoolOption) rune) rune {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftFloat32(z float32, f func(float32, BoolOption) float32) float32 {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftFloat64(z float64, f func(float64, BoolOption) float64) float64 {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftAny(z Any, f func(Any, BoolOption) Any) Any {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftTuple2(z Tuple2, f func(Tuple2, BoolOption) Tuple2) Tuple2 {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftBoolList(z BoolList, f func(BoolList, BoolOption) BoolList) BoolList {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftStringList(z StringList, f func(StringList, BoolOption) StringList) StringList {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftIntList(z IntList, f func(IntList, BoolOption) IntList) IntList {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftInt64List(z Int64List, f func(Int64List, BoolOption) Int64List) Int64List {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftByteList(z ByteList, f func(ByteList, BoolOption) ByteList) ByteList {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftRuneList(z RuneList, f func(RuneList, BoolOption) RuneList) RuneList {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftFloat32List(z Float32List, f func(Float32List, BoolOption) Float32List) Float32List {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftFloat64List(z Float64List, f func(Float64List, BoolOption) Float64List) Float64List {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftAnyList(z AnyList, f func(AnyList, BoolOption) AnyList) AnyList {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
func (l BoolOptionList) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, BoolOption) Tuple2List) Tuple2List {
acc := z
l.Foreach(func (e BoolOption) { acc = f(acc, e) })
return acc}
// FoldLeft* for StringOptionList: each threads an accumulator of the named
// result type through f across the elements (via Foreach), left to right,
// starting from z.
func (l StringOptionList) FoldLeftBool(z bool, f func(bool, StringOption) bool) bool {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftString(z string, f func(string, StringOption) string) string {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftInt(z int, f func(int, StringOption) int) int {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftInt64(z int64, f func(int64, StringOption) int64) int64 {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftByte(z byte, f func(byte, StringOption) byte) byte {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftRune(z rune, f func(rune, StringOption) rune) rune {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftFloat32(z float32, f func(float32, StringOption) float32) float32 {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftFloat64(z float64, f func(float64, StringOption) float64) float64 {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftAny(z Any, f func(Any, StringOption) Any) Any {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftTuple2(z Tuple2, f func(Tuple2, StringOption) Tuple2) Tuple2 {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftBoolList(z BoolList, f func(BoolList, StringOption) BoolList) BoolList {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftStringList(z StringList, f func(StringList, StringOption) StringList) StringList {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftIntList(z IntList, f func(IntList, StringOption) IntList) IntList {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftInt64List(z Int64List, f func(Int64List, StringOption) Int64List) Int64List {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftByteList(z ByteList, f func(ByteList, StringOption) ByteList) ByteList {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftRuneList(z RuneList, f func(RuneList, StringOption) RuneList) RuneList {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftFloat32List(z Float32List, f func(Float32List, StringOption) Float32List) Float32List {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftFloat64List(z Float64List, f func(Float64List, StringOption) Float64List) Float64List {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftAnyList(z AnyList, f func(AnyList, StringOption) AnyList) AnyList {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
func (l StringOptionList) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, StringOption) Tuple2List) Tuple2List {
	acc := z
	l.Foreach(func(e StringOption) { acc = f(acc, e) })
	return acc
}
// FoldLeft* for IntOptionList: each threads an accumulator of the named
// result type through f across the elements (via Foreach), left to right,
// starting from z.
func (l IntOptionList) FoldLeftBool(z bool, f func(bool, IntOption) bool) bool {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftString(z string, f func(string, IntOption) string) string {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftInt(z int, f func(int, IntOption) int) int {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftInt64(z int64, f func(int64, IntOption) int64) int64 {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftByte(z byte, f func(byte, IntOption) byte) byte {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftRune(z rune, f func(rune, IntOption) rune) rune {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftFloat32(z float32, f func(float32, IntOption) float32) float32 {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftFloat64(z float64, f func(float64, IntOption) float64) float64 {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftAny(z Any, f func(Any, IntOption) Any) Any {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftTuple2(z Tuple2, f func(Tuple2, IntOption) Tuple2) Tuple2 {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftBoolList(z BoolList, f func(BoolList, IntOption) BoolList) BoolList {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftStringList(z StringList, f func(StringList, IntOption) StringList) StringList {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftIntList(z IntList, f func(IntList, IntOption) IntList) IntList {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftInt64List(z Int64List, f func(Int64List, IntOption) Int64List) Int64List {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftByteList(z ByteList, f func(ByteList, IntOption) ByteList) ByteList {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftRuneList(z RuneList, f func(RuneList, IntOption) RuneList) RuneList {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftFloat32List(z Float32List, f func(Float32List, IntOption) Float32List) Float32List {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftFloat64List(z Float64List, f func(Float64List, IntOption) Float64List) Float64List {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftAnyList(z AnyList, f func(AnyList, IntOption) AnyList) AnyList {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
func (l IntOptionList) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, IntOption) Tuple2List) Tuple2List {
	acc := z
	l.Foreach(func(e IntOption) { acc = f(acc, e) })
	return acc
}
// FoldLeft* for Int64OptionList: each threads an accumulator of the named
// result type through f across the elements (via Foreach), left to right,
// starting from z.
func (l Int64OptionList) FoldLeftBool(z bool, f func(bool, Int64Option) bool) bool {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftString(z string, f func(string, Int64Option) string) string {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftInt(z int, f func(int, Int64Option) int) int {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftInt64(z int64, f func(int64, Int64Option) int64) int64 {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftByte(z byte, f func(byte, Int64Option) byte) byte {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftRune(z rune, f func(rune, Int64Option) rune) rune {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftFloat32(z float32, f func(float32, Int64Option) float32) float32 {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftFloat64(z float64, f func(float64, Int64Option) float64) float64 {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftAny(z Any, f func(Any, Int64Option) Any) Any {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftTuple2(z Tuple2, f func(Tuple2, Int64Option) Tuple2) Tuple2 {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftBoolList(z BoolList, f func(BoolList, Int64Option) BoolList) BoolList {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftStringList(z StringList, f func(StringList, Int64Option) StringList) StringList {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftIntList(z IntList, f func(IntList, Int64Option) IntList) IntList {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftInt64List(z Int64List, f func(Int64List, Int64Option) Int64List) Int64List {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftByteList(z ByteList, f func(ByteList, Int64Option) ByteList) ByteList {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftRuneList(z RuneList, f func(RuneList, Int64Option) RuneList) RuneList {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftFloat32List(z Float32List, f func(Float32List, Int64Option) Float32List) Float32List {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftFloat64List(z Float64List, f func(Float64List, Int64Option) Float64List) Float64List {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftAnyList(z AnyList, f func(AnyList, Int64Option) AnyList) AnyList {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
func (l Int64OptionList) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, Int64Option) Tuple2List) Tuple2List {
	acc := z
	l.Foreach(func(e Int64Option) { acc = f(acc, e) })
	return acc
}
// FoldLeft* for ByteOptionList: each threads an accumulator of the named
// result type through f across the elements (via Foreach), left to right,
// starting from z.
func (l ByteOptionList) FoldLeftBool(z bool, f func(bool, ByteOption) bool) bool {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftString(z string, f func(string, ByteOption) string) string {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftInt(z int, f func(int, ByteOption) int) int {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftInt64(z int64, f func(int64, ByteOption) int64) int64 {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftByte(z byte, f func(byte, ByteOption) byte) byte {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftRune(z rune, f func(rune, ByteOption) rune) rune {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftFloat32(z float32, f func(float32, ByteOption) float32) float32 {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftFloat64(z float64, f func(float64, ByteOption) float64) float64 {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftAny(z Any, f func(Any, ByteOption) Any) Any {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftTuple2(z Tuple2, f func(Tuple2, ByteOption) Tuple2) Tuple2 {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftBoolList(z BoolList, f func(BoolList, ByteOption) BoolList) BoolList {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftStringList(z StringList, f func(StringList, ByteOption) StringList) StringList {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftIntList(z IntList, f func(IntList, ByteOption) IntList) IntList {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftInt64List(z Int64List, f func(Int64List, ByteOption) Int64List) Int64List {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftByteList(z ByteList, f func(ByteList, ByteOption) ByteList) ByteList {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftRuneList(z RuneList, f func(RuneList, ByteOption) RuneList) RuneList {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftFloat32List(z Float32List, f func(Float32List, ByteOption) Float32List) Float32List {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftFloat64List(z Float64List, f func(Float64List, ByteOption) Float64List) Float64List {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftAnyList(z AnyList, f func(AnyList, ByteOption) AnyList) AnyList {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
func (l ByteOptionList) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, ByteOption) Tuple2List) Tuple2List {
	acc := z
	l.Foreach(func(e ByteOption) { acc = f(acc, e) })
	return acc
}
// FoldLeft* for RuneOptionList: each threads an accumulator of the named
// result type through f across the elements (via Foreach), left to right,
// starting from z.
func (l RuneOptionList) FoldLeftBool(z bool, f func(bool, RuneOption) bool) bool {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftString(z string, f func(string, RuneOption) string) string {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftInt(z int, f func(int, RuneOption) int) int {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftInt64(z int64, f func(int64, RuneOption) int64) int64 {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftByte(z byte, f func(byte, RuneOption) byte) byte {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftRune(z rune, f func(rune, RuneOption) rune) rune {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftFloat32(z float32, f func(float32, RuneOption) float32) float32 {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftFloat64(z float64, f func(float64, RuneOption) float64) float64 {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftAny(z Any, f func(Any, RuneOption) Any) Any {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftTuple2(z Tuple2, f func(Tuple2, RuneOption) Tuple2) Tuple2 {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftBoolList(z BoolList, f func(BoolList, RuneOption) BoolList) BoolList {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftStringList(z StringList, f func(StringList, RuneOption) StringList) StringList {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftIntList(z IntList, f func(IntList, RuneOption) IntList) IntList {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftInt64List(z Int64List, f func(Int64List, RuneOption) Int64List) Int64List {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftByteList(z ByteList, f func(ByteList, RuneOption) ByteList) ByteList {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftRuneList(z RuneList, f func(RuneList, RuneOption) RuneList) RuneList {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftFloat32List(z Float32List, f func(Float32List, RuneOption) Float32List) Float32List {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftFloat64List(z Float64List, f func(Float64List, RuneOption) Float64List) Float64List {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftAnyList(z AnyList, f func(AnyList, RuneOption) AnyList) AnyList {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
func (l RuneOptionList) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, RuneOption) Tuple2List) Tuple2List {
	acc := z
	l.Foreach(func(e RuneOption) { acc = f(acc, e) })
	return acc
}
// FoldLeft* for Float32OptionList: each threads an accumulator of the named
// result type through f across the elements (via Foreach), left to right,
// starting from z.
func (l Float32OptionList) FoldLeftBool(z bool, f func(bool, Float32Option) bool) bool {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftString(z string, f func(string, Float32Option) string) string {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftInt(z int, f func(int, Float32Option) int) int {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftInt64(z int64, f func(int64, Float32Option) int64) int64 {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftByte(z byte, f func(byte, Float32Option) byte) byte {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftRune(z rune, f func(rune, Float32Option) rune) rune {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftFloat32(z float32, f func(float32, Float32Option) float32) float32 {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftFloat64(z float64, f func(float64, Float32Option) float64) float64 {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftAny(z Any, f func(Any, Float32Option) Any) Any {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftTuple2(z Tuple2, f func(Tuple2, Float32Option) Tuple2) Tuple2 {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftBoolList(z BoolList, f func(BoolList, Float32Option) BoolList) BoolList {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftStringList(z StringList, f func(StringList, Float32Option) StringList) StringList {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftIntList(z IntList, f func(IntList, Float32Option) IntList) IntList {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftInt64List(z Int64List, f func(Int64List, Float32Option) Int64List) Int64List {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftByteList(z ByteList, f func(ByteList, Float32Option) ByteList) ByteList {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftRuneList(z RuneList, f func(RuneList, Float32Option) RuneList) RuneList {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftFloat32List(z Float32List, f func(Float32List, Float32Option) Float32List) Float32List {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftFloat64List(z Float64List, f func(Float64List, Float32Option) Float64List) Float64List {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftAnyList(z AnyList, f func(AnyList, Float32Option) AnyList) AnyList {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
func (l Float32OptionList) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, Float32Option) Tuple2List) Tuple2List {
	acc := z
	l.Foreach(func(e Float32Option) { acc = f(acc, e) })
	return acc
}
// FoldLeft* for Float64OptionList: each threads an accumulator of the named
// result type through f across the elements (via Foreach), left to right,
// starting from z.
func (l Float64OptionList) FoldLeftBool(z bool, f func(bool, Float64Option) bool) bool {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftString(z string, f func(string, Float64Option) string) string {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftInt(z int, f func(int, Float64Option) int) int {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftInt64(z int64, f func(int64, Float64Option) int64) int64 {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftByte(z byte, f func(byte, Float64Option) byte) byte {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftRune(z rune, f func(rune, Float64Option) rune) rune {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftFloat32(z float32, f func(float32, Float64Option) float32) float32 {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftFloat64(z float64, f func(float64, Float64Option) float64) float64 {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftAny(z Any, f func(Any, Float64Option) Any) Any {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftTuple2(z Tuple2, f func(Tuple2, Float64Option) Tuple2) Tuple2 {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftBoolList(z BoolList, f func(BoolList, Float64Option) BoolList) BoolList {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftStringList(z StringList, f func(StringList, Float64Option) StringList) StringList {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftIntList(z IntList, f func(IntList, Float64Option) IntList) IntList {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftInt64List(z Int64List, f func(Int64List, Float64Option) Int64List) Int64List {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftByteList(z ByteList, f func(ByteList, Float64Option) ByteList) ByteList {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftRuneList(z RuneList, f func(RuneList, Float64Option) RuneList) RuneList {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftFloat32List(z Float32List, f func(Float32List, Float64Option) Float32List) Float32List {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftFloat64List(z Float64List, f func(Float64List, Float64Option) Float64List) Float64List {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftAnyList(z AnyList, f func(AnyList, Float64Option) AnyList) AnyList {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
func (l Float64OptionList) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, Float64Option) Tuple2List) Tuple2List {
	acc := z
	l.Foreach(func(e Float64Option) { acc = f(acc, e) })
	return acc
}
// FoldLeft* for AnyOptionList: each threads an accumulator of the named
// result type through f across the elements (via Foreach), left to right,
// starting from z.
func (l AnyOptionList) FoldLeftBool(z bool, f func(bool, AnyOption) bool) bool {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftString(z string, f func(string, AnyOption) string) string {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftInt(z int, f func(int, AnyOption) int) int {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftInt64(z int64, f func(int64, AnyOption) int64) int64 {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftByte(z byte, f func(byte, AnyOption) byte) byte {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftRune(z rune, f func(rune, AnyOption) rune) rune {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftFloat32(z float32, f func(float32, AnyOption) float32) float32 {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftFloat64(z float64, f func(float64, AnyOption) float64) float64 {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftAny(z Any, f func(Any, AnyOption) Any) Any {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftTuple2(z Tuple2, f func(Tuple2, AnyOption) Tuple2) Tuple2 {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftBoolList(z BoolList, f func(BoolList, AnyOption) BoolList) BoolList {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftStringList(z StringList, f func(StringList, AnyOption) StringList) StringList {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftIntList(z IntList, f func(IntList, AnyOption) IntList) IntList {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftInt64List(z Int64List, f func(Int64List, AnyOption) Int64List) Int64List {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftByteList(z ByteList, f func(ByteList, AnyOption) ByteList) ByteList {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftRuneList(z RuneList, f func(RuneList, AnyOption) RuneList) RuneList {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftFloat32List(z Float32List, f func(Float32List, AnyOption) Float32List) Float32List {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftFloat64List(z Float64List, f func(Float64List, AnyOption) Float64List) Float64List {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftAnyList(z AnyList, f func(AnyList, AnyOption) AnyList) AnyList {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l AnyOptionList) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, AnyOption) Tuple2List) Tuple2List {
	acc := z
	l.Foreach(func(e AnyOption) { acc = f(acc, e) })
	return acc
}
func (l Tuple2OptionList) FoldLeftBool(z bool, f func(bool, Tuple2Option) bool) bool {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftString(z string, f func(string, Tuple2Option) string) string {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftInt(z int, f func(int, Tuple2Option) int) int {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftInt64(z int64, f func(int64, Tuple2Option) int64) int64 {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftByte(z byte, f func(byte, Tuple2Option) byte) byte {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftRune(z rune, f func(rune, Tuple2Option) rune) rune {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftFloat32(z float32, f func(float32, Tuple2Option) float32) float32 {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftFloat64(z float64, f func(float64, Tuple2Option) float64) float64 {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftAny(z Any, f func(Any, Tuple2Option) Any) Any {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftTuple2(z Tuple2, f func(Tuple2, Tuple2Option) Tuple2) Tuple2 {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftBoolList(z BoolList, f func(BoolList, Tuple2Option) BoolList) BoolList {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftStringList(z StringList, f func(StringList, Tuple2Option) StringList) StringList {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftIntList(z IntList, f func(IntList, Tuple2Option) IntList) IntList {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftInt64List(z Int64List, f func(Int64List, Tuple2Option) Int64List) Int64List {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftByteList(z ByteList, f func(ByteList, Tuple2Option) ByteList) ByteList {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftRuneList(z RuneList, f func(RuneList, Tuple2Option) RuneList) RuneList {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftFloat32List(z Float32List, f func(Float32List, Tuple2Option) Float32List) Float32List {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftFloat64List(z Float64List, f func(Float64List, Tuple2Option) Float64List) Float64List {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftAnyList(z AnyList, f func(AnyList, Tuple2Option) AnyList) AnyList {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc}
func (l Tuple2OptionList) FoldLeftTuple2List(z Tuple2List, f func(Tuple2List, Tuple2Option) Tuple2List) Tuple2List {
acc := z
l.Foreach(func (e Tuple2Option) { acc = f(acc, e) })
return acc} | fp/bootstrap_list_foldleft.go | 0.755005 | 0.507995 | bootstrap_list_foldleft.go | starcoder |
package tree
import (
"github.com/bestgopher/fucker"
)
// bstTreeNode is a single node of the binary search tree.
type bstTreeNode struct {
	value interface{} // payload stored at this node
	left  *bstTreeNode
	right *bstTreeNode
}

// Value returns the payload stored at this node.
func (b *bstTreeNode) Value() interface{} { return b.value }

// BinarySearchTree is an unbalanced binary search tree whose ordering
// is defined by a user-supplied comparison function.
type BinarySearchTree struct {
	root    *bstTreeNode
	compare fucker.CompareFunc // defines the ordering of stored values
}
// NewBinarySearchTree builds a tree using the given comparison function
// and inserts the optional initial values in the order given.
func NewBinarySearchTree(compare fucker.CompareFunc, values ...interface{}) *BinarySearchTree {
	tree := &BinarySearchTree{compare: compare}
	for _, value := range values {
		tree.Insert(value)
	}
	return tree
}
// Insert adds value to the tree. If an equal value already exists, its
// payload is overwritten rather than adding a duplicate node.
func (b *BinarySearchTree) Insert(value interface{}) {
	newNode := &bstTreeNode{value: value}
	// Walk down via a pointer to the link that will receive the new
	// node, which makes the empty-tree and child-insertion cases uniform.
	link := &b.root
	for *link != nil {
		current := *link
		switch b.compare(newNode, current) {
		case fucker.Equal:
			current.value = newNode.value
			return
		case fucker.Less:
			link = &current.left
		case fucker.Greater:
			link = &current.right
		default:
			// Unknown comparison result: do not insert.
			return
		}
	}
	*link = newNode
}
// Search returns the node holding the given value, or nil when the
// value is not present in the tree.
func (b *BinarySearchTree) Search(value interface{}) Value {
	probe := &bstTreeNode{value: value}
	for cur := b.root; cur != nil; {
		switch b.compare(probe, cur) {
		case fucker.Equal:
			return cur
		case fucker.Less:
			cur = cur.left
		case fucker.Greater:
			cur = cur.right
		}
	}
	return nil
}
// Delete removes the node holding the given value, if present.
//
// Deletion cases:
//   - leaf node (no children): remove it directly
//   - one child: splice the child into the removed node's place
//   - two children: replace the node's value with its in-order successor
//     (the minimum of its right subtree) and unlink the successor
func (b *BinarySearchTree) Delete(value interface{}) {
	b.root = b.delete(b.root, value)
}
func (b *BinarySearchTree) delete(node *bstTreeNode, value interface{}) *bstTreeNode {
if node == nil {
return nil
}
r := &bstTreeNode{value: value}
// 比较当前节点与待删除节点的值
switch b.compare(r, node) {
case fucker.Equal:
if node.left == nil && node.right == nil { // 左右子节点都为空时
node = nil
} else if node.left == nil && node.right != nil { // 左子节点为空,右子节点不为空
node = node.right
} else if node.right == nil && node.left != nil { // 右子节点为空,左子节点不为空
node = node.left
} else {
// 左右子节点都不为空时,获取右子树的最小子节点与当前节点交换
n1, n2 := node, node.right
for n2.left != nil {
n1, n2 = n2, n2.left
}
node.value, n1.left = n2.value, n2.right
}
case fucker.Less:
node.left = b.delete(node.left, value)
case fucker.Greater:
node.right = b.delete(node.right, value)
}
return node
} | tree/binary_search_tree.go | 0.500488 | 0.470068 | binary_search_tree.go | starcoder |
package smd
import (
"fmt"
"math"
"math/rand"
"strings"
"time"
"github.com/gonum/matrix/mat64"
"github.com/gonum/stat/distmv"
)
const (
	r2d = 180 / math.Pi // radians → degrees
	d2r = 1 / r2d       // degrees → radians
)
var (
	// Measurement noise variances (the squared standard deviations fed
	// to the Gaussian noise distributions in NewSpecialStation).
	σρ = math.Pow(5e-3, 2) // range: σ = 5e-3 (5 m if km) squared — units per original note: m, but all measurements in km.
	σρDot = math.Pow(5e-6, 2) // range rate: σ = 5e-6 squared — m/s per original note, but all measurements in km/s.
	// Built-in Deep Space Network stations (lat/long in degrees;
	// altitude presumably km — confirm against GEO2ECEF).
	DSS34Canberra = NewSpecialStation("DSS34Canberra", 0.691750, 0, -35.398333, 148.981944, σρ, σρDot, 6)
	DSS65Madrid = NewSpecialStation("DSS65Madrid", 0.834939, 0, 40.427222, 4.250556, σρ, σρDot, 6)
	DSS13Goldstone = NewSpecialStation("DSS13Goldstone", 1.07114904, 0, 35.247164, 243.205, σρ, σρDot, 6)
)
// Station defines a ground station.
type Station struct {
	Name string
	R, V []float64 // position and velocity in ECEF
	LatΦ, Longθ float64 // these are stored in radians!
	Altitude, Elevation float64 // altitude (presumably km — confirm), and elevation mask in degrees (compared against RangeElAz's el)
	RangeNoise, RangeRateNoise *distmv.Normal // Station noise distributions for range and range rate
	Planet CelestialObject
	rowsH int // If estimating Cr in addition to position and velocity, this needs to be 7
}
// PerformMeasurement simulates a range/range-rate observation of the
// given spacecraft state at sidereal time θgst. The returned Measurement
// carries both the noisy and true values and whether the spacecraft was
// above the station's elevation mask.
func (s Station) PerformMeasurement(θgst float64, state State) Measurement {
	// The station vectors are stored in ECEF, so rotate the inertial
	// state into that frame first.
	rECEF := ECI2ECEF(state.Orbit.R(), θgst)
	vECEF := ECI2ECEF(state.Orbit.V(), θgst)
	ρECEF, ρ, el, _ := s.RangeElAz(rECEF)
	// Relative velocity, pre-divided by the range so the dot product
	// with the (unnormalized) range vector yields the range rate.
	relVel := make([]float64, 3)
	for i := 0; i < 3; i++ {
		relVel[i] = (vECEF[i] - s.V[i]) / ρ
	}
	ρDot := mat64.Dot(mat64.NewVector(3, ρECEF), mat64.NewVector(3, relVel))
	return Measurement{
		Visible:       el >= s.Elevation,
		Range:         ρ + s.RangeNoise.Rand(nil)[0],
		RangeRate:     ρDot + s.RangeRateNoise.Rand(nil)[0],
		TrueRange:     ρ,
		TrueRangeRate: ρDot,
		Timeθgst:      θgst,
		State:         state,
		Station:       s,
	}
}
// RangeElAz computes the slant-range vector (in ECEF), its magnitude,
// and the elevation and azimuth (both in degrees) of an ECEF position
// as seen from the station.
func (s Station) RangeElAz(rECEF []float64) (ρECEF []float64, ρ, el, az float64) {
	ρECEF = make([]float64, 3)
	for i := range ρECEF {
		ρECEF[i] = rECEF[i] - s.R[i]
	}
	ρ = Norm(ρECEF)
	// Rotate the slant-range vector into the topocentric SEZ frame.
	sez := MxV33(R3(s.Longθ), ρECEF)
	sez = MxV33(R2(math.Pi/2-s.LatΦ), sez)
	el = math.Asin(sez[2]/ρ) * r2d
	az = (2*math.Pi + math.Atan2(sez[1], -sez[0])) * r2d
	return
}
// String implements fmt.Stringer: name, geodetic coordinates converted
// back to degrees, altitude, and elevation mask.
func (s Station) String() string {
	return fmt.Sprintf("%s (%f,%f); alt = %f km; el = %f deg", s.Name, s.LatΦ/d2r, s.Longθ/d2r, s.Altitude, s.Elevation)
}

// NewStation returns a new station. Angles in degrees. It is a
// convenience wrapper over NewSpecialStation with the default
// six-column H matrix (position and velocity only).
func NewStation(name string, altitude, elevation, latΦ, longθ, σρ, σρDot float64) Station {
	return NewSpecialStation(name, altitude, elevation, latΦ, longθ, σρ, σρDot, 6)
}
// NewSpecialStation is like NewStation but also lets the caller choose
// the number of columns of the measurement sensitivity matrix H.
// Angles are given in degrees and stored in radians.
func NewSpecialStation(name string, altitude, elevation, latΦ, longθ, σρ, σρDot float64, rowsH int) Station {
	R := GEO2ECEF(altitude, latΦ*d2r, longθ*d2r)
	V := Cross([]float64{0, 0, EarthRotationRate}, R)
	// One shared RNG, seeded from the wall clock, drives both noise
	// distributions.
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	ρNoise, ok := distmv.NewNormal([]float64{0}, mat64.NewSymDense(1, []float64{σρ}), rng)
	if !ok {
		panic("NOK in Gaussian")
	}
	ρDotNoise, ok := distmv.NewNormal([]float64{0}, mat64.NewSymDense(1, []float64{σρDot}), rng)
	if !ok {
		panic("NOK in Gaussian")
	}
	return Station{
		Name:           name,
		R:              R,
		V:              V,
		LatΦ:           latΦ * d2r,
		Longθ:          longθ * d2r,
		Altitude:       altitude,
		Elevation:      elevation,
		RangeNoise:     ρNoise,
		RangeRateNoise: ρDotNoise,
		Planet:         Earth,
		rowsH:          rowsH,
	}
}
// Measurement stores a measurement of a station.
type Measurement struct {
	Visible bool // Stores whether or not the attempted measurement was visible from the station.
	Range, RangeRate float64 // Noisy range and range rate (station noise added).
	TrueRange, TrueRangeRate float64 // Store the true range and range rate
	Timeθgst float64 // Sidereal time of the measurement (radians presumed — confirm against ECI2ECEF).
	State State // Spacecraft state at the measurement time.
	Station Station // Observing station.
}
// IsNil reports whether the measurement is empty, i.e. both the range
// and the range rate are zero.
func (m Measurement) IsNil() bool {
	return m.Range == 0 && m.RangeRate == 0
}
// StateVector returns the (noisy) measurement pair [range, range rate]
// as a 2-element mat64.Vector.
func (m Measurement) StateVector() *mat64.Vector {
	return mat64.NewVector(2, []float64{m.Range, m.RangeRate})
}
// HTilde returns the measurement sensitivity matrix H̃ for this
// measurement: the partials of [range, range rate] with respect to the
// position/velocity state [x y z ẋ ẏ ż]. The matrix has rowsH columns;
// any columns beyond the six set below remain zero.
//
// NOTE(review): the partials divide by the noisy m.Range/m.RangeRate
// rather than TrueRange — confirm this is intended.
func (m Measurement) HTilde() *mat64.Dense {
	// Station position and velocity rotated into the inertial frame at
	// the measurement's sidereal time.
	stationR := ECEF2ECI(m.Station.R, m.Timeθgst)
	stationV := ECEF2ECI(m.Station.V, m.Timeθgst)
	xS := stationR[0]
	yS := stationR[1]
	zS := stationR[2]
	xSDot := stationV[0]
	ySDot := stationV[1]
	zSDot := stationV[2]
	// Spacecraft inertial position and velocity components.
	R := m.State.Orbit.R()
	V := m.State.Orbit.V()
	x := R[0]
	y := R[1]
	z := R[2]
	xDot := V[0]
	yDot := V[1]
	zDot := V[2]
	H := mat64.NewDense(2, m.Station.rowsH, nil)
	// \partial \rho / \partial {x,y,z}
	H.Set(0, 0, (x-xS)/m.Range)
	H.Set(0, 1, (y-yS)/m.Range)
	H.Set(0, 2, (z-zS)/m.Range)
	// \partial \dot\rho / \partial {x,y,z}
	H.Set(1, 0, (xDot-xSDot)/m.Range+(m.RangeRate/math.Pow(m.Range, 2))*(x-xS))
	H.Set(1, 1, (yDot-ySDot)/m.Range+(m.RangeRate/math.Pow(m.Range, 2))*(y-yS))
	H.Set(1, 2, (zDot-zSDot)/m.Range+(m.RangeRate/math.Pow(m.Range, 2))*(z-zS))
	// \partial \dot\rho / \partial {ẋ,ẏ,ż} equals the range partials.
	H.Set(1, 3, (x-xS)/m.Range)
	H.Set(1, 4, (y-yS)/m.Range)
	H.Set(1, 5, (z-zS)/m.Range)
	return H
}
// CSV returns the data as CSV (does *not* include the new line).
// Column order: true range, true range rate, noisy range, noisy range
// rate — each followed by a trailing comma.
func (m Measurement) CSV() string {
	return fmt.Sprintf("%f,%f,%f,%f,", m.TrueRange, m.TrueRangeRate, m.Range, m.RangeRate)
}

// ShortCSV returns the noisy data as CSV (does *not* include the new line).
func (m Measurement) ShortCSV() string {
	return fmt.Sprintf("%f,%f,", m.Range, m.RangeRate)
}

// String implements fmt.Stringer as "stationName@stateDateTime".
func (m Measurement) String() string {
	return fmt.Sprintf("%s@%s", m.Station.Name, m.State.DT)
}
func BuiltinStationFromName(name string) Station {
switch strings.ToLower(name) {
case "dss13":
return DSS13Goldstone
case "dss34":
return DSS34Canberra
case "dss65":
return DSS65Madrid
default:
panic(fmt.Errorf("unknown station `%s`", name))
}
} | station.go | 0.697197 | 0.487429 | station.go | starcoder |
*/
package numf
import (
"math"
)
// Delta returns the differences between consecutive elements, so the
// result is one element shorter than the input. Inputs with fewer than
// two elements yield nil.
func Delta(slice []float64) []float64 {
	if len(slice) < 2 {
		return nil
	}
	diffs := make([]float64, 0, len(slice)-1)
	for i, next := range slice[1:] {
		// slice[1:][i] is slice[i+1], so next-slice[i] is the forward difference.
		diffs = append(diffs, next-slice[i])
	}
	return diffs
}
// Compare returns the maximum and minimum of two floats while ignoring
// NaN operands: if one operand is NaN the other is returned for both
// results, and if both are NaN the results are NaN.
func Compare(x float64, y float64) (max float64, min float64) {
	xValid, yValid := !math.IsNaN(x), !math.IsNaN(y)
	switch {
	case xValid && yValid:
		return math.Max(x, y), math.Min(x, y)
	case xValid:
		return x, x
	case yValid:
		return y, y
	default:
		return math.NaN(), math.NaN()
	}
}
// FindIndex returns the index of the first occurrence of val and true,
// or -1 and false when val is absent.
func FindIndex(slice []float64, val float64) (int, bool) {
	for idx := range slice {
		if slice[idx] == val {
			return idx, true
		}
	}
	return -1, false
}
// Insert returns the slice with val placed at index idx, shifting the
// later elements one position to the right. The input's backing array
// may be reused.
func Insert(slice []float64, idx int, val float64) []float64 {
	grown := append(slice, 0)
	// Shift everything at idx and beyond one slot to the right.
	for i := len(grown) - 1; i > idx; i-- {
		grown[i] = grown[i-1]
	}
	grown[idx] = val
	return grown
}
// RemoveFrom returns the slice with the element at index s removed.
// The input's backing array is shifted in place.
func RemoveFrom(slice []float64, s int) []float64 {
	copy(slice[s:], slice[s+1:])
	return slice[:len(slice)-1]
}
// Contains reports whether s occurs in the slice.
func Contains(slice []float64, s float64) bool {
	for i := range slice {
		if slice[i] == s {
			return true
		}
	}
	return false
}
// SliceOf returns a slice of the given size with every element set to
// value; non-positive sizes yield nil.
func SliceOf(value float64, size int) []float64 {
	if size <= 0 {
		return nil
	}
	filled := make([]float64, size)
	for i := 0; i < len(filled); i++ {
		filled[i] = value
	}
	return filled
}
// Cumsum returns the running (cumulative) sum of the input slice.
func Cumsum(slice []float64) []float64 {
	sums := make([]float64, len(slice))
	var running float64
	for i, v := range slice {
		running += v
		sums[i] = running
	}
	return sums
}
// MulSlices multiplies two equal-length slices element-wise; slices of
// different lengths yield nil.
func MulSlices(s1, s2 []float64) []float64 {
	if len(s1) != len(s2) {
		return nil
	}
	products := make([]float64, len(s1))
	for i := range products {
		products[i] = s1[i] * s2[i]
	}
	return products
}
package bob
//go:generate go run ./gen/main.go . PartX3 Mat6Pair mat6big all PartNotX3 mat6small
import (
"bytes"
"fmt"
"io"
"math"
)
/*
* Whoever designed this "binary" format should take a hard look at
* himself in the mirror. Mixing 32 bit and 16 bit array sizes and
* special casing types we decode to by flags...
*
* We could almost use encoding/binary for this. If it weren't for the
* bloody 0-terminated strings, they screw everything up.
* Also, bufio would be nice, except that handling short reads from
* bufio made things 3-4 times slower (why bufio gives us short reads
* for 4 byte reads is...).
*
* This package is written with manual buffers and so many things
* unrolled and not done generically because every single change from
* the original generic/reflect approach has been carefully benchmarked
* and going from 900ms to decode a mid-sized model to 30ms felt like
* a good trade-off for the increased complexity of this code.
*/
// This keeps track of our reading. `buffer` is an internal buffer for
// future reads. `w` is a window into the buffer that keeps track of
// how much we've consumed.
type bobReader struct {
	buffer [4096]byte // backing storage for buffered reads
	source io.Reader // the underlying data source
	eof bool // set once source has reported EOF
	w []byte // unconsumed window into buffer (or residue of past reads)
}
// sTag is a four-byte section tag, e.g. "BOB1" or "/BOB".
type sTag [4]byte

// all is the top-level decode target: the whole file is a single
// BOB1.../BOB section containing one Bob.
type all struct {
	b Bob `bobgen:"sect:BOB1:/BOB"`
}
// Read decodes a complete BOB model from r and returns it, or the
// first decoding error encountered.
func Read(r io.Reader) (*Bob, error) {
	var wrapper all
	if err := wrapper.Decode(&bobReader{source: r}); err != nil {
		return nil, err
	}
	return &wrapper.b, nil
}
// data returns a slice at least l bytes long. For requests that fit in
// the internal buffer the slice aliases the buffer (and may be longer
// than l); larger requests get a dedicated allocation of exactly l
// bytes. When consume is true the l bytes are also removed from the
// window; otherwise a later call sees them again.
//
// Unlike a bare Read, io.ReadFull/io.ReadAtLeast loop over legitimate
// short reads, so a source that returns data in small pieces is not
// misreported as EOF and the window is never left shorter than l.
func (r *bobReader) data(l int, consume bool) ([]byte, error) {
	if len(r.w) < l {
		if l > len(r.buffer) {
			// Oversized request: satisfy it with a one-off allocation,
			// draining whatever is already buffered first.
			ret := make([]byte, l)
			resid := copy(ret, r.w)
			r.w = r.w[resid:]
			if resid == l {
				return ret, nil
			}
			if _, err := io.ReadFull(r.source, ret[resid:]); err != nil {
				if err == io.EOF || err == io.ErrUnexpectedEOF {
					r.eof = true
					err = io.EOF
				}
				return nil, err
			}
			return ret, nil
		}
		if r.eof {
			return nil, io.EOF
		}
		// Refill the internal buffer, keeping the unconsumed residue at
		// the front, and require at least the missing l-resid bytes.
		resid := copy(r.buffer[:], r.w)
		n, err := io.ReadAtLeast(r.source, r.buffer[resid:], l-resid)
		if err != nil {
			if err == io.EOF || err == io.ErrUnexpectedEOF {
				r.eof = true
				err = io.EOF
			}
			return nil, err
		}
		r.w = r.buffer[:resid+n]
	}
	ret := r.w
	if consume {
		r.eat(l)
	}
	_ = ret[l-1] // bounds-check hint: the window holds at least l bytes
	return ret, nil
}
// eat advances the window past l consumed bytes.
func (r *bobReader) eat(l int) {
	r.w = r.w[l:]
}
// matchTag peeks at the next four bytes and reports whether they equal
// the expected tag, consuming them only on a match. Peeking (rather
// than consuming) matters for optional sections: on a mismatch the
// next read must still see either another section start or a section
// end.
func (r *bobReader) matchTag(expect sTag) (bool, error) {
	buf, err := r.data(4, false)
	if err != nil {
		return false, err
	}
	for i := 0; i < 4; i++ {
		if buf[i] != expect[i] {
			return false, nil
		}
	}
	r.eat(4)
	return true, nil
}
// sect decodes one tagged section: it expects the start tag s, runs f
// to decode the section body, then expects the end tag e. When the
// start tag is absent and optional is true, nothing is consumed and no
// error is returned.
func (r *bobReader) sect(s, e sTag, optional bool, f func() error) error {
	ok, err := r.matchTag(s)
	if err != nil {
		return err
	}
	if !ok {
		if optional {
			return nil
		}
		return fmt.Errorf("unexpected [%s], expected [%s]", r.w[:4], s)
	}
	if err := f(); err != nil {
		return err
	}
	ok, err = r.matchTag(e)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("unexpected [%s]%v, expected [%s]", r.w[:4], r.w[:4], e)
	}
	return nil
}
const (
	// len32 marks an array whose length prefix is decoded as 32 bits
	// (used in the `bobgen` struct tags above).
	len32 = uint(1 << iota)
)

// decoder is implemented by every type that can decode itself from a
// bobReader.
type decoder interface {
	Decode(*bobReader) error
}
// dec16 decodes the first two bytes of d as a big-endian signed 16-bit
// integer.
func dec16(d []byte) int16 {
	_ = d[1] // bounds-check hint
	hi := uint16(d[0]) << 8
	lo := uint16(d[1])
	return int16(hi | lo)
}
// decode16 consumes the next two bytes and decodes them as a
// big-endian signed 16-bit integer.
func (r *bobReader) decode16() (int16, error) {
	buf, err := r.data(2, true)
	if err != nil {
		return 0, err
	}
	return dec16(buf), nil
}
// dec32 decodes the first four bytes of d as a big-endian signed 32-bit
// integer.
func dec32(d []byte) int32 {
	_ = d[3] // bounds-check hint
	v := uint32(d[0])<<24 | uint32(d[1])<<16 | uint32(d[2])<<8 | uint32(d[3])
	return int32(v)
}
// decode32 consumes the next four bytes and decodes them as a
// big-endian signed 32-bit integer.
func (r *bobReader) decode32() (int32, error) {
	buf, err := r.data(4, true)
	if err != nil {
		return 0, err
	}
	return dec32(buf), nil
}
// decf32 decodes the first four bytes of d as a big-endian IEEE-754
// 32-bit float.
func decf32(d []byte) float32 {
	bits := uint32(d[0])<<24 | uint32(d[1])<<16 | uint32(d[2])<<8 | uint32(d[3])
	return math.Float32frombits(bits)
}
// decodef32 consumes the next four bytes and decodes them as a
// big-endian IEEE-754 32-bit float.
func (r *bobReader) decodef32() (float32, error) {
	buf, err := r.data(4, true)
	if err != nil {
		return 0, err
	}
	return decf32(buf), nil
}
// decodeString reads a 0-terminated string. data(1, false) returns the
// whole currently-buffered window (at least one byte), so each pass
// scans everything available for the terminator; the loop only runs
// when the terminator is not in the first window.
func (r *bobReader) decodeString() (string, error) {
	b, err := r.data(1, false)
	if err != nil {
		return "", err
	}
	off := bytes.IndexByte(b, 0)
	if off != -1 {
		// trivial case: the terminator is already buffered, so the
		// string can be taken in one piece
		s := string(b[:off])
		r.eat(off + 1)
		return s, nil
	}
	// Slow path: accumulate window-sized chunks until a window
	// containing the terminator shows up.
	done := false
	ret := make([]byte, 0)
	for !done {
		b, err := r.data(1, false)
		if err != nil {
			return "", err
		}
		off := bytes.IndexByte(b, 0)
		if off != -1 {
			done = true
			ret = append(ret, b[:off]...)
			r.eat(off + 1) // also consume the terminator itself
		} else {
			ret = append(ret, b...)
			r.eat(len(b))
		}
	}
	return string(ret), nil
}
// Bob is a fully decoded model file: an optional info string, the
// materials, and the mesh bodies.
type Bob struct {
	Info string `bobgen:"sect:INFO:/INF,optional"`
	Mat6 []material6 `bobgen:"sect:MAT6:/MAT,len32"`
	Bodies []Body `bobgen:"sect:BODY:/BOD"`
}

// mat6Value is one typed name/value property of a "big" material.
// Which of the value fields below is populated depends on Type (see
// mat6Value.Decode).
type mat6Value struct {
	Name string
	Type int16
	// only one of the following is set, keyed by Type
	b int32
	i int32
	f float32
	f4 [4]float32
	s string
}
// Decode reads one material property: a 0-terminated name, a 16-bit
// type tag, and a type-dependent value. Unknown type tags are an error.
func (m *mat6Value) Decode(r *bobReader) error {
	name, err := r.decodeString()
	if err != nil {
		// Previously this error was silently dropped; fail fast so a
		// truncated stream is reported at the point of failure.
		return err
	}
	m.Name = name
	m.Type, err = r.decode16()
	if err != nil {
		return err
	}
	// XXX - make constants, not magic numbers here.
	switch m.Type {
	case 0:
		m.i, err = r.decode32()
	case 1:
		m.b, err = r.decode32()
	case 2:
		m.f, err = r.decodef32()
	case 5:
		for i := range m.f4 {
			if m.f4[i], err = r.decodef32(); err != nil {
				return err
			}
		}
	case 8:
		m.s, err = r.decodeString()
	default:
		return fmt.Errorf("unknown mat6 type %x", m.Type)
	}
	return err
}
// Mat6Pair is a named 16-bit material value (used for texture map
// references in mat6small).
type Mat6Pair struct {
	Name string
	Value int16
}

// mat6big is the flexible material representation: a technique id, an
// effect name, and an open-ended list of typed properties.
type mat6big struct {
	Technique int16
	Effect string
	Value []mat6Value
}

// mat6small is the fixed-layout material representation.
type mat6small struct {
	TextureFile string
	Ambient, Diffuse, Specular [3]int16
	Transparency int32
	SelfIllumination int16
	Shininess [2]int16
	TextureValue int16
	EnvironmentMap Mat6Pair
	BumpMap Mat6Pair
	LightMap Mat6Pair
	Map4 Mat6Pair
	Map5 Mat6Pair
}

// matFlagBig selects the mat6big representation when it is the entire
// flags word (see material6.Decode).
const matFlagBig = 0x2000000

// material6 is one material entry: an index, flags, and either a
// mat6big or a mat6small depending on the flags.
type material6 struct {
	Index int16
	Flags int32
	mat interface{} // holds a mat6big or mat6small value
}
// Decode reads a material entry: its 16-bit index, a 32-bit flags
// word, and then either the "big" (property-list) or "small" (fixed
// layout) material body, selected by the flags.
func (m *material6) Decode(r *bobReader) error {
	index, err := r.decode16()
	if err != nil {
		// Previously this error was silently dropped; report it instead
		// of decoding from a desynchronized stream.
		return err
	}
	m.Index = index
	m.Flags, err = r.decode32()
	if err != nil {
		return err
	}
	if m.Flags == matFlagBig {
		mx := mat6big{}
		err = mx.Decode(r)
		m.mat = mx
		return err
	}
	mx := mat6small{}
	err = mx.Decode(r)
	m.mat = mx
	return err
}
// point is one raw vertex record: a type tag and up to 11 packed
// 32-bit values; how many are meaningful depends on typ (see
// point.Decode).
type point struct {
	typ int16
	values [11]int32
}
// Decode reads a point record: a 16-bit type tag followed by a
// type-dependent count of big-endian 32-bit values (11, 9 or 7).
// Unknown tags are an error.
func (p *point) Decode(r *bobReader) error {
	tag, err := r.decode16()
	if err != nil {
		return err
	}
	p.typ = tag
	var count int
	switch tag {
	case 0x1f:
		count = 11
	case 0x1b:
		count = 9
	case 0x19:
		count = 7
	default:
		return fmt.Errorf("unknown point type %d", p.typ)
	}
	raw, err := r.data(count*4, true)
	if err != nil {
		return err
	}
	for i := 0; i < count; i++ {
		p.values[i] = dec32(raw[4*i : 4*i+4])
	}
	return nil
}
// Wgt is a single bone weight: a bone index and its coefficient
// (encoding of Coeff is not evident here — presumably fixed point).
type Wgt struct {
	Idx int16
	Coeff int32
}

// Weight is the set of bone weights attached to one vertex.
type Weight struct {
	Weights []Wgt
}

// uv is a texture-coordinate record: a face index and six values.
type uv struct {
	Idx int32
	Values [6]float32
}

// faceList groups the faces that share one material.
type faceList struct {
	MaterialIndex int32
	Faces [][4]int32 `bobgen:"len32"`
}

// faceListX3 is the extended (X3) variant of faceList that also
// carries per-face UV records.
type faceListX3 struct {
	MaterialIndex int32
	Faces [][4]int32 `bobgen:"len32"`
	UVList []uv `bobgen:"len32"`
}

// PartX3 is the extended part body: X3 face lists plus ten extra values.
type PartX3 struct {
	FacesX3 []faceListX3
	X3Vals [10]int32
}

// PartNotX3 is the plain part body: face lists only.
type PartNotX3 struct {
	Faces []faceList
}

// Part is one mesh part: its flags and either a PartX3 or PartNotX3
// body, selected by the flags (see Part.Decode).
type Part struct {
	Flags int32
	P interface{}
}
// Decode reads a part: a 32-bit flags word followed by either the
// extended (X3) or plain body, selected by one bit of the flags.
func (p *Part) Decode(r *bobReader) error {
	flags, err := r.decode32()
	if err != nil {
		return err
	}
	p.Flags = flags
	const x3Flag = 0x10000000
	if flags&x3Flag != 0 {
		body := PartX3{}
		err = body.Decode(r)
		p.P = body
		return err
	}
	body := PartNotX3{}
	err = body.Decode(r)
	p.P = body
	return err
}
// Body is one mesh body: a size and flags followed by optional tagged
// sections for bone names, vertex points, bone weights, and parts.
type Body struct {
	Size int32
	Flags int32
	Bones []string `bobgen:"sect:BONE:/BON,len32,optional"`
	Points []point `bobgen:"sect:POIN:/POI,len32,optional"`
	Weights []Weight `bobgen:"sect:WEIG:/WEI,len32,optional"`
	Parts []Part `bobgen:"sect:PART:/PAR,len32,optional"`
}
package config
import (
"os"
"strings"
"time"
authorizerd "github.com/yahoojapan/athenz-authorizer/v5"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
)
const (
	// currentVersion represents the current configuration version
	// (compare against Config.Version when validating a loaded file).
	currentVersion = "v2.0.0"
)
// Config represents the configuration (config.yaml) of authorization proxy.
type Config struct {
	// Version represents the configuration file version.
	Version string `yaml:"version"`

	// Server represents the authorization proxy and the health check server configuration.
	Server Server `yaml:"server"`

	// Athenz represents the Athenz server connection configuration.
	Athenz Athenz `yaml:"athenz"`

	// Proxy represents the proxy destination configuration.
	Proxy Proxy `yaml:"proxy"`

	// Authorization represents the detail authorization configuration.
	Authorization Authorization `yaml:"authorization"`

	// Log represents the logger configuration.
	Log Log `yaml:"log"`
}

// Server represents the authorization proxy and the health check server configuration.
type Server struct {
	// Port represents the server listening port.
	Port int `yaml:"port"`

	// Timeout represents the maximum request handling duration
	// (presumably a Go duration string such as "10s" — confirm against the parser).
	Timeout string `yaml:"timeout"`

	// ShutdownTimeout represents the duration before force shutdown.
	ShutdownTimeout string `yaml:"shutdownTimeout"`

	// ShutdownDelay represents the delay duration between the health check server shutdown and the client sidecar server shutdown.
	ShutdownDelay string `yaml:"shutdownDelay"`

	// TLS represents the TLS configuration of the authorization proxy.
	TLS TLS `yaml:"tls"`

	// HealthCheck represents the health check server configuration.
	HealthCheck HealthCheck `yaml:"healthCheck"`

	// Debug represents the debug server configuration.
	Debug Debug `yaml:"debug"`
}

// TLS represents the TLS configuration of the authorization proxy.
type TLS struct {
	// Enable represents whether to enable TLS.
	Enable bool `yaml:"enable"`

	// CertPath represents the server certificate file path.
	CertPath string `yaml:"certPath"`

	// KeyPath represents the private key file path of the server certificate.
	KeyPath string `yaml:"keyPath"`

	// CAPath represents the CA certificate chain file path for verifying client certificates.
	CAPath string `yaml:"caPath"`
}

// HealthCheck represents the health check server configuration.
type HealthCheck struct {
	// Port represents the server listening port.
	Port int `yaml:"port"`

	// Endpoint represents the health check endpoint (pattern).
	Endpoint string `yaml:"endpoint"`
}

// Debug represents the debug server configuration.
type Debug struct {
	// Enable represents if user want to enable debug server functionality.
	Enable bool `yaml:"enable"`

	// Port represents debug server port.
	Port int `yaml:"port"`

	// Dump represents whether to enable memory dump functionality.
	Dump bool `yaml:"dump"`

	// Profiling represents whether to enable profiling functionality.
	Profiling bool `yaml:"profiling"`
}
// Athenz represents the Athenz server connection configuration.
type Athenz struct {
	// URL represents the Athenz (ZMS or ZTS) API URL.
	URL string `yaml:"url"`

	// Timeout represents the request timeout duration to Athenz server
	// (presumably a Go duration string — confirm against the parser).
	Timeout string `yaml:"timeout"`

	// CAPath represents the CA certificate chain file path for verifying Athenz server certificate.
	CAPath string `yaml:"caPath"`
}

// Proxy represents the proxy destination configuration.
type Proxy struct {
	// Scheme represents the HTTP URL scheme of the proxy destination, default is http.
	Scheme string `yaml:"scheme"`

	// Host represents the proxy destination host, for example, localhost.
	Host string `yaml:"host"`

	// Port represents the proxy destination port number.
	Port uint16 `yaml:"port"`

	// BufferSize represents the reverse proxy buffer size.
	BufferSize uint64 `yaml:"bufferSize"`

	// OriginHealthCheckPaths represents health check paths of your origin application.
	// WARNING!!! Setting this configuration may introduce security hole in your system. ONLY set this configuration as the application's health check endpoint.
	// Tips for performance: define your health check endpoint with a different length from the most frequently used endpoint, for example, use `/healthcheck` (len: 12) when `/most_used` (len: 10), instead of `/healthccc` (len: 10)
	OriginHealthCheckPaths []string `yaml:"originHealthCheckPaths"`

	// Transport exposes http.Transport parameters
	Transport Transport `yaml:"transport,omitempty"`
}
// Authorization represents the detail authorization configuration.
type Authorization struct {
	// AthenzDomains represents Athenz domains containing the RBAC policies.
	AthenzDomains []string `yaml:"athenzDomains"`

	// PublicKey represents the configuration to fetch Athenz public keys.
	PublicKey PublicKey `yaml:"publicKey"`

	// Policy represents the configuration to fetch Athenz policies.
	Policy Policy `yaml:"policy"`

	// JWK represents the configuration to fetch Athenz JWK.
	JWK JWK `yaml:"jwk"`

	// AccessToken represents the configuration to control access token verification.
	AccessToken AccessToken `yaml:"accessToken"`

	// RoleToken represents the configuration to control role token verification.
	RoleToken RoleToken `yaml:"roleToken"`
}

// PublicKey represents the configuration to fetch Athenz public keys.
type PublicKey struct {
	// SysAuthDomain represents the system authentication domain of Athenz.
	SysAuthDomain string `yaml:"sysAuthDomain"`

	// RefreshPeriod represents the duration of the refresh period.
	RefreshPeriod string `yaml:"refreshPeriod"`

	// RetryDelay represents the duration between each retry.
	RetryDelay string `yaml:"retryDelay"`

	// ETagExpiry represents the duration before Etag expires.
	ETagExpiry string `yaml:"eTagExpiry"`

	// ETagPurgePeriod represents the duration of purging expired items in the ETag cache.
	ETagPurgePeriod string `yaml:"eTagPurgePeriod"`
}

// Policy represents the configuration to fetch Athenz policies.
type Policy struct {
	// Disable decides whether to check the policy.
	Disable bool `yaml:"disable"`

	// ExpiryMargin represents the policy expiry margin to force refresh policies beforehand.
	ExpiryMargin string `yaml:"expiryMargin"`

	// RefreshPeriod represents the duration of the refresh period.
	RefreshPeriod string `yaml:"refreshPeriod"`

	// PurgePeriod represents the duration of purging expired items in the cache.
	PurgePeriod string `yaml:"purgePeriod"`

	// RetryDelay represents the duration between each retry.
	RetryDelay string `yaml:"retryDelay"`

	// RetryAttempts represents number of attempts to retry.
	RetryAttempts int `yaml:"retryAttempts"`

	// MappingRules represents translation rules for determining action and resource.
	MappingRules map[string][]authorizerd.Rule `yaml:"mappingRules"`
}

// JWK represents the configuration to fetch Athenz JWK.
type JWK struct {
	// RefreshPeriod represents the duration of the refresh period.
	RefreshPeriod string `yaml:"refreshPeriod"`

	// RetryDelay represents the duration between each retry.
	RetryDelay string `yaml:"retryDelay"`

	// URLs represents URLs that delivers JWK Set excluding athenz.
	URLs []string `yaml:"urls"`
}

// AccessToken represents the configuration to control access token verification.
type AccessToken struct {
	// Enable decides whether to verify access token.
	Enable bool `yaml:"enable"`

	// VerifyCertThumbprint represents whether to enforce certificate thumbprint verification.
	VerifyCertThumbprint bool `yaml:"verifyCertThumbprint"`

	// VerifyClientID represents whether to enforce certificate common name and client_id verification.
	VerifyClientID bool `yaml:"verifyClientID"`

	// AuthorizedClientIDs represents list of allowed client_id and common name.
	AuthorizedClientIDs map[string][]string `yaml:"authorizedClientIDs"`

	// CertBackdateDuration represents the certificate issue time backdating duration. (for usecase: new cert + old token)
	CertBackdateDuration string `yaml:"certBackdateDuration"`

	// CertOffsetDuration represents the certificate issue time offset when comparing with the issue time of the access token. (for usecase: new cert + old token)
	CertOffsetDuration string `yaml:"certOffsetDuration"`
}

// RoleToken represents the configuration to control role token verification.
type RoleToken struct {
	// Enable decides whether to verify role token.
	Enable bool `yaml:"enable"`

	// RoleAuthHeader represents the HTTP header for extracting the role token.
	RoleAuthHeader string `yaml:"roleAuthHeader"`
}

// Log represents the logger configuration.
type Log struct {
	// Level represents the logger output level. Values: "debug", "info", "warn", "error", "fatal".
	Level string `yaml:"level"`

	// Color represents whether to print ANSI escape code.
	Color bool `yaml:"color"`
}

// Transport exposes a subset of Transport parameters. reference: https://github.com/golang/go/blob/master/src/net/http/transport.go#L95
type Transport struct {
	TLSHandshakeTimeout time.Duration `yaml:"tlsHandshakeTimeout,omitempty"`
	DisableKeepAlives bool `yaml:"disableKeepAlives,omitempty"`
	DisableCompression bool `yaml:"disableCompression,omitempty"`
	MaxIdleConns int `yaml:"maxIdleConns,omitempty"`
	MaxIdleConnsPerHost int `yaml:"maxIdleConnsPerHost,omitempty"`
	MaxConnsPerHost int `yaml:"maxConnsPerHost,omitempty"`
	IdleConnTimeout time.Duration `yaml:"idleConnTimeout,omitempty"`
	ResponseHeaderTimeout time.Duration `yaml:"responseHeaderTimeout,omitempty"`
	ExpectContinueTimeout time.Duration `yaml:"expectContinueTimeout,omitempty"`
	MaxResponseHeaderBytes int64 `yaml:"maxResponseHeaderBytes,omitempty"`
	WriteBufferSize int `yaml:"writeBufferSize,omitempty"`
	ReadBufferSize int `yaml:"readBufferSize,omitempty"`
	ForceAttemptHTTP2 bool `yaml:"forceAttemptHTTP2,omitempty"`
}
// New returns the decoded configuration YAML file as *Config struct.
// Returns non-nil error if the file cannot be opened or parsed.
func New(path string) (*Config, error) {
	f, err := os.OpenFile(path, os.O_RDONLY, 0600)
	if err != nil {
		return nil, errors.Wrap(err, "OpenFile failed")
	}
	// Close the handle once decoding is done; decoding only reads, so a
	// Close error carries no information worth surfacing.
	defer f.Close()
	cfg := new(Config)
	err = yaml.NewDecoder(f).Decode(&cfg)
	if err != nil {
		return nil, errors.Wrap(err, "decode file failed")
	}
	return cfg, nil
}
// GetVersion returns the current configuration version of the
// authorization proxy (the package-level currentVersion constant).
func GetVersion() string {
	return currentVersion
}
// GetActualValue resolves a configuration value: a value wrapped in
// single underscores ("_NAME_") is treated as an environment variable
// reference and replaced by os.Getenv("NAME"); anything else is
// returned unchanged.
func GetActualValue(val string) string {
	if strings.HasPrefix(val, "_") && strings.HasSuffix(val, "_") {
		key := strings.TrimSuffix(strings.TrimPrefix(val, "_"), "_")
		return os.Getenv(key)
	}
	return val
}
// checkPrefixAndSuffix reports whether str both starts with pref and
// ends with suf.
func checkPrefixAndSuffix(str, pref, suf string) bool {
	if !strings.HasPrefix(str, pref) {
		return false
	}
	return strings.HasSuffix(str, suf)
}
package runtime
import "reflect"
// GT reports whether left is strictly greater than right. Operands may
// be int or float64; mixed operands are compared as float64. Any other
// operand type panics.
func GT(left interface{}, right interface{}) bool {
	switch l := left.(type) {
	case int:
		switch r := right.(type) {
		case int:
			return l > r
		case float64:
			return float64(l) > r
		}
		panic("can not compare int with " + reflect.TypeOf(right).String())
	case float64:
		switch r := right.(type) {
		case int:
			return l > float64(r)
		case float64:
			return l > r
		}
		panic("can not compare float with " + reflect.TypeOf(right).String())
	}
	panic("compare does not support " + reflect.TypeOf(left).String())
}
// GE reports whether left is greater than or equal to right. Operands
// may be int or float64; mixed operands are compared as float64. Any
// other operand type panics.
func GE(left interface{}, right interface{}) bool {
	switch l := left.(type) {
	case int:
		switch r := right.(type) {
		case int:
			return l >= r
		case float64:
			return float64(l) >= r
		}
		panic("can not compare int with " + reflect.TypeOf(right).String())
	case float64:
		switch r := right.(type) {
		case int:
			return l >= float64(r)
		case float64:
			return l >= r
		}
		panic("can not compare float with " + reflect.TypeOf(right).String())
	}
	panic("compare does not support " + reflect.TypeOf(left).String())
}
// LT reports whether left is strictly less than right. Operands may be
// int or float64; mixed operands are compared as float64. Any other
// operand type panics.
func LT(left interface{}, right interface{}) bool {
	switch l := left.(type) {
	case int:
		switch r := right.(type) {
		case int:
			return l < r
		case float64:
			return float64(l) < r
		}
		panic("can not compare int with " + reflect.TypeOf(right).String())
	case float64:
		switch r := right.(type) {
		case int:
			return l < float64(r)
		case float64:
			return l < r
		}
		panic("can not compare float with " + reflect.TypeOf(right).String())
	}
	panic("compare does not support " + reflect.TypeOf(left).String())
}
// LE reports whether left is less than or equal to right. Operands may
// be int or float64; mixed operands are compared as float64. Any other
// operand type panics.
func LE(left interface{}, right interface{}) bool {
	switch l := left.(type) {
	case int:
		switch r := right.(type) {
		case int:
			return l <= r
		case float64:
			return float64(l) <= r
		}
		panic("can not compare int with " + reflect.TypeOf(right).String())
	case float64:
		switch r := right.(type) {
		case int:
			return l <= float64(r)
		case float64:
			return l <= r
		}
		panic("can not compare float with " + reflect.TypeOf(right).String())
	}
	panic("compare does not support " + reflect.TypeOf(left).String())
}
// EQ reports whether left == right. Supported operand types are int,
// float64 and string; mixed int/float64 comparisons are promoted to
// float64, strings compare only with strings. Any other combination
// panics. Note that int/float64 equality is exact floating-point
// equality after promotion.
func EQ(left interface{}, right interface{}) bool {
	switch typedLeft := left.(type) {
	case int:
		switch typedRight := right.(type) {
		case int:
			// typedLeft is already an int; no conversion needed.
			return typedLeft == typedRight
		case float64:
			return float64(typedLeft) == typedRight
		default:
			panic("can not compare int with " + reflect.TypeOf(right).String())
		}
	case float64:
		switch typedRight := right.(type) {
		case int:
			return typedLeft == float64(typedRight)
		case float64:
			return typedLeft == typedRight
		default:
			panic("can not compare float with " + reflect.TypeOf(right).String())
		}
	case string:
		switch typedRight := right.(type) {
		case string:
			return typedLeft == typedRight
		default:
			panic("can not compare string with " + reflect.TypeOf(right).String())
		}
	default:
		panic("compare does not support " + reflect.TypeOf(left).String())
	}
}
package tree
import "github.com/strict-lang/sdk/pkg/compiler/input"
// Node is implemented by every node of the tree.
// Node is implemented by every node of the tree.
type Node interface {
	// Locate returns the input region this node spans.
	Locate() input.Region
	// Accept makes the visitor visit this node.
	Accept(visitor Visitor)
	// AcceptRecursive makes the visitor visit this node and its children.
	AcceptRecursive(visitor Visitor)
	// Matches checks whether the instance matches the passed node. It does not
	// take positions into account.
	Matches(node Node) bool
	// EnclosingNode returns the node that encloses the passed node. In the case
	// of parameters this is the method they belong to. The second result is
	// false when no enclosing node has been recorded.
	EnclosingNode() (node Node, exists bool)
	// SetEnclosingNode records target as this node's enclosing node.
	SetEnclosingNode(target Node)
}
// Named is implemented by all nodes that have a name, such as
// declarations and identifiers.
type Named interface {
	// Name returns the node's name.
	Name() string
}
// NodeKind identifies the concrete kind of a tree node. Kinds are laid
// out in contiguous numeric ranges (expressions, statements,
// declarations, type names) delimited by unexported begin/end sentinel
// values, so category membership reduces to a range check (see
// IsExpression, IsStatement, IsDeclaration).
type NodeKind int

const (
	invalidKind NodeKind = iota
	UnknownNodeKind
	// Expression kinds lie strictly between expressionKindBegin and
	// expressionKindEnd.
	expressionKindBegin
	IdentifierNodeKind
	StringLiteralNodeKind
	NumberLiteralNodeKind
	ChainExpressionNodeKind
	ListSelectExpressionNodeKind
	BinaryExpressionNodeKind
	UnaryExpressionNodeKind
	PostfixExpressionNodeKind
	CreateExpressionNodeKind
	CallArgumentNodeKind
	CallExpressionNodeKind
	LetBindingNodeKind
	expressionKindEnd
	// Statement kinds lie strictly between statementKindBegin and
	// statementKindEnd.
	statementKindBegin
	ConditionalStatementNodeKind
	InvalidStatementNodeKind
	BreakStatementNodeKind
	YieldStatementNodeKind
	StatementBlockNodeKind
	AssertStatementNodeKind
	ReturnStatementNodeKind
	ImportStatementNodeKind
	EmptyStatementNodeKind
	TestStatementNodeKind
	AssignStatementNodeKind
	ExpressionStatementNodeKind
	ForEachLoopStatementNodeKind
	RangedLoopStatementNodeKind
	ImplementStatementNodeKind
	ListExpressionNodeKind
	GenericStatementNodeKind
	statementKindEnd
	// Declaration kinds lie strictly between declarationKindBegin and
	// declarationKindEnd.
	declarationKindBegin
	ParameterNodeKind
	FieldDeclarationNodeKind
	MethodDeclarationNodeKind
	ClassDeclarationNodeKind
	ConstructorDeclarationNodeKind
	declarationKindEnd
	// Type-name kinds lie strictly between typeNameKindBegin and
	// typeNameKindEnd.
	typeNameKindBegin
	TypeNameNodeGroup // Used only in parsing
	ListTypeNameNodeKind
	GenericTypeNameNodeKind
	ConcreteTypeNameNodeKind
	OptionalTypeNameNodeKind
	typeNameKindEnd
	TranslationUnitNodeKind
	WildcardNodeKind
)
// nodeKindNames maps each public NodeKind to its human readable name,
// used by NodeKind.Name and NodeKind.String. Sentinel kinds are
// deliberately absent and report "invalid".
var nodeKindNames = map[NodeKind]string{
	UnknownNodeKind:                "Unknown",
	IdentifierNodeKind:             "Identifier",
	StringLiteralNodeKind:          "StringLiteral",
	NumberLiteralNodeKind:          "NumberLiteral",
	ChainExpressionNodeKind:        "ChainExpression",
	ListSelectExpressionNodeKind:   "ListSelectExpression",
	BinaryExpressionNodeKind:       "BinaryExpression",
	UnaryExpressionNodeKind:        "UnaryExpression",
	PostfixExpressionNodeKind:      "PostfixExpression",
	CreateExpressionNodeKind:       "CreateExpression",
	CallArgumentNodeKind:           "CallArgument",
	CallExpressionNodeKind:         "CallExpression",
	ConditionalStatementNodeKind:   "ConditionalStatement",
	InvalidStatementNodeKind:       "InvalidStatement",
	YieldStatementNodeKind:         "YieldStatement",
	StatementBlockNodeKind:         "StatementBlock",
	AssertStatementNodeKind:        "AssertStatement",
	ReturnStatementNodeKind:        "ReturnStatement",
	ImportStatementNodeKind:        "ImportStatement",
	EmptyStatementNodeKind:         "EmptyStatement",
	BreakStatementNodeKind:         "BreakStatement",
	TestStatementNodeKind:          "TestStatement",
	AssignStatementNodeKind:        "AssignStatement",
	ExpressionStatementNodeKind:    "ExpressionStatement",
	ForEachLoopStatementNodeKind:   "ForEachLoopStatement",
	RangedLoopStatementNodeKind:    "RangedLoopStatement",
	ParameterNodeKind:              "Parameter",
	FieldDeclarationNodeKind:       "FieldDeclaration",
	MethodDeclarationNodeKind:      "MethodDeclaration",
	ClassDeclarationNodeKind:       "ClassDeclaration",
	ConstructorDeclarationNodeKind: "ConstructorDeclaration",
	TypeNameNodeGroup:              "TypeName",
	ListTypeNameNodeKind:           "ListTypeName",
	GenericTypeNameNodeKind:        "GenericTypeName",
	ConcreteTypeNameNodeKind:       "ConcreteTypeName",
	OptionalTypeNameNodeKind:       "OptionalTypeName",
	TranslationUnitNodeKind:        "TranslationUnit",
	LetBindingNodeKind:             "LetBinding",
	ImplementStatementNodeKind:     "ImplementStatement",
	GenericStatementNodeKind:       "GenericStatement",
	WildcardNodeKind:               "Wildcard",
}
// IsExpression returns true if the kind is an expression, i.e. lies
// strictly inside the expression sentinel range.
func (kind NodeKind) IsExpression() bool {
	return expressionKindBegin < kind && kind < expressionKindEnd
}
// IsStatement returns true if the kind is a statement, i.e. lies
// strictly inside the statement sentinel range.
func (kind NodeKind) IsStatement() bool {
	return statementKindBegin < kind && kind < statementKindEnd
}
// IsDeclaration returns true if the kind is a declaration, i.e. lies
// strictly inside the declaration sentinel range.
func (kind NodeKind) IsDeclaration() bool {
	return declarationKindBegin < kind && kind < declarationKindEnd
}
// isInExclusiveRange reports whether tested lies strictly between begin
// and end. The receiver is unused; the value under test is always
// passed explicitly as tested.
func (kind NodeKind) isInExclusiveRange(tested, begin, end NodeKind) bool {
	if tested <= begin {
		return false
	}
	return tested < end
}
// Name returns the kind's registered human readable name, or "invalid"
// for sentinel or unregistered kinds.
func (kind NodeKind) Name() string {
	if name, ok := nodeKindNames[kind]; ok {
		return name
	}
	return "invalid"
}
func (kind NodeKind) String() string {
return kind.Name()
} | pkg/compiler/grammar/tree/node.go | 0.639511 | 0.518973 | node.go | starcoder |
package main
import (
"fmt"
)
// Key is the ordered key type of the map; ordering uses the builtin <
// on int.
type Key int

// Value is the value type stored in the map; any value may be stored.
type Value interface{}

// Node is a single binary-search-tree node holding one key/value pair
// and links to the left (smaller keys) and right (larger keys)
// subtrees.
type Node struct {
	key   Key
	value Value
	left  *Node
	right *Node
}
// Map implements a map from Key to Value.
// The underlying datastructure is BST (binary search tree).
// It is not guaranteed to be auto-balanced, so operations are O(n) in
// the worst case. The zero value is an empty, ready-to-use map.
type Map struct {
	root *Node
}
// Insert stores v under key k. If k is already present its value is
// overwritten in place; no error is ever reported. (The previous
// comment claiming an error return was wrong — the function returns
// nothing.)
func (m *Map) Insert(k Key, v Value) {
	newNode := &Node{
		key:   k,
		value: v,
	}
	// Empty tree: the new node becomes the root.
	if m.root == nil {
		m.root = newNode
		return
	}
	// Walk down from the root to the insertion point.
	curNode := m.root
	for {
		if curNode.key == k {
			// Key already present: update the value in place.
			curNode.value = v
			break
		}
		if curNode.key < k {
			// k belongs in the right subtree.
			if curNode.right == nil {
				curNode.right = newNode
				break
			}
			curNode = curNode.right
			continue
		}
		// k belongs in the left subtree.
		if curNode.left == nil {
			curNode.left = newNode
			break
		}
		curNode = curNode.left
	}
}
// Find returns a pointer to the value associated with k and true when
// the key is present; otherwise it returns nil and false. The pointer
// may be used to mutate the stored value in place.
func (m *Map) Find(k Key) (*Value, bool) {
	for node := m.root; node != nil; {
		switch {
		case node.key == k:
			return &node.value, true
		case node.key > k:
			node = node.left
		default:
			node = node.right
		}
	}
	return nil, false
}
// printPartTree renders the subtree rooted at r in preorder
// (node, left, right), one "(k: …, v: …)" pair per line.
func (m *Map) printPartTree(r *Node) string {
	result := fmt.Sprintf("(k: %v, v: %v)\n", r.key, r.value)
	for _, child := range []*Node{r.left, r.right} {
		if child != nil {
			result += m.printPartTree(child)
		}
	}
	return result
}
// String renders the whole map in preorder, or a placeholder message
// when the map is empty.
func (m *Map) String() string {
	if root := m.root; root != nil {
		return m.printPartTree(root)
	}
	return "Map is empty"
}
// Rm removes a given key if it is present in the map.
// Standard BST deletion: locate the node while tracking its parent,
// then handle the leaf, single-child and two-children cases. In the
// two-children case the node's key/value are replaced by its in-order
// successor (the smallest key in the right subtree), which is then
// unlinked.
func (m *Map) Rm(k Key) {
	if m.root == nil {
		return
	}
	// Search for k; parent always points at curNode's parent
	// (parent == curNode only while both are the root).
	parent := m.root
	curNode := m.root
	for curNode != nil {
		if curNode.key == k {
			break
		}
		parent = curNode
		if curNode.key > k {
			curNode = curNode.left
			continue
		}
		curNode = curNode.right
	}
	if curNode == nil {
		return // there is no key in map
	}
	// Case 1: leaf node — unlink it from its parent (or clear the root).
	if curNode.left == nil && curNode.right == nil {
		if curNode == m.root {
			m.root = nil
			return
		}
		if parent.left == curNode {
			parent.left = nil
		} else {
			parent.right = nil
		}
		return
	}
	// Case 2a: only a right child — splice the right subtree up.
	if curNode.left == nil {
		if curNode == m.root {
			m.root = curNode.right
			return
		}
		if parent.left == curNode {
			parent.left = curNode.right
			return
		}
		parent.right = curNode.right
		return
	}
	// Case 2b: only a left child — splice the left subtree up.
	if curNode.right == nil {
		if curNode == m.root {
			m.root = curNode.left
			return
		}
		if parent.left == curNode {
			parent.left = curNode.left
			return
		}
		parent.right = curNode.left
		return
	}
	// Case 3: two children. If the right child has no left subtree it is
	// itself the in-order successor: copy it up and bypass it.
	if curNode.right.left == nil {
		curNode.key = curNode.right.key
		curNode.value = curNode.right.value
		curNode.right = curNode.right.right
		return
	}
	// Otherwise descend to the parent of the leftmost node of the right
	// subtree, copy that leftmost node up and unlink it.
	leastNode := curNode.right
	for leastNode.left.left != nil {
		leastNode = leastNode.left
	}
	curNode.key = leastNode.left.key
	curNode.value = leastNode.left.value
	leastNode.left = leastNode.left.right
}
func main() {
var m Map
m.Insert(100, 100)
m.Insert(150, 150)
m.Insert(130, 130)
m.Insert(200, 200)
m.Insert(160, 160)
m.Insert(230, 230)
m.Insert(155, 155)
m.Insert(170, 170)
m.Insert(60, 60)
m.Insert(70, 70)
m.Insert(40, 40)
m.Insert(80, 80)
m.Insert(50, 50)
m.Insert(55, 55)
m.Insert(30, 30)
m.Insert(20, 20)
fmt.Println("Initial map")
fmt.Printf("%s", &m)
var key Key = 60
if v, ok := m.Find(key); ok {
fmt.Printf("\nValue for key %v is %v.\n", key, *v)
*v = 666
}
fmt.Printf("\nMap after changing value for key %v\n", key)
fmt.Printf("%s", &m)
key = 10
if _, ok := m.Find(key); !ok {
fmt.Printf("\nValue for key %v is not found!\n", key)
}
key = 150
fmt.Printf("\nMap after removing key %v\n", key)
m.Rm(key)
fmt.Printf("%s", &m)
} | cmd/map/main.go | 0.745306 | 0.451871 | main.go | starcoder |
package chart
import (
"time"
)
// SecondsPerXYZ
const (
SecondsPerHour = 60 * 60
SecondsPerDay = 60 * 60 * 24
)
// TimeMillis returns the duration expressed as fractional milliseconds.
func TimeMillis(d time.Duration) float64 {
	millis := float64(d) / float64(time.Millisecond)
	return millis
}
// AbsWithBranch returns the absolute value of n using a plain branch.
// Note: math.MinInt64 has no positive counterpart in int64, so it is
// returned unchanged (two's-complement negation overflows).
func AbsWithBranch(n int64) int64 {
	if n < 0 {
		return -n
	}
	return n
}
// DiffHours returns the difference in whole hours between two times.
// The result is non-negative regardless of argument order.
func DiffHours(t1, t2 time.Time) (hours int) {
	seconds := AbsWithBranch(t1.Unix() - t2.Unix())
	return int(seconds / SecondsPerHour)
}
// TimeMin returns the minimum and maximum times in a given range.
func TimeMin(times ...time.Time) (min time.Time) {
if len(times) == 0 {
return
}
min = times[0]
for index := 1; index < len(times); index++ {
if times[index].Before(min) {
min = times[index]
}
}
return
}
// TimeMax returns the minimum and maximum times in a given range.
func TimeMax(times ...time.Time) (max time.Time) {
if len(times) == 0 {
return
}
max = times[0]
for index := 1; index < len(times); index++ {
if times[index].After(max) {
max = times[index]
}
}
return
}
// TimeMinMax returns the minimum and maximum times in a given range.
func TimeMinMax(times ...time.Time) (min, max time.Time) {
if len(times) == 0 {
return
}
min = times[0]
max = times[0]
for index := 1; index < len(times); index++ {
if times[index].Before(min) {
min = times[index]
}
if times[index].After(max) {
max = times[index]
}
}
return
}
// TimeToFloat64 returns a float64 representation of a time.
func TimeToFloat64(t time.Time) float64 {
return float64(t.UnixNano())
}
// TimeFromFloat64 returns a time from a float64.
func TimeFromFloat64(tf float64) time.Time {
return time.Unix(0, int64(tf))
}
// TimeDescending sorts a given list of times descending, or max to min.
// (The previous comment said "ascending" — Less uses After, so the
// newest time sorts first.)
type TimeDescending []time.Time

// Len implements sort.Sorter
func (d TimeDescending) Len() int { return len(d) }

// Swap implements sort.Sorter
func (d TimeDescending) Swap(i, j int) { d[i], d[j] = d[j], d[i] }

// Less implements sort.Sorter
func (d TimeDescending) Less(i, j int) bool { return d[i].After(d[j]) }
// TimeAscending sorts a given list of times ascending, or min to max
// (Less uses Before, so the oldest time sorts first).
type TimeAscending []time.Time

// Len implements sort.Sorter
func (a TimeAscending) Len() int { return len(a) }

// Swap implements sort.Sorter
func (a TimeAscending) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Less implements sort.Sorter
func (a TimeAscending) Less(i, j int) bool { return a[i].Before(a[j]) }
// Days generates a seq of timestamps by day, from -days to today.
// A negative days yields nil.
//
// time.Now is captured once so every returned timestamp shares the
// same base instant; previously it was re-evaluated on each iteration,
// letting consecutive entries drift by the loop's execution time.
func Days(days int) []time.Time {
	if days < 0 {
		return nil
	}
	now := time.Now()
	values := make([]time.Time, 0, days+1)
	for day := days; day >= 0; day-- {
		values = append(values, now.AddDate(0, 0, -day))
	}
	return values
}
// Hours returns a sequence of times by the hour for a given number of hours
// after a given start.
func Hours(start time.Time, totalHours int) []time.Time {
times := make([]time.Time, totalHours)
last := start
for i := 0; i < totalHours; i++ {
times[i] = last
last = last.Add(time.Hour)
}
return times
}
// HoursFilled adds zero values for the data bounded by the start and end of the xdata array.
func HoursFilled(xdata []time.Time, ydata []float64) ([]time.Time, []float64) {
start, end := TimeMinMax(xdata...)
totalHours := DiffHours(start, end)
finalTimes := Hours(start, totalHours+1)
finalValues := make([]float64, totalHours+1)
var hoursFromStart int
for i, xd := range xdata {
hoursFromStart = DiffHours(start, xd)
finalValues[hoursFromStart] = ydata[i]
}
return finalTimes, finalValues
} | timeutil.go | 0.862757 | 0.491883 | timeutil.go | starcoder |
package builders
import (
"github.com/hashicorp/terraform/helper/schema"
"github.com/juliosueiras/terraform-provider-packer/packer/communicators"
)
func AmazonChrootResource() *schema.Resource {
return &schema.Resource{
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "for named builds",
},
"ami_block_device_mapping": &schema.Schema{
Optional: true,
Type: schema.TypeList,
Description: "Add one or more block device mappings to the AMI. These will be attached when booting a new instance from your AMI. Your options here may vary depending on the type of VM you use.",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"delete_on_termination": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Description: "Indicates whether the EBS volume is deleted on instance termination. Default false. NOTE: If this value is not explicitly set to true and volumes are not cleaned up by an alternative method, additional volumes will accumulate after every build.",
},
"device_name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "The device name exposed to the instance (for example, /dev/sdh or xvdh). Required when specifying volume_size. ",
},
"encrypted": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Description: "Indicates whether to encrypt the volume or not",
},
"kms_key_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "The ARN for the KMS encryption key. When specifying kms_key_id, encrypted needs to be set to true.",
},
"iops": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Description: "The number of I/O operations per second (IOPS) that the volume supports. See the documentation on IOPs for more information",
},
"no_device": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Description: "Suppresses the specified device included in the block device mapping of the AMI",
},
"snapshot_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "The ID of the snapshot",
},
"virtual_name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "The virtual device name. See the documentation on Block Device Mapping for more information",
},
"volume_size": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Description: "The size of the volume, in GiB. Required if not specifying a snapshot_id",
},
"volume_type": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic volumes",
},
},
},
},
"ami_name": &schema.Schema{
Required: true,
Type: schema.TypeString,
Description: "The name of the resulting AMI that will appear when managing AMIs in the AWS console or via APIs. This must be unique. To help make this unique, use a function like timestamp (see template engine for more info)",
},
"ami_description": &schema.Schema{
Optional: true,
Description: "The description to set for the resulting AMI(s). By default this description is empty. This is a template engine, see Build template data for more information.",
Type: schema.TypeString,
},
"ami_virtualization_type": &schema.Schema{
Optional: true,
Description: "ami_virtualization_type (string) - The type of virtualization for the AMI you are building. This option is required to register HVM images. Can be \"paravirtual\" (default) or \"hvm\". ",
Type: schema.TypeString,
},
"ami_users": &schema.Schema{
Optional: true,
Description: "A list of account IDs that have access to launch the resulting AMI(s). By default no additional users other than the user creating the AMI has permissions to launch it.",
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
},
"ami_groups": &schema.Schema{
Optional: true,
Description: "A list of groups that have access to launch the resulting AMI(s). By default no groups have permission to launch the AMI. all will make the AMI publicly accessible.",
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
},
"ami_product_codes": &schema.Schema{
Optional: true,
Description: "A list of product codes to associate with the AMI. By default no product codes are associated with the AMI.",
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
},
"ami_regions": &schema.Schema{
Optional: true,
Description: "A list of regions to copy the AMI to. Tags and attributes are copied along with the AMI. AMI copying takes time depending on the size of the AMI, but will generally take many minutes.",
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
},
"skip_region_validation": &schema.Schema{
Optional: true,
Description: "Set to true if you want to skip validation of the ami_regions configuration option. Default false.",
Type: schema.TypeBool,
},
"tags": &schema.Schema{
Optional: true,
Description: "Tags applied to the AMI. This is a template engine, see Build template data for more information.",
Type: schema.TypeMap,
},
"ena_support": &schema.Schema{
Optional: true,
Description: "Enable enhanced networking (ENA but not SriovNetSupport) on HVM-compatible AMIs. If true, add ec2:ModifyInstanceAttribute to your AWS IAM policy. Note: you must make sure enhanced networking is enabled on your instance. See Amazon's documentation on enabling enhanced networking. Default false.",
Type: schema.TypeBool,
},
"sriov_support": &schema.Schema{
Optional: true,
Description: "Enable enhanced networking (SriovNetSupport but not ENA) on HVM-compatible AMIs. If true, add ec2:ModifyInstanceAttribute to your AWS IAM policy. Note: you must make sure enhanced networking is enabled on your instance. See Amazon's documentation on enabling enhanced networking. Default false.",
Type: schema.TypeBool,
},
"force_deregister": &schema.Schema{
Optional: true,
Description: "Force Packer to first deregister an existing AMI if one with the same name already exists. Default false.",
Type: schema.TypeBool,
},
"force_delete_snapshot": &schema.Schema{
Optional: true,
Description: "Force Packer to delete snapshots associated with AMIs, which have been deregistered by force_deregister. Default false.",
Type: schema.TypeBool,
},
"encrypt_boot": &schema.Schema{
Optional: true,
Description: "Instruct packer to automatically create a copy of the AMI with an encrypted boot volume (discarding the initial unencrypted AMI in the process). Packer will always run this operation, even if the base AMI has an encrypted boot volume to start with. Default false.",
Type: schema.TypeBool,
},
"kms_key_id": &schema.Schema{
Optional: true,
Description: "The ID of the KMS key to use for boot volume encryption. This only applies to the main region, other regions where the AMI will be copied will be encrypted by the default EBS KMS key.",
Type: schema.TypeString,
},
"region_kms_key_ids": &schema.Schema{
Optional: true,
Description: "a map of regions to copy the ami to, along with the custom kms key id to use for encryption for that region. Keys must match the regions provided in ami_regions. If you just want to encrypt using a default ID, you can stick with kms_key_id and ami_regions. If you want a region to be encrypted with that region's default key ID, you can use an empty string \"\" instead of a key id in this map. (e.g. \"us-east-1\": \"\") However, you cannot use default key IDs if you are using this in conjunction with snapshot_users -- in that situation you must use custom keys. ",
Type: schema.TypeMap,
},
"snapshot_tags": &schema.Schema{
Optional: true,
Description: "Tags to apply to snapshot. They will override AMI tags if already applied to snapshot. This is a template engine, see Build template data for more information.",
Type: schema.TypeMap,
},
"snapshot_users": &schema.Schema{
Optional: true,
Description: "A list of account IDs that have access to create volumes from the snapshot(s). By default no additional users other than the user creating the AMI has permissions to create volumes from the backing snapshot(s).",
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
},
"snapshot_groups": &schema.Schema{
Optional: true,
Description: "A list of groups that have access to create volumes from the snapshot(s). By default no groups have permission to create volumes from the snapshot(s). all will make the snapshot publicly accessible.",
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
},
"access_key": &schema.Schema{
Required: true,
Type: schema.TypeString,
Description: "The access key used to communicate with AWS. Learn how to set this.",
},
"custom_endpoint_ec2": &schema.Schema{
Optional: true,
Description: "This option is useful if you use a cloud provider whose API is compatible with aws EC2. Specify another endpoint like this https://ec2.custom.endpoint.com.",
Type: schema.TypeString,
},
"mfa_code": &schema.Schema{
Optional: true,
Description: "The MFA TOTP code. This should probably be a user variable since it changes all the time.",
Type: schema.TypeString,
},
"profile": &schema.Schema{
Optional: true,
Description: "The profile to use in the shared credentials file for AWS. See Amazon's documentation on specifying profiles for more details.",
Type: schema.TypeString,
},
"region": &schema.Schema{
Optional: true,
Description: "Region of AMI",
Type: schema.TypeString,
},
"secret_key": &schema.Schema{
Required: true,
Type: schema.TypeString,
Description: "The secret key used to communicate with AWS. Learn how to set this.",
},
"chroot_mount": &schema.Schema{
Optional: true,
Description: "This is a list of devices to mount into the chroot environment. This configuration parameter requires some additional documentation which is in the \"Chroot Mounts\" section below. Please read that section for more information on how to use this.",
Type: schema.TypeList,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"values": &schema.Schema{
Required: true,
Type: schema.TypeList,
Description: "Values of mount",
Elem: &schema.Schema{Type: schema.TypeString},
},
},
},
},
"command_wrapper": &schema.Schema{
Optional: true,
Description: "How to run shell commands. This defaults to {{.Command}}. This may be useful to set if you want to set environmental variables or perhaps run it with sudo or so on. This is a configuration template where the .Command variable is replaced with the command to be run. Defaults to \"{{.Command}}\".",
Type: schema.TypeString,
},
"copy_files": &schema.Schema{
Optional: true,
Description: "Paths to files on the running EC2 instance that will be copied into the chroot environment prior to provisioning. Defaults to /etc/resolv.conf so that DNS lookups work. Pass an empty list to skip copying /etc/resolv.conf. You may need to do this if you're building an image that uses systemd",
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
},
"device_path": &schema.Schema{
Optional: true,
Description: "The path to the device where the root volume of the source AMI will be attached. This defaults to \"\" (empty string), which forces Packer to find an open device automatically.",
Type: schema.TypeString,
},
"nvme_device_path": &schema.Schema{
Optional: true,
Description: "When we call the mount command (by default mount -o device dir), the string provided in nvme_mount_path will replace device in that command. When this option is not set, device in that command will be something like /dev/sdf1, mirroring the attached device name. This assumption works for most instances but will fail with c5 and m5 instances. In order to use the chroot builder with c5 and m5 instances, you must manually set nvme_device_path and device_path.",
Type: schema.TypeString,
},
"from_scratch": &schema.Schema{
Optional: true,
Description: "Build a new volume instead of starting from an existing AMI root volume snapshot. Default false. If true, source_ami is no longer used and the following options become required: ami_virtualization_type, pre_mount_commands and root_volume_size. The below options are also required in this mode only:",
Type: schema.TypeBool,
},
"mount_options": &schema.Schema{
Optional: true,
Description: "Options to supply the mount command when mounting devices. Each option will be prefixed with -o and supplied to the mount command ran by Packer. Because this command is ran in a shell, user discretion is advised. See this manual page for the mount command for valid file system specific options",
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
},
"mount_partition": &schema.Schema{
Optional: true,
Description: "The partition number containing the / partition. By default this is the first partition of the volume, (for example, xvda1) but you can designate the entire block device by setting \"mount_partition\": \"0\" in your config, which will mount xvda instead.",
Type: schema.TypeString,
},
"mount_path": &schema.Schema{
Optional: true,
Description: "The path where the volume will be mounted. This is where the chroot environment will be. This defaults to /mnt/packer-amazon-chroot-volumes/{{.Device}}. This is a configuration template where the .Device variable is replaced with the name of the device where the volume is attached.",
Type: schema.TypeString,
},
"post_mount_commands": &schema.Schema{
Optional: true,
Description: "As pre_mount_commands, but the commands are executed after mounting the root device and before the extra mount and copy steps. The device and mount path are provided by {{.Device}} and {{.MountPath}}.",
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
},
"pre_mount_commands": &schema.Schema{
Optional: true,
Description: "A series of commands to execute after attaching the root volume and before mounting the chroot. This is not required unless using from_scratch. If so, this should include any partitioning and filesystem creation commands. The path to the device is provided by {{.Device}}.",
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
},
"root_device_name": &schema.Schema{
Optional: true,
Description: "The root device name. For example, xvda.",
Type: schema.TypeString,
},
"root_volume_size": &schema.Schema{
Optional: true,
Description: "The size of the root volume in GB for the chroot environment and the resulting AMI. Default size is the snapshot size of the source_ami unless from_scratch is true, in which case this field must be defined.",
Type: schema.TypeInt,
},
"source_ami": &schema.Schema{
Required: true,
Type: schema.TypeString,
Description: "The source AMI whose root volume will be copied and provisioned on the currently running instance. This must be an EBS-backed AMI with a root volume snapshot that you have access to. Note: this is not used when from_scratch is set to true.",
},
"source_ami_filter": &schema.Schema{
Optional: true,
Type: schema.TypeList,
MaxItems: 1,
Description: "Filters used to populate the source_ami field",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"filters": &schema.Schema{
Type: schema.TypeMap,
Optional: true,
Description: "filters used to select a source_ami. NOTE: This will fail unless exactly one AMI is returned. Any filter described in the docs for DescribeImages is valid.",
},
"owners": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Description: "This scopes the AMIs to certain Amazon account IDs. This is helpful to limit the AMIs to a trusted third party, or to your own account.",
},
"most_recent": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Description: "Selects the newest created image when true. This is most useful for selecting a daily distro build.",
},
},
},
},
"communicator": &schema.Schema{
Optional: true,
Type: schema.TypeString,
},
"ssh": &schema.Schema{
Optional: true,
Type: schema.TypeList,
Elem: communicators.SSHCommunicatorResource(),
},
"winrm": &schema.Schema{
Optional: true,
Type: schema.TypeList,
Elem: communicators.WinRMCommunicatorResource(),
},
},
}
} | vendor/github.com/juliosueiras/terraform-provider-packer/packer/builders/amazonchroot.go | 0.59408 | 0.426083 | amazonchroot.go | starcoder |
package toscalib
// RequirementDefinition as described in Appendix 6.2
// RequirementDefinition as described in Appendix 6.2
// NOTE(review): the yaml/json tag "occurences" is misspelled relative
// to the TOSCA keyname "occurrences"; existing documents may rely on
// the misspelling, so it is left unchanged — confirm before fixing.
type RequirementDefinition struct {
	Capability       string     `yaml:"capability" json:"capability"`               // The required reserved keyname used that can be used to provide the name of a valid Capability Type that can fulfil the requirement
	Node             string     `yaml:"node,omitempty" json:"node,omitempty"`       // The optional reserved keyname used to provide the name of a valid Node Type that contains the capability definition that can be used to fulfil the requirement
	Relationship     string     `yaml:"relationship" json:"relationship,omitempty"` // The optional reserved keyname naming the Relationship Type used to fulfil the requirement
	RelationshipName string     // Populated by the library rather than directly from YAML
	Occurrences      ToscaRange `yaml:"occurences,omitempty" json:"occurences,omitempty"` // The optional minimum and maximum occurrences for the requirement. Note: the keyword UNBOUNDED is also supported to represent any positive integer
}
// UnmarshalYAML is used to match both Simple Notation Example and Full Notation Example
func (r *RequirementDefinition) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// Short notation: the whole requirement is just a capability name.
	var capabilityName string
	if err := unmarshal(&capabilityName); err == nil {
		r.Capability = capabilityName
		return nil
	}
	// Extended notation: a mapping carrying the full set of keynames.
	var full struct {
		Capability   string     `yaml:"capability" json:"capability"`
		Node         string     `yaml:"node,omitempty" json:"node,omitempty"`
		Relationship string     `yaml:"relationship" json:"relationship,omitempty"`
		Occurrences  ToscaRange `yaml:"occurences,omitempty" json:"occurences,omitempty"`
	}
	if err := unmarshal(&full); err != nil {
		return err
	}
	r.Capability = full.Capability
	r.Node = full.Node
	r.Relationship = full.Relationship
	r.Occurrences = full.Occurrences
	return nil
}
// UnmarshalYAML is used to match both Simple Notation Example and Full Notation Example
func (r *RequirementAssignment) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// Short notation: a plain string naming the target node.
	var nodeName string
	if err := unmarshal(&nodeName); err == nil {
		r.Node = nodeName
		return nil
	}
	// Extended notation where relationship is an inline type definition.
	var withRelationship struct {
		Capability   string           `yaml:"capability,omitempty"`
		Node         string           `yaml:"node,omitempty"`
		Nodefilter   NodeFilter       `yaml:"node_filter,omitempty"`
		Relationship RelationshipType `yaml:"relationship,omitempty"`
	}
	if err := unmarshal(&withRelationship); err == nil {
		r.Capability = withRelationship.Capability
		r.Node = withRelationship.Node
		r.Nodefilter = withRelationship.Nodefilter
		r.Relationship = withRelationship.Relationship
		return nil
	}
	// Fallback: relationship is just a type name string.
	var withRelationshipName struct {
		Capability       string     `yaml:"capability,omitempty"`
		Node             string     `yaml:"node,omitempty"`
		Nodefilter       NodeFilter `yaml:"node_filter,omitempty"`
		RelationshipName string     `yaml:"relationship,omitempty"`
	}
	if err := unmarshal(&withRelationshipName); err != nil {
		return err
	}
	r.Capability = withRelationshipName.Capability
	r.Node = withRelationshipName.Node
	r.Nodefilter = withRelationshipName.Nodefilter
	r.RelationshipName = withRelationshipName.RelationshipName
	return nil
}
// RequirementAssignment as described in Appendix 7.2.
// It carries either concrete template names or abstract selection criteria
// that a provider uses to find a node fulfilling a named requirement.
type RequirementAssignment struct {
	Capability string `yaml:"capability,omitempty" json:"capability,omitempty"` /* The optional reserved keyname used to provide the name of either a:
	- Capability definition within a target node template that can fulfill the requirement.
	- Capability Type that the provider will use to select a type-compatible target node template to fulfill the requirement at runtime. */

	Node string `yaml:"node,omitempty" json:"node,omitempty"` /* The optional reserved keyname used to identify the target node of a relationship. Specifically, it is used to provide either a:
	- Node Template name that can fulfil the target node requirement.
	- Node Type name that the provider will use to select a type-compatible node template to fulfil the requirement at runtime. */

	//Relationship string `yaml:"relationship,omitempty" json:"relationship,omitempty"` /* The optional reserved keyname used to provide the name of either a:
	//- Relationship Template to use to relate the source node to the (capability in the) target node when fulfilling the requirement.
	//- Relationship Type that the provider will use to select a type-compatible relationship template to relate the source node to the target node at runtime. */

	Nodefilter NodeFilter `yaml:"node_filter,omitempty" json:"node_filter,omitempty"` // The optional filter definition that TOSCA orchestrators or providers would use to select a type-compatible target node that can fulfill the associated abstract requirement at runtime.

	// Relationship is populated when the YAML gives a structured relationship
	// (see UnmarshalYAML); RelationshipName is populated instead when the
	// relationship is given only by name. Only one of the two is set per parse.
	Relationship RelationshipType

	RelationshipName string
	// NOTE(review): the Relationship type is not always present; in some
	// documents (e.g. the ELK example) the Interfaces appear directly.
}
/* RequirementRelationship holds the recognized keynames for a TOSCA requirement assignment's
relationship keyname, used when Property assignments need to be provided to inputs of
declared interfaces or their operations. */
type RequirementRelationship struct {
	Type       string                         `yaml:"type" json:"type"`                               // The optional reserved keyname used to provide the name of the Relationship Type for the requirement assignment's relationship keyname.
	Interfaces map[string]InterfaceDefinition `yaml:"interfaces,omitempty" json:"interfaces,omitempty"` // The optional reserved keyname used to reference declared (named) interface definitions of the corresponding Relationship Type in order to provide Property assignments for these interfaces or operations of these interfaces.
	Properties map[string]interface{}         `yaml:"properties" json:"properties"`                   // Property assignments for the relationship. NOTE(review): original comment described "schema for a complex Data Type", which looks copy-pasted from elsewhere — confirm intent.
}
package schema
import (
"math"
"strings"
"github.com/liquidata-inc/dolt/go/store/types"
)
// KindToLwrStr maps a noms kind to the kind's lowercased name.
var KindToLwrStr = make(map[types.NomsKind]string)

// LwrStrToKind maps a lowercase string to the noms kind it is referring to.
var LwrStrToKind = make(map[string]types.NomsKind)

// init populates both lookup tables from types.KindToString so that they
// always stay in sync with the canonical kind names.
func init() {
	for t, s := range types.KindToString {
		KindToLwrStr[t] = strings.ToLower(s)
		LwrStrToKind[strings.ToLower(s)] = t
	}
}

// InvalidTag is used as an invalid tag.
var InvalidTag uint64 = math.MaxUint64

// ReservedTagMin is the start of a range of tags which the user should not be able to use in their schemas.
const ReservedTagMin uint64 = 1 << 63

// InvalidCol is a Column instance that is returned when there is nothing to return and can be tested against.
var InvalidCol = NewColumn("invalid", InvalidTag, types.NullKind, false)
// Column is a structure containing information about a column in a row in a table.
type Column struct {
	// Name is the name of the column.
	Name string

	// Tag should be unique per versioned schema and allows...
	// NOTE(review): the original comment was truncated here — presumably the tag
	// identifies the column stably across schema versions; confirm.
	Tag uint64

	// Kind is the types.NomsKind that values of this column will be.
	Kind types.NomsKind

	// IsPartOfPK says whether this column is part of the primary key.
	IsPartOfPK bool

	// Constraints are rules that can be checked on each column to say if the column's value is valid.
	Constraints []ColConstraint
}
// NewColumn creates a Column instance. It panics if any of the supplied
// constraints is nil, since a nil constraint would crash later validation.
func NewColumn(name string, tag uint64, kind types.NomsKind, partOfPK bool, constraints ...ColConstraint) Column {
	for _, constraint := range constraints {
		if constraint == nil {
			panic("nil passed as a constraint")
		}
	}

	// Named fields rather than a positional literal, so reordering Column's
	// fields cannot silently scramble the values.
	return Column{
		Name:        name,
		Tag:         tag,
		Kind:        kind,
		IsPartOfPK:  partOfPK,
		Constraints: constraints,
	}
}
// IsNullable returns whether the column can be set to a null value, i.e.
// whether none of its constraints is a not-null constraint.
func (c Column) IsNullable() bool {
	nullable := true
	for _, constraint := range c.Constraints {
		if constraint.GetConstraintType() == NotNullConstraintType {
			nullable = false
			break
		}
	}
	return nullable
}
// Equals tests equality between two columns: every field, including the
// constraint lists, must match.
func (c Column) Equals(other Column) bool {
	if c.Name != other.Name || c.Tag != other.Tag {
		return false
	}
	if c.Kind != other.Kind || c.IsPartOfPK != other.IsPartOfPK {
		return false
	}
	return ColConstraintsAreEqual(c.Constraints, other.Constraints)
}
// KindString returns the string representation of the NomsKind stored in the column.
func (c Column) KindString() string {
return KindToLwrStr[c.Kind]
} | go/libraries/doltcore/schema/column.go | 0.677581 | 0.420421 | column.go | starcoder |
// Package utility implements a reasoner AI based on utility theory.
package utility
import "fmt"
// Config is the utility theory based AI system configuration: a list of
// input-curve configs plus a list of combinator configs that reference them.
type Config struct {
	Input []InputConf
	Combo []ComboConf
}

// InputConf is the configuration for input based utility curve(s).
type InputConf struct {
	Id      int     // A referable identifier for the utility
	Min     float64 // Interval start for normalization
	Max     float64 // Interval end for normalization
	Set     bool    // Flag whether the config defines a set of utilities
	NonZero bool    // Flag whether the curve is allowed absolute zero output
	Curve   Curve   // Function mapping the data to a curve
}

// ComboConf is the configuration for combination based utility curve(s).
// SrcA and SrcB must refer to Ids of utilities already registered (inputs are
// installed before combos — see New).
type ComboConf struct {
	Id   int        // A referable identifier for the utility
	SrcA int        // First input source of the combinator
	SrcB int        // Second input source of the combinator
	Set  bool       // Flag whether the config defines a set of utilities
	Comb Combinator // Function combining the input sources
}

// System is a utility theory based decision making system: a registry of
// utilities keyed by their configured Id.
type System struct {
	utils map[int]utility
}
// New creates a utility theory AI system from the given configuration.
// Inputs are installed first so that combinators can resolve their sources.
func New(config *Config) *System {
	sys := &System{utils: make(map[int]utility)}
	for i := range config.Input {
		sys.addInput(&config.Input[i])
	}
	for i := range config.Combo {
		sys.addCombo(&config.Combo[i])
	}
	return sys
}
// addInput injects a new input based utility curve (set) into the system,
// registering it under its configured Id.
func (s *System) addInput(config *InputConf) {
	if !config.Set {
		// Singleton input utility, insert as is.
		util := newInputUtility(config.Curve, config.NonZero)
		util.Limit(config.Min, config.Max)
		s.utils[config.Id] = util
		return
	}
	// A whole set of utilities sharing the same curve and limits.
	utils := newInputSetUtility(config.Curve, config.NonZero)
	utils.Limit(config.Min, config.Max)
	s.utils[config.Id] = utils
}
// addCombo injects a new combinatorial utility curve (set) into the system,
// wiring it to its two previously registered source utilities.
func (s *System) addCombo(config *ComboConf) {
	srcA, srcB := s.utils[config.SrcA], s.utils[config.SrcB]
	if config.Set {
		// A whole set of combined utilities.
		s.utils[config.Id] = newComboSetUtility(config.Comb, srcA, srcB)
		return
	}
	// Singleton combo utility, insert as is.
	util := newComboUtility(config.Comb)
	util.Init(srcA, srcB)
	s.utils[config.Id] = util
}
// Limit sets the normalization limits for a data utility. It panics if the
// referenced utility is not an input (singleton or set) utility.
func (s *System) Limit(id int, min, max float64) {
	switch u := s.utils[id].(type) {
	case *inputUtility:
		u.Limit(min, max)
	case *inputSetUtility:
		u.Limit(min, max)
	default:
		panic(fmt.Sprintf("Unknown utility type: %+v", u))
	}
}
// Update feeds a new raw input value into a singleton data utility.
func (s *System) Update(id int, input float64) {
	util := s.utils[id].(*inputUtility)
	util.Update(input)
}
// UpdateOne feeds a new raw input value into one member of a data utility set.
func (s *System) UpdateOne(id, index int, input float64) {
	set := s.utils[id].(*inputSetUtility)
	set.Update(index, input)
}
// UpdateAll feeds new raw input values into all the members of a data utility
// set; inputs[i] goes to member i.
func (s *System) UpdateAll(id int, inputs []float64) {
	set := s.utils[id].(*inputSetUtility)
	for index, value := range inputs {
		set.Update(index, value)
	}
}
// Evaluate returns the current output of a singleton utility.
func (s *System) Evaluate(id int) float64 {
	single := s.utils[id].(singleUtility)
	return single.Evaluate()
}
// Evaluates a member of a utility set.
func (s *System) EvaluateOne(id, index int) float64 {
return s.utils[id].(multiUtility).Evaluate(index)
} | performance/contadortest/vendor/gopkg.in/karalabe/cookiejar.v2/ai/utility/system.go | 0.681833 | 0.404802 | system.go | starcoder |
package token
import (
"unicode"
"unicode/utf8"
)
// isSection compares a number of positions (skipping whitespace) to determine if the runes are sectionAdornments and returns
// true if the positions match each other. Rune comparison begins at the current lexer position. isSection returns false if
// there is a blank line between the positions or if there is a rune mismatch between positions.
func isSection(l *Lexer) bool {
	// checkLine reports whether every non-space rune of input is the same
	// section-adornment character (e.g. "======").
	checkLine := func(input string) bool {
		// first remembers the first non-space rune seen; the zero value '\x00'
		// doubles as the "not yet set" sentinel.
		var first, last rune
		for j := 0; j < len(input); j++ {
			// NOTE(review): j advances one byte at a time, so a multi-byte rune
			// is re-decoded at each continuation byte (yielding RuneError).
			// Safe only while adornment characters are ASCII — confirm.
			r, _ := utf8.DecodeRuneInString(input[j:])
			if unicode.IsSpace(r) {
				l.Msg("Skipping space rune")
				continue
			}
			if first == '\x00' {
				first = r
				last = r
			}
			// l.Log.Debugf("first: %q, last: %q, r: %q, j: %d", first, last, r, j)
			// Reject if r is not an adornment rune, or the line mixes runes.
			if !isSectionAdornment(r) || (r != first && last != first) {
				l.Msg("Section not found")
				return false
			}
			last = r
		}
		return true
	}
	// A transition (e.g. a lone "----" separator) looks like an adornment but
	// is not a section; rule it out first.
	if isTransition(l) {
		l.Msg("Returning (found transition)")
		return false
	}
	// Adornment may be on the current line (overline) ...
	if checkLine(l.currentLine()) {
		l.Msg("Found section adornment")
		return true
	}
	// ... or on the next line (underline beneath a title).
	nLine := l.peekNextLine()
	if nLine != "" {
		if checkLine(nLine) {
			l.Msg("Found section adornment (nextline)")
			return true
		}
	}
	l.Msg("Section not found")
	return false
}
// isSectionAdornment reports whether r is one of the runes recognized as a
// section adornment character.
func isSectionAdornment(r rune) bool {
	for _, adornment := range sectionAdornments {
		if r == adornment {
			return true
		}
	}
	return false
}
// lexSection is used after isSection() has determined that the next runes of input are a section. From here, lexTitle()
// and lexSectionAdornment() are called based on the input.
func lexSection(l *Lexer) stateFn {
	// l.Log.Debugf("l.mark: %#U, l.index: %d, l.start: %d, l.width: %d, " + "l.line: %d", l.mark, l.index, l.start,
	// l.width, l.lineNumber())
	if isSectionAdornment(l.mark) {
		// If the previous token was not a Title this is an overline:
		// hand over the whole state to lexSectionAdornment. Otherwise it is
		// the underline of the title just emitted; consume it inline and
		// fall through to lexStart.
		if l.lastItem != nil && l.lastItem.Type != Title {
			return lexSectionAdornment
		}
		lexSectionAdornment(l)
	} else if unicode.IsSpace(l.mark) {
		return lexSpace
	} else if l.mark == EOL {
		// Skip the end-of-line marker and keep scanning.
		l.next()
	} else if unicode.IsPrint(l.mark) {
		// Printable, non-adornment rune: this is the section title text.
		return lexTitle
	}
	return lexStart
}
// lexTitle consumes input until newline and emits a Title token. Inline markup
// inside the title is handed to lexInlineMarkup, with any text accumulated so
// far emitted as a Title first. On completion control is returned to lexSection.
func lexTitle(l *Lexer) stateFn {
	for {
		if isInlineMarkup(l) {
			// Flush the plain-text prefix of the title before the markup.
			if l.index > l.start {
				l.emit(Title)
			}
			lexInlineMarkup(l)
			if l.isEndOfLine() {
				l.next()
				break
			}
		} else if l.isEndOfLine() {
			l.emit(Title)
			break
		}
		l.next()
	}
	return lexSection
}
// lexSectionAdornment advances the lexer until a newline is encountered and
// emits a SectionAdornment token per line consumed. Control is returned to
// lexSection() on completion (when the EOL marker itself is reached).
func lexSectionAdornment(l *Lexer) stateFn {
	for {
		if l.isEndOfLine() {
			l.emit(SectionAdornment)
			// EOL (as opposed to merely end-of-line) means there is no more
			// input to consume for this adornment.
			if l.mark == EOL {
				break
			}
		}
		l.next()
	}
	return lexSection
}
package core
import (
"fmt"
"math"
)
// RotatedRect is a rectangle with a center point, a size, and a rotation
// angle in degrees (mirrors OpenCV's RotatedRect).
type RotatedRect struct {
	Center Point   // center of the rectangle
	Size   Size    // width and height of the rectangle
	Angle  float64 // rotation angle in degrees
}
// NewRotatedRect returns a zero-valued rotated rectangle: origin center,
// empty size, zero angle.
func NewRotatedRect() (rcvr *RotatedRect) {
	rcvr = &RotatedRect{
		Center: *NewPoint2(),
		Size:   *NewSize2(),
		Angle:  0,
	}
	return
}
// NewRotatedRect2 builds a rotated rectangle from a center, size and angle.
// The center and size are deep-copied so the caller's values stay independent.
func NewRotatedRect2(c *Point, s *Size, a float64) (rcvr *RotatedRect) {
	rcvr = &RotatedRect{
		Center: *c.Clone(),
		Size:   *s.Clone(),
		Angle:  a,
	}
	return
}
// NewRotatedRect3 builds a rotated rectangle from a value slice laid out as
// [centerX, centerY, width, height, angle]; missing entries default to zero.
func NewRotatedRect3(vals []float64) (rcvr *RotatedRect) {
	rcvr = NewRotatedRect()
	rcvr.Set(vals)
	return rcvr
}
// BoundingRect returns the minimal axis-aligned integer rectangle that
// contains the rotated rectangle.
func (rcvr *RotatedRect) BoundingRect() *Rect {
	pt := make([]*Point, 4)
	rcvr.Points(pt)
	// Floor the minima and ceil the maxima over the 4 corner coordinates.
	// NOTE(review): this assumes NewRect's last two arguments land in
	// Width/Height and initially hold the max x/y coordinates — confirm
	// NewRect's parameter semantics.
	r := NewRect(int(math.Floor(math.Min(math.Min(math.Min(pt[0].X, pt[1].X), pt[2].X), pt[3].X))),
		int(math.Floor(math.Min(math.Min(math.Min(pt[0].Y, pt[1].Y), pt[2].Y), pt[3].Y))),
		int(math.Ceil(math.Max(math.Max(math.Max(pt[0].X, pt[1].X), pt[2].X), pt[3].X))),
		int(math.Ceil(math.Max(math.Max(math.Max(pt[0].Y, pt[1].Y), pt[2].Y), pt[3].Y))))
	// Convert the max coordinates into inclusive width/height extents.
	r.Width -= r.X - 1
	r.Height -= r.Y - 1
	return r
}
// Clone returns a deep copy of the rotated rectangle.
func (rcvr *RotatedRect) Clone() *RotatedRect {
	return &RotatedRect{
		Center: *rcvr.Center.Clone(),
		Size:   *rcvr.Size.Clone(),
		Angle:  rcvr.Angle,
	}
}
// Equals reports whether obj is a *RotatedRect with the same center, size and
// angle as rcvr (or the very same pointer). Non-RotatedRect values compare false.
func (rcvr *RotatedRect) Equals(obj interface{}) bool {
	if rcvr == obj {
		return true
	}
	other, ok := obj.(*RotatedRect)
	if !ok {
		return false
	}
	if !rcvr.Center.Equals(other.Center) || !rcvr.Size.Equals(other.Size) {
		return false
	}
	return rcvr.Angle == other.Angle
}
// Points computes the 4 corner points of the rotated rectangle and stores them
// in pt (which must have length >= 4). Corners are produced in the same order
// as OpenCV's RotatedRect.points: pt[0] and pt[1] are adjacent corners, and
// pt[2]/pt[3] are their reflections through the center.
func (rcvr *RotatedRect) Points(pt []*Point) {
	_angle := rcvr.Angle * math.Pi / 180.0
	// Half-extents of the rectangle projected along the rotation angle.
	b := math.Cos(_angle) * 0.5
	a := math.Sin(_angle) * 0.5
	pt[0] = NewPoint(rcvr.Center.X-a*rcvr.Size.Height-b*rcvr.Size.Width, rcvr.Center.Y+b*rcvr.Size.Height-a*rcvr.Size.Width)
	pt[1] = NewPoint(rcvr.Center.X+a*rcvr.Size.Height-b*rcvr.Size.Width, rcvr.Center.Y-b*rcvr.Size.Height-a*rcvr.Size.Width)
	pt[2] = NewPoint(2*rcvr.Center.X-pt[0].X, 2*rcvr.Center.Y-pt[0].Y)
	// BUG FIX: pt[3] must mirror pt[1] through the center; the original
	// mirrored pt[0] again, duplicating pt[2] and yielding a degenerate
	// quadrilateral (which also broke BoundingRect).
	pt[3] = NewPoint(2*rcvr.Center.X-pt[1].X, 2*rcvr.Center.Y-pt[1].Y)
}
// Set fills the rectangle from a value slice laid out as
// [centerX, centerY, width, height, angle]. Missing trailing entries default
// to 0; a nil slice resets every field to 0.
func (rcvr *RotatedRect) Set(vals []float64) {
	if vals == nil {
		// BUG FIX: the original zeroed Center.X twice and never reset
		// Center.Y, leaving a stale Y coordinate behind.
		rcvr.Center.X = 0
		rcvr.Center.Y = 0
		rcvr.Size.Width = 0
		rcvr.Size.Height = 0
		rcvr.Angle = 0
		return
	}

	// at returns vals[i] when present and 0 otherwise, replacing the five
	// repeated inline closures of the original.
	at := func(i int) float64 {
		if i < len(vals) {
			return vals[i]
		}
		return 0
	}

	rcvr.Center.X = at(0)
	rcvr.Center.Y = at(1)
	rcvr.Size.Width = at(2)
	rcvr.Size.Height = at(3)
	rcvr.Angle = at(4)
}
func (rcvr *RotatedRect) String() string {
return fmt.Sprintf("%v%v%v%v%v%v%v", "{ ", rcvr.Center, " ", rcvr.Size, " * ", rcvr.Angle, " }")
} | opencv3/core/RotatedRect.java.go | 0.675658 | 0.497376 | RotatedRect.java.go | starcoder |
package ahrs
import (
"github.com/skelterjohn/go.matrix"
"log"
"math"
)
// KalmanState wraps the shared AHRS State with the extended-Kalman-filter
// implementation of the attitude/heading reasoner.
type KalmanState struct {
	State
}
// CalcRollPitchHeadingUncertainty propagates the quaternion covariance
// (diagonal of s.M at the E0..E3 indices 6..9) into roll/pitch/heading
// standard deviations via VarFromQuaternion.
func (s *KalmanState) CalcRollPitchHeadingUncertainty() (droll float64, dpitch float64, dheading float64) {
	droll, dpitch, dheading = VarFromQuaternion(s.E0, s.E1, s.E2, s.E3,
		math.Sqrt(s.M.Get(6, 6)), math.Sqrt(s.M.Get(7, 7)),
		math.Sqrt(s.M.Get(8, 8)), math.Sqrt(s.M.Get(9, 9)))
	return
}
// GetState returns the Kalman state of the system.
func (s *KalmanState) GetState() *State {
	return &s.State
}
// GetStateMap returns the state information for analysis.
// NOTE(review): this is currently a stub — the naked return always yields a
// nil map pointer. Callers must handle nil; confirm whether this was meant to
// be implemented.
func (s *KalmanState) GetStateMap() (dat *map[string]float64) {
	return
}
// InitializeKalman creates and initializes the Kalman filter state from the
// first set of measurements.
func InitializeKalman(m *Measurement) (s *KalmanState) {
	s = &KalmanState{}
	s.init(m)
	return s
}
// init seeds the 32-dimensional state, its covariance s.M and the process
// noise s.N from the first measurements. State vector layout (indices):
// U*3 (0-2) airspeed, Z*3 (3-5) accel, E*4 (6-9) attitude quaternion,
// H*3 (10-12) rotation rate, N*3 (13-15) magnetic field,
// V*3 (16-18) wind, C*3 (19-21) accel bias, F*4 (22-25) sensor quaternion,
// D*3 (26-28) gyro bias, L*3 (29-31) magnetometer bias.
func (s *KalmanState) init(m *Measurement) {
	// Diagonal matrix of initial state uncertainties, will be squared into covariance below
	// Specifics here aren't too important--it will change very quickly
	s.M = matrix.Diagonal([]float64{
		50, 5, 5, // U*3
		0.4, 0.2, 0.5, // Z*3
		0.5, 0.5, 0.5, 0.5, // E*4
		2, 2, 2, // H*3
		65, 65, 65, // N*3
		10, 10, 2, // V*3
		0.02, 0.02, 0.02, // C*3
		0.002, 0.002, 0.002, 0.002, // F*4
		0.1, 0.1, 0.1, // D*3
		10, 10, 10, // L*3
	})
	s.M = matrix.Product(s.M, s.M)

	// Diagonal matrix of state process uncertainties per s, will be squared into covariance below
	// Tuning these is more important
	tt := math.Sqrt(60.0 * 60.0) // One-hour time constant for drift of biases V, C, F, D, L
	s.N = matrix.Diagonal([]float64{
		1, 0.1, 0.1, // U*3
		0.2, 0.1, 0.2, // Z*3
		0.02, 0.02, 0.02, 0.02, // E*4
		1, 1, 1, // H*3
		100, 100, 100, // N*3
		5 / tt, 5 / tt, 5 / tt, // V*3
		0.01 / tt, 0.01 / tt, 0.01 / tt, // C*3
		0.0001 / tt, 0.0001 / tt, 0.0001 / tt, 0.0001 / tt, // F*4
		0.1 / tt, 0.1 / tt, 0.1 / tt, // D*3
		0.1 / tt, 0.1 / tt, 0.1 / tt, // L*3
	})
	s.N = matrix.Product(s.N, s.N)

	//TODO westphae: for now just treat the case !m.UValid; if we have U, we can do a lot more!

	// Best guess at initial airspeed is initial groundspeed
	if m.WValid {
		s.U1 = math.Hypot(m.W1, m.W2)
		s.M.Set(0, 0, 14*14) // Our estimate of airspeed is better
		s.M.Set(16, 16, 10)  // Matching uncertainty of windspeed
		s.M.Set(17, 17, 10)  // Matching uncertainty of windspeed
	}

	// Best guess at initial heading is initial track
	if m.WValid && s.U1 > 5 {
		// Simplified half-angle formulae
		s.E0, s.E3 = math.Sqrt((s.U1+m.W1)/(2*s.U1)), math.Sqrt((s.U1-m.W1)/(2*s.U1))
		if m.W2 < 0 {
			s.E3 *= -1
		}
		s.M.Set(6, 6, 0.1*0.1) // Our estimate of orientation is better
		s.M.Set(7, 7, 0.1*0.1)
		s.M.Set(8, 8, 0.1*0.1)
		s.M.Set(9, 9, 0.1*0.1)
	} else { // If no groundspeed available then no idea which direction we're pointing
		s.E0 = 1 // assume east
	}

	s.F0 = 1 // Initial guess is that it's oriented pointing forward and level
	s.normalize()

	if m.MValid { //TODO westphae: could do more here to get a better Fn since we know N points north
		// Rotate the measured magnetic field into the earth frame.
		s.N1 = m.M1*s.e11 + m.M2*s.e12 + m.M3*s.e13
		s.N2 = m.M1*s.e21 + m.M2*s.e22 + m.M3*s.e23
		s.N3 = m.M1*s.e31 + m.M2*s.e32 + m.M3*s.e33
	} else {
		s.M.Set(13, 13, Big) // Don't try to update the magnetometer
		s.M.Set(14, 14, Big)
		s.M.Set(15, 15, Big)
		s.M.Set(29, 29, Big)
		s.M.Set(30, 30, Big)
		s.M.Set(31, 31, Big)
	}

	return
}
// Compute runs first the prediction and then the update phases of the Kalman
// filter for the given measurement.
func (s *KalmanState) Compute(m *Measurement) {
	s.Predict(m.T)
	s.Update(m)
}
// Valid applies some heuristics to detect whether the computed state is valid
// or not: airspeed sign, magnitude bounds on airspeed/wind components, and the
// estimated roll/pitch uncertainty. A false return signals the caller to
// restart the filter.
func (s *KalmanState) Valid() (ok bool) {
	ok = true

	// Negative forward airspeed is physically implausible.
	if s.U1 < -5 {
		log.Println("AHRS got negative airspeed, restarting")
		ok = false
	}

	// Sanity bounds (apparently in the state's native speed units) on airspeed
	// components and wind components.
	if math.Abs(s.U1) > 300 || math.Abs(s.U2) > 20 || math.Abs(s.U3) > 20 ||
		math.Abs(s.V1) > 40 || math.Abs(s.V2) > 40 || math.Abs(s.V3) > 40 {
		log.Println("Speeds too high")
		ok = false
	}

	// Reject when the attitude estimate has become too uncertain.
	roll, pitch, heading := s.CalcRollPitchHeading()
	droll, dpitch, dheading := s.CalcRollPitchHeadingUncertainty()
	if droll > 2.5*Deg || dpitch > 2.5*Deg {
		log.Printf("AHRS too uncertain: roll %5.1f +/- %3.1f, pitch %4.1f +/- %3.1f, heading %5.1f +/- %3.1f\n",
			roll/Deg, droll/Deg, pitch/Deg, dpitch/Deg, heading/Deg, dheading/Deg)
		ok = false
	}

	return ok
}
// Predict performs the prediction phase of the Kalman filter: it integrates
// velocity from acceleration and the attitude quaternion from the rotation
// rate over dt, then propagates the covariance with the state Jacobian and
// adds dt-scaled process noise. All other state components are unchanged.
func (s *KalmanState) Predict(t float64) {
	// Jacobian is computed from the pre-update state, before mutating it.
	f := s.calcJacobianState(t)
	dt := t - s.T

	s.U1 += dt * s.Z1 * G
	s.U2 += dt * s.Z2 * G
	s.U3 += dt * s.Z3 * G
	// Quaternion kinematics: dE/dt = 0.5 * E ⊗ H (H in deg/s, hence Deg).
	s.E0 += 0.5 * dt * (-s.H1*s.E1 - s.H2*s.E2 - s.H3*s.E3) * Deg
	s.E1 += 0.5 * dt * (+s.H1*s.E0 + s.H2*s.E3 - s.H3*s.E2) * Deg
	s.E2 += 0.5 * dt * (-s.H1*s.E3 + s.H2*s.E0 + s.H3*s.E1) * Deg
	s.E3 += 0.5 * dt * (+s.H1*s.E2 - s.H2*s.E1 + s.H3*s.E0) * Deg
	s.normalize()
	// All other state vectors are unchanged

	s.T = t
	// P' = F P Fᵀ + Q·dt
	s.M = matrix.Sum(matrix.Product(f, matrix.Product(s.M, f.Transpose())), matrix.Scaled(s.N, dt))
}
// Update applies the Kalman filter correction step for the given measurement.
// Measurement vector layout (15 rows): U*3 (0-2) airspeed, W*3 (3-5)
// groundspeed, A*3 (6-8) accelerometer, B*3 (9-11) gyro, M*3 (12-14)
// magnetometer. Invalid sensors get a zero residual and a Big variance so
// they contribute nothing to the correction.
func (s *KalmanState) Update(m *Measurement) {
	z := s.PredictMeasurement()

	//TODO westphae: for testing, if no GPS, we're probably inside at a desk - assume zero groundspeed
	if !m.WValid {
		m.W1 = 0
		m.W2 = 0
		m.W3 = 0
		m.WValid = true
	}

	// Innovation (residual): measurement minus predicted measurement.
	y := matrix.Zeros(15, 1)
	y.Set(0, 0, m.U1-z.U1)
	y.Set(1, 0, m.U2-z.U2)
	y.Set(2, 0, m.U3-z.U3)
	y.Set(3, 0, m.W1-z.W1)
	y.Set(4, 0, m.W2-z.W2)
	y.Set(5, 0, m.W3-z.W3)
	y.Set(6, 0, m.A1-z.A1)
	y.Set(7, 0, m.A2-z.A2)
	y.Set(8, 0, m.A3-z.A3)
	y.Set(9, 0, m.B1-z.B1)
	y.Set(10, 0, m.B2-z.B2)
	y.Set(11, 0, m.B3-z.B3)
	y.Set(12, 0, m.M1-z.M1)
	y.Set(13, 0, m.M2-z.M2)
	y.Set(14, 0, m.M3-z.M3)

	h := s.calcJacobianMeasurement()

	// Fill the measurement covariance m.M, per sensor group, from the
	// measurement accumulators; disable missing sensors with Big variance.
	var v float64
	// U, W, A, B, M
	if m.UValid {
		_, _, v = m.Accums[0](m.U1)
		m.M.Set(0, 0, v)
	} else {
		y.Set(0, 0, 0)
		m.M.Set(0, 0, Big)
	}
	// U2, U3 are just here to bias toward coordinated flight
	//TODO westphae: not sure I really want these to not be BIG
	m.M.Set(1, 1, 1)
	m.M.Set(2, 2, 1)

	if m.WValid {
		_, _, v = m.Accums[3](m.W1)
		m.M.Set(3, 3, v)
		_, _, v = m.Accums[4](m.W2)
		m.M.Set(4, 4, v)
		_, _, v = m.Accums[5](m.W3)
		m.M.Set(5, 5, v)
	} else {
		y.Set(3, 0, 0)
		y.Set(4, 0, 0)
		y.Set(5, 0, 0)
		m.M.Set(3, 3, Big)
		m.M.Set(4, 4, Big)
		m.M.Set(5, 5, Big)
	}

	if m.SValid {
		_, _, v = m.Accums[6](m.A1)
		m.M.Set(6, 6, v)
		_, _, v = m.Accums[7](m.A2)
		m.M.Set(7, 7, v)
		_, _, v = m.Accums[8](m.A3)
		m.M.Set(8, 8, v)
		_, _, v = m.Accums[9](m.B1)
		m.M.Set(9, 9, v)
		_, _, v = m.Accums[10](m.B2)
		m.M.Set(10, 10, v)
		_, _, v = m.Accums[11](m.B3)
		m.M.Set(11, 11, v)
	} else {
		y.Set(6, 0, 0)
		y.Set(7, 0, 0)
		y.Set(8, 0, 0)
		y.Set(9, 0, 0)
		y.Set(10, 0, 0)
		y.Set(11, 0, 0)
		m.M.Set(6, 6, Big)
		m.M.Set(7, 7, Big)
		m.M.Set(8, 8, Big)
		m.M.Set(9, 9, Big)
		m.M.Set(10, 10, Big)
		m.M.Set(11, 11, Big)
	}

	if m.MValid {
		_, _, v = m.Accums[12](m.M1)
		m.M.Set(12, 12, v)
		_, _, v = m.Accums[13](m.M2)
		m.M.Set(13, 13, v)
		_, _, v = m.Accums[14](m.M3)
		m.M.Set(14, 14, v)
	} else {
		y.Set(12, 0, 0)
		y.Set(13, 0, 0)
		y.Set(14, 0, 0)
		m.M.Set(12, 12, Big)
		m.M.Set(13, 13, Big)
		m.M.Set(14, 14, Big)
	}

	// Innovation covariance S = H P Hᵀ + R.
	ss := matrix.Sum(matrix.Product(h, matrix.Product(s.M, h.Transpose())), m.M)

	m2, err := ss.Inverse()
	if err != nil {
		// Leave the state untouched rather than apply a bogus correction.
		log.Println("AHRS: Can't invert Kalman gain matrix")
		return
	}
	// Kalman gain K = P Hᵀ S⁻¹; state correction su = K y.
	kk := matrix.Product(s.M, matrix.Product(h.Transpose(), m2))
	su := matrix.Product(kk, y)

	// Apply the correction to the state vector; indices follow the layout
	// documented on init: U(0-2) Z(3-5) E(6-9) H(10-12) N(13-15) V(16-18)
	// C(19-21) F(22-25) D(26-28) L(29-31).
	s.U1 += su.Get(0, 0)
	s.U2 += su.Get(1, 0)
	s.U3 += su.Get(2, 0)
	s.Z1 += su.Get(3, 0)
	s.Z2 += su.Get(4, 0)
	s.Z3 += su.Get(5, 0)
	s.E0 += su.Get(6, 0)
	s.E1 += su.Get(7, 0)
	s.E2 += su.Get(8, 0)
	s.E3 += su.Get(9, 0)
	s.H1 += su.Get(10, 0)
	s.H2 += su.Get(11, 0)
	s.H3 += su.Get(12, 0)
	s.N1 += su.Get(13, 0)
	s.N2 += su.Get(14, 0)
	s.N3 += su.Get(15, 0)
	s.V1 += su.Get(16, 0)
	s.V2 += su.Get(17, 0)
	s.V3 += su.Get(18, 0)
	s.C1 += su.Get(19, 0)
	s.C2 += su.Get(20, 0)
	s.C3 += su.Get(21, 0)
	s.F0 += su.Get(22, 0)
	s.F1 += su.Get(23, 0)
	s.F2 += su.Get(24, 0)
	s.F3 += su.Get(25, 0)
	s.D1 += su.Get(26, 0)
	s.D2 += su.Get(27, 0)
	s.D3 += su.Get(28, 0)
	s.L1 += su.Get(29, 0)
	s.L2 += su.Get(30, 0)
	s.L3 += su.Get(31, 0)
	s.T = m.T

	// Covariance update P' = (I - K H) P, then renormalize the quaternions.
	s.M = matrix.Product(matrix.Difference(matrix.Eye(32), matrix.Product(kk, h)), s.M)
	s.normalize()
}
// PredictMeasurement maps the current state into expected sensor readings:
// airspeed U passes through; groundspeed W is airspeed rotated to earth frame
// plus wind V; accelerometer A and gyro B are rotated into the sensor frame
// (F quaternion) with their biases C and D added; magnetometer M is the earth
// field N rotated into the sensor frame plus bias L.
func (s *KalmanState) PredictMeasurement() (m *Measurement) {
	m = NewMeasurement()

	m.UValid = true
	m.U1 = s.U1
	m.U2 = s.U2
	m.U3 = s.U3

	m.WValid = true
	m.W1 = s.e11*s.U1 + s.e12*s.U2 + s.e13*s.U3 + s.V1
	m.W2 = s.e21*s.U1 + s.e22*s.U2 + s.e23*s.U3 + s.V2
	m.W3 = s.e31*s.U1 + s.e32*s.U2 + s.e33*s.U3 + s.V3

	m.SValid = true
	// Include pseudoforces from non-inertial frame! Why we see "contamination" of accel from gyro
	// h* is the rotation rate expressed in the aircraft frame.
	h1 := s.H1*s.e11 + s.H2*s.e21 + s.H3*s.e31
	h2 := s.H1*s.e12 + s.H2*s.e22 + s.H3*s.e32
	h3 := s.H1*s.e13 + s.H2*s.e23 + s.H3*s.e33
	// Specific force = -acceleration + (omega x U)/G - gravity direction.
	a1 := -s.Z1 + (h3*s.U2-h2*s.U3)*Deg/G - s.e31
	a2 := -s.Z2 + (h1*s.U3-h3*s.U1)*Deg/G - s.e32
	a3 := -s.Z3 + (h2*s.U1-h1*s.U2)*Deg/G - s.e33
	m.A1 = s.f11*a1 + s.f12*a2 + s.f13*a3 + s.C1
	m.A2 = s.f21*a1 + s.f22*a2 + s.f23*a3 + s.C2
	m.A3 = s.f31*a1 + s.f32*a2 + s.f33*a3 + s.C3

	m.B1 = s.f11*h1 + s.f12*h2 + s.f13*h3 + s.D1
	m.B2 = s.f21*h1 + s.f22*h2 + s.f23*h3 + s.D2
	m.B3 = s.f31*h1 + s.f32*h2 + s.f33*h3 + s.D3

	m.MValid = true
	// Earth magnetic field rotated into the aircraft frame, plus bias.
	m1 := s.N1*s.e11 + s.N2*s.e21 + s.N3*s.e31 + s.L1
	m2 := s.N1*s.e12 + s.N2*s.e22 + s.N3*s.e32 + s.L2
	m3 := s.N1*s.e13 + s.N2*s.e23 + s.N3*s.e33 + s.L3
	m.M1 = s.f11*m1 + s.f12*m2 + s.f13*m3
	m.M2 = s.f21*m1 + s.f22*m2 + s.f23*m3
	m.M3 = s.f31*m1 + s.f32*m2 + s.f33*m3

	m.T = s.T

	return
}
// calcJacobianState returns the state-transition Jacobian F = I + dt·(∂f/∂x)
// for the prediction step at time t. Only the U←Z integration and the
// quaternion kinematics (E←E,H) have off-diagonal terms; all other state
// components propagate unchanged (identity rows).
func (s *KalmanState) calcJacobianState(t float64) (jac *matrix.DenseMatrix) {
	dt := t - s.T

	jac = matrix.Eye(32)
	// State layout (as in init):
	// U*3, Z*3, E*4, H*3, N*3,
	// V*3, C*3, F*4, D*3, L*3
	//s.U1 += dt*s.Z1*G
	jac.Set(0, 3, dt*G) // U1/Z1
	//s.U2 += dt*s.Z2*G
	jac.Set(1, 4, dt*G) // U2/Z2
	//s.U3 += dt*s.Z3*G
	jac.Set(2, 5, dt*G) // U3/Z3
	//s.E0 += 0.5*dt*(-s.H1*s.E1 - s.H2*s.E2 - s.H3*s.E3)*Deg
	jac.Set(6, 7, -0.5*dt*s.H1*Deg)  // E0/E1
	jac.Set(6, 8, -0.5*dt*s.H2*Deg)  // E0/E2
	jac.Set(6, 9, -0.5*dt*s.H3*Deg)  // E0/E3
	jac.Set(6, 10, -0.5*dt*s.E1*Deg) // E0/H1
	jac.Set(6, 11, -0.5*dt*s.E2*Deg) // E0/H2
	jac.Set(6, 12, -0.5*dt*s.E3*Deg) // E0/H3
	//s.E1 += 0.5*dt*(+s.H1*s.E0 + s.H2*s.E3 - s.H3*s.E2)*Deg
	jac.Set(7, 6, +0.5*dt*s.H1*Deg)  // E1/E0
	jac.Set(7, 8, -0.5*dt*s.H3*Deg)  // E1/E2
	jac.Set(7, 9, +0.5*dt*s.H2*Deg)  // E1/E3
	jac.Set(7, 10, +0.5*dt*s.E0*Deg) // E1/H1
	jac.Set(7, 11, +0.5*dt*s.E3*Deg) // E1/H2
	jac.Set(7, 12, -0.5*dt*s.E2*Deg) // E1/H3
	//s.E2 += 0.5*dt*(-s.H1*s.E3 + s.H2*s.E0 + s.H3*s.E1)*Deg
	jac.Set(8, 6, +0.5*dt*s.H2*Deg)  // E2/E0
	jac.Set(8, 7, +0.5*dt*s.H3*Deg)  // E2/E1
	jac.Set(8, 9, -0.5*dt*s.H1*Deg)  // E2/E3
	jac.Set(8, 10, -0.5*dt*s.E3*Deg) // E2/H1
	jac.Set(8, 11, +0.5*dt*s.E0*Deg) // E2/H2
	jac.Set(8, 12, +0.5*dt*s.E1*Deg) // E2/H3
	//s.E3 += 0.5*dt*(+s.H1*s.E2 - s.H2*s.E1 + s.H3*s.E0)*Deg
	jac.Set(9, 6, +0.5*dt*s.H3*Deg)  // E3/E0
	jac.Set(9, 7, -0.5*dt*s.H2*Deg)  // E3/E1
	jac.Set(9, 8, +0.5*dt*s.H1*Deg)  // E3/E2
	jac.Set(9, 10, +0.5*dt*s.E2*Deg) // E3/H1
	jac.Set(9, 11, -0.5*dt*s.E1*Deg) // E3/H2
	jac.Set(9, 12, +0.5*dt*s.E0*Deg) // E3/H3

	return
}
// calcJacobianMeasurement linearizes the measurement model about the current
// state and returns the 15x32 measurement Jacobian. Rows follow the
// measurement vector (U*3, W*3, A*3, B*3, M*3); columns follow the state
// vector laid out in the comment below. The commented-out equations above
// each group record the measurement expression being differentiated; e11..e33
// and f11..f33 are the quaternion-derived rotation-matrix entries for E and F.
func (s *KalmanState) calcJacobianMeasurement() (jac *matrix.DenseMatrix) {
	jac = matrix.Zeros(15, 32)
	// U*3, Z*3, E*4, H*3, N*3,
	// V*3, C*3, F*4, D*3, L*3
	// U*3, W*3, A*3, B*3, M*3
	//m.U1 = s.U1
	jac.Set(0, 0, 1) // U1/U1
	//m.U2 = s.U2
	jac.Set(1, 1, 1) // U2/U2
	//m.U3 = s.U3
	jac.Set(2, 2, 1) // U3/U3
	// W rows: U rotated by the attitude quaternion E, plus the V bias.
	w1 := s.e11*s.U1 + s.e12*s.U2 + s.e13*s.U3
	//s.e11 = 2*(+s.E0 * s.E0 + s.E1 * s.E1 - 0.5)
	//s.e12 = 2*(-s.E0 * s.E3 + s.E1 * s.E2)
	//s.e13 = 2*(+s.E0 * s.E2 + s.E1 * s.E3)
	jac.Set(3, 0, s.e11) // W1/U1
	jac.Set(3, 1, s.e12) // W1/U2
	jac.Set(3, 2, s.e13) // W1/U3
	jac.Set(3, 6, // W1/E0
		2*(+s.E0*s.U1 - s.E3*s.U2 + s.E2*s.U3) -
			2*w1*s.E0)
	jac.Set(3, 7, // W1/E1
		2*(+s.E1*s.U1 + s.E2*s.U2 + s.E3*s.U3) -
			2*w1*s.E1)
	jac.Set(3, 8, // W1/E2
		2*(-s.E2*s.U1 + s.E1*s.U2 + s.E0*s.U3) -
			2*w1*s.E2)
	jac.Set(3, 9, // W1/E3
		2*(-s.E3*s.U1 - s.E0*s.U2 + s.E1*s.U3) -
			2*w1*s.E3)
	jac.Set(3, 16, 1) // W1/V1
	w2 := s.e21*s.U1 + s.e22*s.U2 + s.e23*s.U3
	//s.e21 = 2*(+s.E0 * s.E3 + s.E2 * s.E1)
	//s.e22 = 2*(+s.E0 * s.E0 + s.E2 * s.E2 - 0.5)
	//s.e23 = 2*(-s.E0 * s.E1 + s.E2 * s.E3)
	jac.Set(4, 0, s.e21) // W2/U1
	jac.Set(4, 1, s.e22) // W2/U2
	jac.Set(4, 2, s.e23) // W2/U3
	jac.Set(4, 6, // W2/E0
		2*(+s.E3*s.U1 + s.E0*s.U2 - s.E1*s.U3) -
			2*w2*s.E0)
	jac.Set(4, 7, // W2/E1
		2*(+s.E2*s.U1 - s.E1*s.U2 - s.E0*s.U3) -
			2*w2*s.E1)
	jac.Set(4, 8, // W2/E2
		2*(+s.E1*s.U1 + s.E2*s.U2 + s.E3*s.U3) -
			2*w2*s.E2)
	jac.Set(4, 9, // W2/E3
		2*(+s.E0*s.U1 - s.E3*s.U2 + s.E2*s.U3) -
			2*w2*s.E3)
	jac.Set(4, 17, 1) // W2/V2
	w3 := s.e31*s.U1 + s.e32*s.U2 + s.e33*s.U3
	//s.e31 = 2*(-s.E0 * s.E2 + s.E3 * s.E1)
	//s.e32 = 2*(+s.E0 * s.E1 + s.E3 * s.E2)
	//s.e33 = 2*(+s.E0 * s.E0 + s.E3 * s.E3 - 0.5)
	jac.Set(5, 0, s.e31) // W3/U1
	jac.Set(5, 1, s.e32) // W3/U2
	jac.Set(5, 2, s.e33) // W3/U3
	jac.Set(5, 6, // W3/E0
		2*(-s.E2*s.U1 + s.E1*s.U2 + s.E0*s.U3) -
			2*w3*s.E0)
	jac.Set(5, 7, // W3/E1
		2*(+s.E3*s.U1 + s.E0*s.U2 - s.E1*s.U3) -
			2*w3*s.E1)
	jac.Set(5, 8, // W3/E2
		2*(-s.E0*s.U1 + s.E3*s.U2 - s.E2*s.U3) -
			2*w3*s.E2)
	jac.Set(5, 9, // W3/E3
		2*(+s.E1*s.U1 + s.E2*s.U2 + s.E3*s.U3) -
			2*w3*s.E3)
	jac.Set(5, 18, 1) // W3/V3
	// A rows (accelerometer): h is H rotated into the aircraft frame; a is the
	// specific-force expression; ae/af are intermediates shared by the E- and
	// F-quaternion partials.
	h1 := s.H1*s.e11 + s.H2*s.e21 + s.H3*s.e31
	h2 := s.H1*s.e12 + s.H2*s.e22 + s.H3*s.e32
	h3 := s.H1*s.e13 + s.H2*s.e23 + s.H3*s.e33
	a1 := -s.Z1 + (h3*s.U2 - h2*s.U3)*Deg/G - s.e31
	a2 := -s.Z2 + (h1*s.U3 - h3*s.U1)*Deg/G - s.e32
	a3 := -s.Z3 + (h2*s.U1 - h1*s.U2)*Deg/G - s.e33
	ae1 := s.f11*(a1+s.Z1) + s.f12*(a2+s.Z2) + s.f13*(a3+s.Z3)
	af1 := s.f11*a1 + s.f12*a2 + s.f13*a3
	jac.Set(6, 0, (s.f13*h2 - s.f12*h3)*Deg/G) // A1/U1
	jac.Set(6, 1, (s.f11*h3 - s.f13*h1)*Deg/G) // A1/U2
	jac.Set(6, 2, (s.f12*h1 - s.f11*h2)*Deg/G) // A1/U3
	jac.Set(6, 3, -s.f11) // A1/Z1
	jac.Set(6, 4, -s.f12) // A1/Z2
	jac.Set(6, 5, -s.f13) // A1/Z3
	jac.Set(6, 6, 2*Deg/G*( // A1/E0
		s.f11*(s.H1*( s.E2*s.U2 + s.E3*s.U3) + s.H2*(-s.E1*s.U2 - s.E0*s.U3) + s.H3*( s.E0*s.U2 - s.E1*s.U3)) +
		s.f12*(s.H1*( s.E0*s.U3 - s.E2*s.U1) + s.H2*( s.E3*s.U3 + s.E1*s.U1) + s.H3*(-s.E2*s.U3 - s.E0*s.U1)) +
		s.f13*(s.H1*(-s.E3*s.U1 - s.E0*s.U2) + s.H2*( s.E0*s.U1 - s.E3*s.U2) + s.H3*( s.E1*s.U1 + s.E2*s.U2)) ) -
		2* ae1 *s.E0 -
		2*(s.f11*(-s.E2) + s.f12*( s.E1) + s.f13*( s.E0)) )
	jac.Set(6, 7, 2*Deg/G*( // A1/E1
		s.f11*(s.H1*( s.E3*s.U2 - s.E2*s.U3) + s.H2*(-s.E0*s.U2 + s.E1*s.U3) + s.H3*(-s.E1*s.U2 - s.E0*s.U3)) +
		s.f12*(s.H1*( s.E1*s.U3 - s.E3*s.U1) + s.H2*( s.E2*s.U3 + s.E0*s.U1) + s.H3*( s.E3*s.U3 + s.E1*s.U1)) +
		s.f13*(s.H1*( s.E2*s.U1 - s.E1*s.U2) + s.H2*(-s.E1*s.U1 - s.E2*s.U2) + s.H3*( s.E0*s.U1 - s.E3*s.U2)) ) -
		2* ae1 *s.E1 -
		2*(s.f11*( s.E3) + s.f12*( s.E0) + s.f13*(-s.E1)) )
	jac.Set(6, 8, 2*Deg/G*( // A1/E2
		s.f11*(s.H1*( s.E0*s.U2 - s.E1*s.U3) + s.H2*( s.E3*s.U2 - s.E2*s.U3) + s.H3*(-s.E2*s.U2 - s.E3*s.U3)) +
		s.f12*(s.H1*(-s.E2*s.U3 - s.E0*s.U1) + s.H2*( s.E1*s.U3 - s.E3*s.U1) + s.H3*(-s.E0*s.U3 + s.E2*s.U1)) +
		s.f13*(s.H1*( s.E1*s.U1 + s.E2*s.U2) + s.H2*( s.E2*s.U1 - s.E1*s.U2) + s.H3*( s.E3*s.U1 + s.E0*s.U2)) ) -
		2* ae1 *s.E2 -
		2*(s.f11*(-s.E0) + s.f12*( s.E3) + s.f13*(-s.E2)) )
	jac.Set(6, 9, 2*Deg/G*( // A1/E3
		s.f11*(s.H1*( s.E1*s.U2 + s.E0*s.U3) + s.H2*( s.E2*s.U2 + s.E3*s.U3) + s.H3*( s.E3*s.U2 - s.E2*s.U3)) +
		s.f12*(s.H1*(-s.E3*s.U3 - s.E1*s.U1) + s.H2*( s.E0*s.U3 - s.E2*s.U1) + s.H3*( s.E1*s.U3 - s.E3*s.U1)) +
		s.f13*(s.H1*(-s.E0*s.U1 + s.E3*s.U2) + s.H2*(-s.E3*s.U1 - s.E0*s.U2) + s.H3*( s.E2*s.U1 - s.E1*s.U2)) ) -
		2* ae1 *s.E3 -
		2*(s.f11*( s.E1) + s.f12*( s.E2) + s.f13*( s.E3)) )
	jac.Set(6, 10, Deg/G*( // A1/H1
		s.f11*(s.U2*s.e13 - s.U3*s.e12) +
		s.f12*(s.U3*s.e11 - s.U1*s.e13) +
		s.f13*(s.U1*s.e12 - s.U2*s.e11) ))
	jac.Set(6, 11, Deg/G*( // A1/H2
		s.f11*(s.U2*s.e23 - s.U3*s.e22) +
		s.f12*(s.U3*s.e21 - s.U1*s.e23) +
		s.f13*(s.U1*s.e22 - s.U2*s.e21) ))
	jac.Set(6, 12, Deg/G*( // A1/H3
		s.f11*(s.U2*s.e33 - s.U3*s.e32) +
		s.f12*(s.U3*s.e31 - s.U1*s.e33) +
		s.f13*(s.U1*s.e32 - s.U2*s.e31) ))
	jac.Set(6, 19, 1) // A1/C1
	jac.Set(6, 22, // A1/F0
		2*(+s.F0*a1 - s.F3*a2 + s.F2*a3) -
			2*af1*s.F0)
	jac.Set(6, 23, // A1/F1
		2*(+s.F1*a1 + s.F2*a2 + s.F3*a3) -
			2*af1*s.F1)
	jac.Set(6, 24, // A1/F2
		2*(-s.F2*a1 + s.F1*a2 + s.F0*a3) -
			2*af1*s.F2)
	jac.Set(6, 25, // A1/F3
		2*(-s.F3*a1 - s.F0*a2 + s.F1*a3) -
			2*af1*s.F3)
	aa2 := s.f21*(a1+s.Z1) + s.f22*(a2+s.Z2) + s.f23*(a3+s.Z3)
	af2 := s.f21*a1 + s.f22*a2 + s.f23*a3
	jac.Set(7, 0, (h2*s.f23 - h3*s.f22)*Deg/G) // A2/U1
	jac.Set(7, 1, (h3*s.f21 - h1*s.f23)*Deg/G) // A2/U2
	jac.Set(7, 2, (h1*s.f22 - h2*s.f21)*Deg/G) // A2/U3
	jac.Set(7, 3, -s.f21) // A2/Z1
	jac.Set(7, 4, -s.f22) // A2/Z2
	jac.Set(7, 5, -s.f23) // A2/Z3
	jac.Set(7, 6, 2*Deg/G*( // A2/E0
		s.f21*(s.H1*( s.E2*s.U2 + s.E3*s.U3) + s.H2*(-s.E1*s.U2 - s.E0*s.U3) + s.H3*( s.E0*s.U2 - s.E1*s.U3)) +
		s.f22*(s.H1*( s.E0*s.U3 - s.E2*s.U1) + s.H2*( s.E3*s.U3 + s.E1*s.U1) + s.H3*(-s.E2*s.U3 - s.E0*s.U1)) +
		s.f23*(s.H1*(-s.E3*s.U1 - s.E0*s.U2) + s.H2*( s.E0*s.U1 - s.E3*s.U2) + s.H3*( s.E1*s.U1 + s.E2*s.U2)) ) -
		2*aa2*s.E0 -
		2*(s.f21*(-s.E2) + s.f22*( s.E1) + s.f23*( s.E0)) )
	jac.Set(7, 7, 2*Deg/G*( // A2/E1
		s.f21*(s.H1*( s.E3*s.U2 - s.E2*s.U3) + s.H2*(-s.E0*s.U2 + s.E1*s.U3) + s.H3*(-s.E1*s.U2 - s.E0*s.U3)) +
		s.f22*(s.H1*( s.E1*s.U3 - s.E3*s.U1) + s.H2*( s.E2*s.U3 + s.E0*s.U1) + s.H3*( s.E3*s.U3 + s.E1*s.U1)) +
		s.f23*(s.H1*( s.E2*s.U1 - s.E1*s.U2) + s.H2*(-s.E1*s.U1 - s.E2*s.U2) + s.H3*( s.E0*s.U1 - s.E3*s.U2)) ) -
		2*aa2*s.E1 -
		2*(s.f21*( s.E3) + s.f22*( s.E0) + s.f23*(-s.E1)) )
	jac.Set(7, 8, 2*Deg/G*( // A2/E2
		s.f21*(s.H1*( s.E0*s.U2 - s.E1*s.U3) + s.H2*( s.E3*s.U2 - s.E2*s.U3) + s.H3*(-s.E2*s.U2 - s.E3*s.U3)) +
		s.f22*(s.H1*(-s.E2*s.U3 - s.E0*s.U1) + s.H2*( s.E1*s.U3 - s.E3*s.U1) + s.H3*(-s.E0*s.U3 + s.E2*s.U1)) +
		s.f23*(s.H1*( s.E1*s.U1 + s.E2*s.U2) + s.H2*( s.E2*s.U1 - s.E1*s.U2) + s.H3*( s.E3*s.U1 + s.E0*s.U2)) ) -
		2*aa2*s.E2 -
		2*(s.f21*(-s.E0) + s.f22*( s.E3) + s.f23*(-s.E2)) )
	jac.Set(7, 9, 2*Deg/G*( // A2/E3
		s.f21*(s.H1*( s.E1*s.U2 + s.E0*s.U3) + s.H2*( s.E2*s.U2 + s.E3*s.U3) + s.H3*( s.E3*s.U2 - s.E2*s.U3)) +
		s.f22*(s.H1*(-s.E3*s.U3 - s.E1*s.U1) + s.H2*( s.E0*s.U3 - s.E2*s.U1) + s.H3*( s.E1*s.U3 - s.E3*s.U1)) +
		s.f23*(s.H1*(-s.E0*s.U1 + s.E3*s.U2) + s.H2*(-s.E3*s.U1 - s.E0*s.U2) + s.H3*( s.E2*s.U1 - s.E1*s.U2)) ) -
		2*aa2*s.E3 -
		2*(s.f21*( s.E1) + s.f22*( s.E2) + s.f23*( s.E3)) )
	jac.Set(7, 10, Deg/G*( // A2/H1
		s.f21*(s.U2*s.e13 - s.U3*s.e12) +
		s.f22*(s.U3*s.e11 - s.U1*s.e13) +
		s.f23*(s.U1*s.e12 - s.U2*s.e11) ))
	jac.Set(7, 11, Deg/G*( // A2/H2
		s.f21*(s.U2*s.e23 - s.U3*s.e22) +
		s.f22*(s.U3*s.e21 - s.U1*s.e23) +
		s.f23*(s.U1*s.e22 - s.U2*s.e21) ))
	jac.Set(7, 12, Deg/G*( // A2/H3
		s.f21*(s.U2*s.e33 - s.U3*s.e32) +
		s.f22*(s.U3*s.e31 - s.U1*s.e33) +
		s.f23*(s.U1*s.e32 - s.U2*s.e31) ))
	jac.Set(7, 20, 1) // A2/C2
	jac.Set(7, 22, // A2/F0
		2*(+s.F3*a1 + s.F0*a2 - s.F1*a3) -
			2*af2*s.F0)
	jac.Set(7, 23, // A2/F1
		2*(+s.F2*a1 - s.F1*a2 - s.F0*a3) -
			2*af2*s.F1)
	jac.Set(7, 24, // A2/F2
		2*(+s.F1*a1 + s.F2*a2 + s.F3*a3) -
			2*af2*s.F2)
	jac.Set(7, 25, // A2/F3
		2*(+s.F0*a1 - s.F3*a2 + s.F2*a3) -
			2*af2*s.F3)
	aa3 := s.f31*(a1+s.Z1) + s.f32*(a2+s.Z2) + s.f33*(a3+s.Z3)
	af3 := s.f31*a1 + s.f32*a2 + s.f33*a3
	jac.Set(8, 0, (h2*s.f33 - h3*s.f32)*Deg/G) // A3/U1
	jac.Set(8, 1, (h3*s.f31 - h1*s.f33)*Deg/G) // A3/U2
	jac.Set(8, 2, (h1*s.f32 - h2*s.f31)*Deg/G) // A3/U3
	jac.Set(8, 3, -s.f31) // A3/Z1
	jac.Set(8, 4, -s.f32) // A3/Z2
	jac.Set(8, 5, -s.f33) // A3/Z3
	jac.Set(8, 6, 2*Deg/G*( // A3/E0
		s.f31*(s.H1*( s.E2*s.U2 + s.E3*s.U3) + s.H2*(-s.E1*s.U2 - s.E0*s.U3) + s.H3*( s.E0*s.U2 - s.E1*s.U3)) +
		s.f32*(s.H1*( s.E0*s.U3 - s.E2*s.U1) + s.H2*( s.E3*s.U3 + s.E1*s.U1) + s.H3*(-s.E2*s.U3 - s.E0*s.U1)) +
		s.f33*(s.H1*(-s.E3*s.U1 - s.E0*s.U2) + s.H2*( s.E0*s.U1 - s.E3*s.U2) + s.H3*( s.E1*s.U1 + s.E2*s.U2)) ) -
		2*aa3*s.E0 -
		2*(s.f31*(-s.E2) + s.f32*( s.E1) + s.f33*( s.E0)) )
	jac.Set(8, 7, 2*Deg/G*( // A3/E1
		s.f31*(s.H1*( s.E3*s.U2 - s.E2*s.U3) + s.H2*(-s.E0*s.U2 + s.E1*s.U3) + s.H3*(-s.E1*s.U2 - s.E0*s.U3)) +
		s.f32*(s.H1*( s.E1*s.U3 - s.E3*s.U1) + s.H2*( s.E2*s.U3 + s.E0*s.U1) + s.H3*( s.E3*s.U3 + s.E1*s.U1)) +
		s.f33*(s.H1*( s.E2*s.U1 - s.E1*s.U2) + s.H2*(-s.E1*s.U1 - s.E2*s.U2) + s.H3*( s.E0*s.U1 - s.E3*s.U2)) ) -
		2*aa3*s.E1 -
		2*(s.f31*( s.E3) + s.f32*( s.E0) + s.f33*(-s.E1)) )
	jac.Set(8, 8, 2*Deg/G*( // A3/E2
		s.f31*(s.H1*( s.E0*s.U2 - s.E1*s.U3) + s.H2*( s.E3*s.U2 - s.E2*s.U3) + s.H3*(-s.E2*s.U2 - s.E3*s.U3)) +
		s.f32*(s.H1*(-s.E2*s.U3 - s.E0*s.U1) + s.H2*( s.E1*s.U3 - s.E3*s.U1) + s.H3*(-s.E0*s.U3 + s.E2*s.U1)) +
		s.f33*(s.H1*( s.E1*s.U1 + s.E2*s.U2) + s.H2*( s.E2*s.U1 - s.E1*s.U2) + s.H3*( s.E3*s.U1 + s.E0*s.U2)) ) -
		2*aa3*s.E2 -
		2*(s.f31*(-s.E0) + s.f32*( s.E3) + s.f33*(-s.E2)) )
	jac.Set(8, 9, 2*Deg/G*( // A3/E3
		s.f31*(s.H1*( s.E1*s.U2 + s.E0*s.U3) + s.H2*( s.E2*s.U2 + s.E3*s.U3) + s.H3*( s.E3*s.U2 - s.E2*s.U3)) +
		s.f32*(s.H1*(-s.E3*s.U3 - s.E1*s.U1) + s.H2*( s.E0*s.U3 - s.E2*s.U1) + s.H3*( s.E1*s.U3 - s.E3*s.U1)) +
		s.f33*(s.H1*(-s.E0*s.U1 + s.E3*s.U2) + s.H2*(-s.E3*s.U1 - s.E0*s.U2) + s.H3*( s.E2*s.U1 - s.E1*s.U2)) ) -
		2*aa3*s.E3 -
		2*(s.f31*( s.E1) + s.f32*( s.E2) + s.f33*( s.E3)) )
	jac.Set(8, 10, Deg/G*( // A3/H1
		s.f31*(s.U2*s.e13 - s.U3*s.e12) +
		s.f32*(s.U3*s.e11 - s.U1*s.e13) +
		s.f33*(s.U1*s.e12 - s.U2*s.e11) ))
	jac.Set(8, 11, Deg/G*( // A3/H2
		s.f31*(s.U2*s.e23 - s.U3*s.e22) +
		s.f32*(s.U3*s.e21 - s.U1*s.e23) +
		s.f33*(s.U1*s.e22 - s.U2*s.e21) ))
	jac.Set(8, 12, Deg/G*( // A3/H3
		s.f31*(s.U2*s.e33 - s.U3*s.e32) +
		s.f32*(s.U3*s.e31 - s.U1*s.e33) +
		s.f33*(s.U1*s.e32 - s.U2*s.e31) ))
	jac.Set(8, 21, 1) // A3/C3
	jac.Set(8, 22, // A3/F0
		2*(-s.F2*a1 + s.F1*a2 + s.F0*a3) -
			2*af3*s.F0)
	jac.Set(8, 23, // A3/F1
		2*(+s.F3*a1 + s.F0*a2 - s.F1*a3) -
			2*af3*s.F1)
	jac.Set(8, 24, // A3/F2
		2*(-s.F0*a1 + s.F3*a2 - s.F2*a3) -
			2*af3*s.F2)
	jac.Set(8, 25, // A3/F3
		2*(+s.F1*a1 + s.F2*a2 + s.F3*a3) -
			2*af3*s.F3)
	// B rows (gyro): h rotated by the F quaternion, plus the D bias.
	b1 := s.f11*h1 + s.f12*h2 + s.f13*h3
	bf1 := b1 + s.D1
	jac.Set(9, 6, // B1/E0
		2*( s.E0*s.H1 + s.E3*s.H2 - s.E2*s.H3)*s.f11 +
		2*(-s.E3*s.H1 + s.E0*s.H2 + s.E1*s.H3)*s.f12 +
		2*( s.E2*s.H1 - s.E1*s.H2 + s.E0*s.H3)*s.f13 -
		2*b1*s.E0)
	jac.Set(9, 7, // B1/E1
		2*( s.E1*s.H1 + s.E2*s.H2 + s.E3*s.H3)*s.f11 +
		2*( s.E2*s.H1 - s.E1*s.H2 + s.E0*s.H3)*s.f12 +
		2*( s.E3*s.H1 - s.E0*s.H2 - s.E1*s.H3)*s.f13 -
		2*b1*s.E1)
	jac.Set(9, 8, // B1/E2
		2*(-s.E2*s.H1 + s.E1*s.H2 - s.E0*s.H3)*s.f11 +
		2*( s.E1*s.H1 + s.E2*s.H2 + s.E3*s.H3)*s.f12 +
		2*( s.E0*s.H1 + s.E3*s.H2 - s.E2*s.H3)*s.f13 -
		2*b1*s.E2)
	jac.Set(9, 9, // B1/E3
		2*(-s.E3*s.H1 + s.E0*s.H2 + s.E1*s.H3)*s.f11 +
		2*(-s.E0*s.H1 - s.E3*s.H2 + s.E2*s.H3)*s.f12 +
		2*( s.E1*s.H1 + s.E2*s.H2 + s.E3*s.H3)*s.f13 -
		2*b1*s.E3)
	jac.Set(9, 10, s.e11*s.f11 + s.e12*s.f12 + s.e13*s.f13 ) // B1/H1
	jac.Set(9, 11, s.e21*s.f11 + s.e22*s.f12 + s.e23*s.f13 ) // B1/H2
	jac.Set(9, 12, s.e31*s.f11 + s.e32*s.f12 + s.e33*s.f13 ) // B1/H3
	jac.Set(9, 22, 2*( h1*s.F0 - h2*s.F3 + h3*s.F2) - // B1/F0
		2*bf1*s.F0)
	jac.Set(9, 23, 2*( h1*s.F1 + h2*s.F2 + h3*s.F3) - // B1/F1
		2*bf1*s.F1)
	jac.Set(9, 24, 2*(-h1*s.F2 + h2*s.F1 + h3*s.F0) - // B1/F2
		2*bf1*s.F2)
	jac.Set(9, 25, 2*(-h1*s.F3 - h2*s.F0 + h3*s.F1) - // B1/F3
		2*bf1*s.F3)
	jac.Set(9, 26, 1) // B1/D1
	b2 := s.f21*h1 + s.f22*h2 + s.f23*h3
	bf2 := b2 + s.D2
	jac.Set(10, 6, // B2/E0
		2*( s.E0*s.H1 + s.E3*s.H2 - s.E2*s.H3)*s.f21 +
		2*(-s.E3*s.H1 + s.E0*s.H2 + s.E1*s.H3)*s.f22 +
		2*( s.E2*s.H1 - s.E1*s.H2 + s.E0*s.H3)*s.f23 -
		2*b2*s.E0)
	jac.Set(10, 7, // B2/E1
		2*( s.E1*s.H1 + s.E2*s.H2 + s.E3*s.H3)*s.f21 +
		2*( s.E2*s.H1 - s.E1*s.H2 + s.E0*s.H3)*s.f22 +
		2*( s.E3*s.H1 - s.E0*s.H2 - s.E1*s.H3)*s.f23 -
		2*b2*s.E1)
	jac.Set(10, 8, // B2/E2
		2*(-s.E2*s.H1 + s.E1*s.H2 - s.E0*s.H3)*s.f21 +
		2*( s.E1*s.H1 + s.E2*s.H2 + s.E3*s.H3)*s.f22 +
		2*( s.E0*s.H1 + s.E3*s.H2 - s.E2*s.H3)*s.f23 -
		2*b2*s.E2)
	jac.Set(10, 9, // B2/E3
		2*(-s.E3*s.H1 + s.E0*s.H2 + s.E1*s.H3)*s.f21 +
		2*(-s.E0*s.H1 - s.E3*s.H2 + s.E2*s.H3)*s.f22 +
		2*( s.E1*s.H1 + s.E2*s.H2 + s.E3*s.H3)*s.f23 -
		2*b2*s.E3)
	jac.Set(10, 10, s.e11*s.f21 + s.e12*s.f22 + s.e13*s.f23 ) // B2/H1
	jac.Set(10, 11, s.e21*s.f21 + s.e22*s.f22 + s.e23*s.f23 ) // B2/H2
	jac.Set(10, 12, s.e31*s.f21 + s.e32*s.f22 + s.e33*s.f23 ) // B2/H3
	jac.Set(10, 22, 2*( h1*s.F3 + h2*s.F0 - h3*s.F1) - // B2/F0
		2*bf2*s.F0)
	jac.Set(10, 23, 2*( h1*s.F2 - h2*s.F1 - h3*s.F0) - // B2/F1
		2*bf2*s.F1)
	jac.Set(10, 24, 2*( h1*s.F1 + h2*s.F2 + h3*s.F3) - // B2/F2
		2*bf2*s.F2)
	jac.Set(10, 25, 2*( h1*s.F0 - h2*s.F3 + h3*s.F2) - // B2/F3
		2*bf2*s.F3)
	jac.Set(10, 27, 1) // B2/D2
	b3 := s.f31*h1 + s.f32*h2 + s.f33*h3
	bf3 := b3 + s.D3
	jac.Set(11, 6, // B3/E0
		2*( s.E0*s.H1 + s.E3*s.H2 - s.E2*s.H3)*s.f31 +
		2*(-s.E3*s.H1 + s.E0*s.H2 + s.E1*s.H3)*s.f32 +
		2*( s.E2*s.H1 - s.E1*s.H2 + s.E0*s.H3)*s.f33 -
		2*b3*s.E0)
	jac.Set(11, 7, // B3/E1
		2*( s.E1*s.H1 + s.E2*s.H2 + s.E3*s.H3)*s.f31 +
		2*( s.E2*s.H1 - s.E1*s.H2 + s.E0*s.H3)*s.f32 +
		2*( s.E3*s.H1 - s.E0*s.H2 - s.E1*s.H3)*s.f33 -
		2*b3*s.E1)
	jac.Set(11, 8, // B3/E2
		2*(-s.E2*s.H1 + s.E1*s.H2 - s.E0*s.H3)*s.f31 +
		2*( s.E1*s.H1 + s.E2*s.H2 + s.E3*s.H3)*s.f32 +
		2*( s.E0*s.H1 + s.E3*s.H2 - s.E2*s.H3)*s.f33 -
		2*b3*s.E2)
	jac.Set(11, 9, // B3/E3 (was mislabeled B3/E2; column 9 is the E3 partial)
		2*(-s.E3*s.H1 + s.E0*s.H2 + s.E1*s.H3)*s.f31 +
		2*(-s.E0*s.H1 - s.E3*s.H2 + s.E2*s.H3)*s.f32 +
		2*( s.E1*s.H1 + s.E2*s.H2 + s.E3*s.H3)*s.f33 -
		2*b3*s.E3)
	jac.Set(11, 10, s.e11*s.f31 + s.e12*s.f32 + s.e13*s.f33 ) // B3/H1
	jac.Set(11, 11, s.e21*s.f31 + s.e22*s.f32 + s.e23*s.f33 ) // B3/H2
	jac.Set(11, 12, s.e31*s.f31 + s.e32*s.f32 + s.e33*s.f33 ) // B3/H3
	jac.Set(11, 22, 2*(-h1*s.F2 + h2*s.F1 + h3*s.F0) - // B3/F0
		2*bf3*s.F0)
	jac.Set(11, 23, 2*( h1*s.F3 + h2*s.F0 - h3*s.F1) - // B3/F1
		2*bf3*s.F1)
	jac.Set(11, 24, 2*(-h1*s.F0 + h2*s.F3 - h3*s.F2) - // B3/F2
		2*bf3*s.F2)
	jac.Set(11, 25, 2*( h1*s.F1 + h2*s.F2 + h3*s.F3) - // B3/F3
		2*bf3*s.F3)
	jac.Set(11, 28, 1) // B3/D3
	// M rows (magnetometer) are disabled pending fixes; rows 12-14 stay zero.
	//TODO westphae: fix these
	/*
		m1 := s.N1*s.e11 + s.N2*s.e21 + s.N3*s.e31 + s.L1
		m2 := s.N1*s.e12 + s.N2*s.e22 + s.N3*s.e32 + s.L2
		m3 := s.N1*s.e13 + s.N2*s.e23 + s.N3*s.e33 + s.L3
		m.M1 = s.f11*m1 + s.f21*m2 + s.f31*m3 + s.L1
		jac.Set(12, 6, 2*(+s.E0*s.N1 - s.E3*s.N2 + s.E2*s.N3)) // M1/E0
		jac.Set(12, 7, 2*(+s.E1*s.N1 + s.E2*s.N2 + s.E3*s.N3)) // M1/E1
		jac.Set(12, 8, 2*(-s.E2*s.N1 + s.E1*s.N2 + s.E0*s.N3)) // M1/E2
		jac.Set(12, 9, 2*(-s.E3*s.N1 - s.E0*s.N2 + s.E1*s.N3)) // M1/E3
		jac.Set(12, 13, 2*(s.E1*s.E1+s.E0*s.E0-0.5)) // M1/N1
		jac.Set(12, 14, 2*(s.E1*s.E2-s.E0*s.E3)) // M1/N2
		jac.Set(12, 15, 2*(s.E1*s.E3+s.E0*s.E2)) // M1/N3
		jac.Set(12, 29, 1) // M1/L1
		m.M2 = s.f12*m1 + s.f22*m2 + s.f32*m3 + s.L2
		jac.Set(13, 6, 2*(+s.E3*s.N1 + s.E0*s.N2 - s.E1*s.N3)) // M2/E0
		jac.Set(13, 7, 2*(+s.E2*s.N1 - s.E1*s.N2 - s.E0*s.N3)) // M2/E1
		jac.Set(13, 8, 2*(+s.E1*s.N1 + s.E2*s.N2 + s.E3*s.N3)) // M2/E2
		jac.Set(13, 9, 2*(+s.E0*s.N1 - s.E3*s.N2 + s.E2*s.N3)) // M2/E3
		jac.Set(13, 13, 2*(s.E2*s.E1 + s.E0*s.E3)) // M2/N1
		jac.Set(13, 14, 2*(s.E2*s.E2 + s.E0*s.E0 - 0.5)) // M2/N2
		jac.Set(13, 15, 2*(s.E2*s.E3 - s.E0*s.E1)) // M2/N3
		jac.Set(13, 30, 1) // M2/L2
		m.M3 = s.f13*m1 + s.f23*m2 + s.f33*m3 + s.L3
		jac.Set(14, 6, 2*(-s.E2*s.N1 + s.E1*s.N2 + s.E0*s.N3)) // M3/E0
		jac.Set(14, 7, 2*(+s.E3*s.N1 + s.E0*s.N2 - s.E1*s.N3)) // M3/E1
		jac.Set(14, 8, 2*(-s.E0*s.N1 + s.E3*s.N2 - s.E2*s.N3)) // M3/E2
		jac.Set(14, 9, 2*(+s.E1*s.N1 + s.E2*s.N2 + s.E3*s.N3)) // M3/E3
		jac.Set(14, 13, 2*(s.E3*s.E1 - s.E0*s.E2)) // M3/N1
		jac.Set(14, 14, 2*(s.E3*s.E2 + s.E0*s.E1)) // M3/N2
		jac.Set(14, 15, 2*(s.E3*s.E3 + s.E0*s.E0 - 0.5)) // M3/N3
		jac.Set(14, 31, 1) // M3/L3
	*/
	return
}
var KalmanJSONConfig = "" | ahrs/ahrs_kalman.go | 0.678327 | 0.522141 | ahrs_kalman.go | starcoder |
package day7
import (
"fmt"
"ryepup/advent2021/utils"
"sync"
)
/*
The crabs don't seem interested in your proposed solution. Perhaps you
misunderstand crab engineering?
As it turns out, crab submarine engines don't burn fuel at a constant rate.
Instead, each change of 1 step in horizontal position costs 1 more unit of fuel
than the last: the first step costs 1, the second step costs 2, the third step
costs 3, and so on.
As each crab moves, moving further becomes more expensive. This changes the best
horizontal position to align them all on; in the example above, this becomes 5:
Move from 16 to 5: 66 fuel
Move from 1 to 5: 10 fuel
Move from 2 to 5: 6 fuel
Move from 0 to 5: 15 fuel
Move from 4 to 5: 1 fuel
Move from 2 to 5: 6 fuel
Move from 7 to 5: 3 fuel
Move from 1 to 5: 10 fuel
Move from 2 to 5: 6 fuel
Move from 14 to 5: 45 fuel
This costs a total of 168 fuel. This is the new cheapest possible outcome; the
old alignment position (2) now costs 206 fuel instead.
Determine the horizontal position that the crabs can align to using the least
fuel possible so they can make you an escape route! How much fuel must they
spend to align to that position?
*/
// Part2 solves part two of the puzzle for the input file at path. It delegates
// to Part2Opts with the strategy pair that benchmarked fastest.
func Part2(path string) (int, error) {
	// fastest approach on my machine
	return Part2Opts(path, NoCache, WaitGroup)
}
// Part2Opts solves part two with a configurable fuel-cost caching strategy
// and parallelism strategy. The unsynchronized Naive cache is only legal with
// the single-goroutine ForLoop processor; any other pairing is rejected.
func Part2Opts(path string, cache CacheStrategy, proc ParallelStrategy) (int, error) {
	// Sharing an unsynchronized map across goroutines would be a data race.
	if cache == Naive && proc != ForLoop {
		return 0, fmt.Errorf("invalid strategy combination")
	}
	positions, err := utils.ReadIntCsv(path)
	if err != nil {
		return 0, err
	}
	// One candidate alignment target per horizontal position 0..max.
	maxPosition := utils.MaxInt(positions...)
	fuelPerTarget := make([]int, maxPosition+1)
	makeProcessor(proc)(positions, maxPosition, makeFuelStrategy(cache), fuelPerTarget)
	return utils.MinInt(fuelPerTarget...), nil
}
// rawFuelCost returns the fuel needed to travel distance steps when the k-th
// step costs k units: the triangular number 1 + 2 + ... + distance.
func rawFuelCost(distance int) int {
	// Closed form of the arithmetic series; see
	// https://en.wikipedia.org/wiki/1_%2B_2_%2B_3_%2B_4_%2B_%E2%8B%AF
	return distance * (distance + 1) / 2
}
// CacheStrategy selects how (and whether) fuel-cost results are memoized.
type CacheStrategy int

const (
	// NoCache recomputes the cost on every call.
	NoCache CacheStrategy = iota
	// Mutex guards the cache map with a sync.Mutex.
	Mutex
	// RWMutex guards the cache map with a sync.RWMutex (read-mostly locking).
	RWMutex
	// Naive uses an unsynchronized map; only safe from a single goroutine.
	Naive
)
// costFunction maps a travel distance to its fuel cost.
type costFunction = func(int) int

// makeFuelStrategy returns a distance-to-fuel function implementing the
// requested caching strategy, or nil for an unknown strategy.
func makeFuelStrategy(strategy CacheStrategy) costFunction {
	if strategy == NoCache {
		return rawFuelCost
	}
	cache := make(map[int]int)
	// fill computes and records the cost; callers provide any locking needed.
	fill := func(distance int) int {
		cost := rawFuelCost(distance)
		cache[distance] = cost
		return cost
	}
	switch strategy {
	case Naive:
		// Unsynchronized: valid only when called from a single goroutine.
		return func(distance int) int {
			if cost, ok := cache[distance]; ok {
				return cost
			}
			return fill(distance)
		}
	case Mutex:
		var mu sync.Mutex
		return func(distance int) int {
			mu.Lock()
			defer mu.Unlock()
			if cost, ok := cache[distance]; ok {
				return cost
			}
			return fill(distance)
		}
	case RWMutex:
		var mu sync.RWMutex
		return func(distance int) int {
			mu.RLock()
			cost, ok := cache[distance]
			mu.RUnlock()
			if ok {
				return cost
			}
			mu.Lock()
			defer mu.Unlock()
			// Another goroutine may have filled this entry between the locks.
			if cost, ok := cache[distance]; ok {
				return cost
			}
			return fill(distance)
		}
	}
	return nil
}
// ParallelStrategy selects how candidate target positions are evaluated.
type ParallelStrategy int

const (
	// ForLoop evaluates targets sequentially on the calling goroutine.
	ForLoop ParallelStrategy = iota
	// WaitGroup evaluates each target on its own goroutine.
	WaitGroup
)

// processor fills solutions[target] with the total fuel cost of aligning all
// crabs at each target position 0..maxPosition.
type processor = func([]int, int, costFunction, []int)
// makeProcessor returns the evaluation loop implementing the requested
// parallelism strategy, or nil for an unknown strategy.
func makeProcessor(strategy ParallelStrategy) processor {
	switch strategy {
	case ForLoop:
		return forLoopProcessor
	case WaitGroup:
		return waitGroupProcessor
	}
	return nil
}
// forLoopProcessor computes, sequentially, the total fuel for every candidate
// target position and stores it in solutions[target].
func forLoopProcessor(positions []int, maxPosition int, cost costFunction, solutions []int) {
	for target := 0; target <= maxPosition; target++ {
		total := 0
		for _, pos := range positions {
			total += cost(utils.AbsInt(pos - target))
		}
		solutions[target] = total
	}
}
func waitGroupProcessor(positions []int, maxPosition int, cost costFunction, solutions []int) {
var wg sync.WaitGroup
for target := 0; target <= maxPosition; target++ {
wg.Add(1)
go func(target int) {
defer wg.Done()
fuel := 0
for _, position := range positions {
distance := utils.AbsInt(position - target)
fuel += cost(distance)
}
solutions[target] = fuel
}(target)
}
wg.Wait()
} | day7/part2.go | 0.532668 | 0.512815 | part2.go | starcoder |
package datety
import "time"
// IsSameDay returns true if both dates are on the same day, same month and same year
func IsSameDay(t1, t2 time.Time) bool {
t1 = t1.UTC()
t2 = t2.UTC()
y1, m1, d1 := t1.Date()
y2, m2, d2 := t2.Date()
return y1 == y2 && m1 == m2 && d1 == d2
}
// IsSameMonth return true if both date are on the same month and year
func IsSameMonth(t1, t2 time.Time) bool {
t1 = t1.UTC()
t2 = t2.UTC()
y1, m1, _ := t1.Date()
y2, m2, _ := t2.Date()
return y1 == y2 && m1 == m2
}
// IsSameYear returns true if both date are on the same year
func IsSameYear(t1, t2 time.Time) bool {
t1 = t1.UTC()
t2 = t2.UTC()
y1, _, _ := t1.Date()
y2, _, _ := t2.Date()
return y1 == y2
}
// IsSamWithinThreshold return true if t1 is between t2 - threshold AND t2 + threshold
func IsSamWithinThreshold(t1, t2 time.Time, threshold time.Duration) bool {
if t1.Equal(t2) {
return true
}
if t1.After(t2.Add(-1*threshold)) && t1.Before(t2.Add(threshold)) {
return true
}
return false
}
// IsToday reports whether date falls on the current calendar day.
func IsToday(date time.Time) bool {
	return IsSameDay(time.Now(), date)
}
// NumberOfMonths return the number of month separating from to
func NumberOfMonths(from time.Time, to time.Time) int {
if from.After(to) {
return 0
}
if (from.Month() == to.Month()) && (from.Year() == to.Year()) {
return 0
}
return 1 + NumberOfMonths(from.AddDate(0, 1, 0), to)
}
// NumberOfDays returns how many one-day AddDate steps it takes to move from
// "from" onto "to"'s calendar day (same-day comparison is in UTC, via
// IsSameDay). It returns 0 when from is after to or already the same day.
//
// Iterative rewrite of the original recursion: identical results, without
// call-stack growth proportional to the span.
func NumberOfDays(from, to time.Time) int {
	days := 0
	for {
		if from.After(to) {
			return days
		}
		if IsSameDay(from, to) {
			return days
		}
		days++
		from = from.AddDate(0, 0, 1)
	}
}
// NumberOfDays return the number of hours between from and two
func NumberOfHours(from, to time.Time) int {
d := to.Sub(from)
return int(d / time.Hour)
}
// TodayAtMidnight returns the current date with the clock floored to
// 00:00:00 in the local time zone.
func TodayAtMidnight() time.Time {
	now := time.Now()
	return DayFloor(now)
}
// BeginningOfMonth returns the the time of the first day of the month of t
func BeginningOfMonth(t time.Time) time.Time {
return time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
}
// HourFloor return the time with min:sec:nsec to 0:0:0
func HourFloor(t time.Time) time.Time {
return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
}
// DayFloor set the day to midnight
func DayFloor(t time.Time) time.Time {
return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
}
// Ceil returns t with the clock set to 23:59:59 plus 9,999,999 nanoseconds.
// NOTE(review): 9999999 ns is ~0.01s past the second, not the end of the
// day; a true end-of-day ceiling would use 999999999 ns. This looks like a
// typo — confirm intent before changing, since callers may compare against
// the current value.
func Ceil(t time.Time) time.Time {
	return time.Date(t.Year(), t.Month(), t.Day(), 23, 59, 59, 9999999, t.Location())
}
// Protocol buffer comparison.
package proto
import (
"bytes"
"log"
"reflect"
"strings"
"google.golang.org/protobuf/reflect/protoreflect"
)
/*
Equal returns true iff protocol buffers a and b are equal.
The arguments must both be pointers to protocol buffer structs.
Equality is defined in this way:
- Two messages are equal iff they are the same type,
corresponding fields are equal, unknown field sets
are equal, and extensions sets are equal.
- Two set scalar fields are equal iff their values are equal.
If the fields are of a floating-point type, remember that
NaN != x for all x, including NaN. If the message is defined
in a proto3 .proto file, fields are not "set"; specifically,
zero length proto3 "bytes" fields are equal (nil == {}).
- Two repeated fields are equal iff their lengths are the same,
and their corresponding elements are equal. Note a "bytes" field,
although represented by []byte, is not a repeated field and the
rule for the scalar fields described above applies.
- Two unset fields are equal.
- Two unknown field sets are equal if their current
encoded state is equal.
- Two extension sets are equal iff they have corresponding
elements that are pairwise equal.
- Two map fields are equal iff their lengths are the same,
and they contain the same set of elements. Zero-length map
fields are equal.
- Every other combination of things are not equal.
The return value is undefined if a and b are not protocol buffers.
*/
func Equal(a, b Message) bool {
	if a == nil || b == nil {
		// Equal only when both interface values are nil.
		return a == b
	}
	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
	if v1.Type() != v2.Type() {
		return false
	}
	if v1.Kind() == reflect.Ptr {
		// Two nil pointers of the same type are equal; nil vs non-nil is not.
		if v1.IsNil() {
			return v2.IsNil()
		}
		if v2.IsNil() {
			return false
		}
		// Compare the pointed-to message structs.
		v1, v2 = v1.Elem(), v2.Elem()
	}
	if v1.Kind() != reflect.Struct {
		// Not a message struct: per the doc comment, the result is undefined
		// for non-protobuf inputs; this implementation reports unequal.
		return false
	}
	return equalStruct(v1, v2)
}
// v1 and v2 are known to have the same type.
// equalStruct compares two message structs field by field, then compares the
// extension sets (either legacy layout) and the raw unknown-field bytes.
func equalStruct(v1, v2 reflect.Value) bool {
	sprop := GetProperties(v1.Type())
	for i := 0; i < v1.NumField(); i++ {
		f := v1.Type().Field(i)
		// XXX_-prefixed fields are generator bookkeeping, handled separately below.
		if strings.HasPrefix(f.Name, "XXX_") {
			continue
		}
		f1, f2 := v1.Field(i), v2.Field(i)
		if f.Type.Kind() == reflect.Ptr {
			if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
				// both unset
				continue
			} else if n1 != n2 {
				// set/unset mismatch
				return false
			}
			f1, f2 = f1.Elem(), f2.Elem()
		}
		if !equalAny(f1, f2, sprop.Prop[i]) {
			return false
		}
	}
	// Extensions can live in either of two legacy field layouts; compare
	// whichever one this generated type carries.
	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
		em2 := v2.FieldByName("XXX_InternalExtensions")
		m1 := extensionFieldsOf(em1.Addr().Interface())
		m2 := extensionFieldsOf(em2.Addr().Interface())
		if !equalExtensions(v1.Type(), m1, m2) {
			return false
		}
	}
	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
		em2 := v2.FieldByName("XXX_extensions")
		m1 := extensionFieldsOf(em1.Addr().Interface())
		m2 := extensionFieldsOf(em2.Addr().Interface())
		if !equalExtensions(v1.Type(), m1, m2) {
			return false
		}
	}
	// Unknown fields compare equal iff their current encoded bytes match.
	uf := v1.FieldByName("XXX_unrecognized")
	if !uf.IsValid() {
		return true
	}
	u1 := uf.Bytes()
	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
	return bytes.Equal(u1, u2)
}
// v1 and v2 are known to have the same type.
// prop may be nil.
// equalAny compares two field values of identical type, recursing into
// messages, maps, slices and structs per the equality rules documented on
// Equal.
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
	if v1.Type() == protoMessageType {
		m1, _ := v1.Interface().(Message)
		m2, _ := v2.Interface().(Message)
		return Equal(m1, m2)
	}
	switch v1.Kind() {
	case reflect.Bool:
		return v1.Bool() == v2.Bool()
	case reflect.Float32, reflect.Float64:
		// Note: IEEE semantics — NaN != NaN, so fields holding NaN compare unequal.
		return v1.Float() == v2.Float()
	case reflect.Int32, reflect.Int64:
		return v1.Int() == v2.Int()
	case reflect.Interface:
		// Probably a oneof field; compare the inner values.
		n1, n2 := v1.IsNil(), v2.IsNil()
		if n1 || n2 {
			return n1 == n2
		}
		e1, e2 := v1.Elem(), v2.Elem()
		if e1.Type() != e2.Type() {
			return false
		}
		return equalAny(e1, e2, nil)
	case reflect.Map:
		if v1.Len() != v2.Len() {
			return false
		}
		for _, key := range v1.MapKeys() {
			val2 := v2.MapIndex(key)
			if !val2.IsValid() {
				// This key was not found in the second map.
				return false
			}
			if !equalAny(v1.MapIndex(key), val2, nil) {
				return false
			}
		}
		return true
	case reflect.Ptr:
		// Maps may have nil values in them, so check for nil.
		if v1.IsNil() && v2.IsNil() {
			return true
		}
		if v1.IsNil() != v2.IsNil() {
			return false
		}
		return equalAny(v1.Elem(), v2.Elem(), prop)
	case reflect.Slice:
		if v1.Type().Elem().Kind() == reflect.Uint8 {
			// short circuit: []byte
			// Edge case: if this is in a proto3 message, a zero length
			// bytes field is considered the zero value.
			if prop != nil && prop.Proto3 && v1.Len() == 0 && v2.Len() == 0 {
				return true
			}
			if v1.IsNil() != v2.IsNil() {
				return false
			}
			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
		}
		// Repeated field: equal lengths and pairwise-equal elements.
		if v1.Len() != v2.Len() {
			return false
		}
		for i := 0; i < v1.Len(); i++ {
			if !equalAny(v1.Index(i), v2.Index(i), prop) {
				return false
			}
		}
		return true
	case reflect.String:
		return v1.Interface().(string) == v2.Interface().(string)
	case reflect.Struct:
		return equalStruct(v1, v2)
	case reflect.Uint32, reflect.Uint64:
		return v1.Uint() == v2.Uint()
	}
	// unknown type, so not a protocol buffer
	log.Printf("proto: don't know how to compare %v", v1)
	return false
}
// equalExtensions reports whether two extension sets have the same field
// numbers with pairwise-equal decoded values. Extensions that are not both
// decoded (one or both still in encoded form) compare unequal.
func equalExtensions(base reflect.Type, em1, em2 *extensionMap) bool {
	if em1.Len() != em2.Len() {
		return false
	}
	// Range over em1; any field number missing from em2, or any unequal
	// value, flips equal to false and stops the iteration.
	equal := true
	em1.Range(func(extNum protoreflect.FieldNumber, e1 Extension) bool {
		if !em2.Has(extNum) {
			equal = false
			return false
		}
		e2 := em2.Get(extNum)
		m1 := extensionAsLegacyType(e1.GetValue())
		m2 := extensionAsLegacyType(e2.GetValue())
		if m1 == nil && m2 == nil {
			return true
		}
		if m1 != nil && m2 != nil {
			// Both are unencoded.
			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
				equal = false
				return false
			}
			return true
		}
		// Mixed decoded/undecoded state: treated as unequal.
		equal = false
		return false
	})
	return equal
}
package splines
import (
"bytes"
"fmt"
"math"
"time"
"github.com/spencer-p/surfdash/pkg/noaa"
)
// Curve represents a cubic curve that links one tide event to the next
// smoothly. Its derivative at Start and End is zero, and it is undefined
// outside [Start, End].
type Curve struct {
	Start, End time.Time
	// Cubic coefficients: height(x) = a*x^3 + b*x^2 + c*x + d, where x is
	// seconds since Start (see xrel and Curve.Eval).
	a, b, c, d float64
}

// A Spline is a slice of curves linked together to form a full picture.
type Spline []Curve
// CurvesBetween identifies curves to link NOAA tide predictions: one curve
// per adjacent pair of predictions, or nil when fewer than two predictions
// are given.
func CurvesBetween(preds noaa.Predictions) Spline {
	if len(preds) < 2 {
		return nil
	}
	curves := make([]Curve, 0, len(preds)-1)
	for i := 1; i < len(preds); i++ {
		prev, cur := preds[i-1], preds[i]
		curves = append(curves, curveBetween(
			time.Time(prev.Time),
			float64(prev.Height),
			time.Time(cur.Time),
			float64(cur.Height)))
	}
	return curves
}
// Discrete samples n evenly spaced tide heights across the time range covered
// by spline, from the first curve's Start to the last curve's End.
//
// Fixes over the original: n < 0 no longer panics in make (returns nil), and
// n == 1 no longer divides by zero when computing the step (it samples the
// start of the range instead).
func Discrete(spline Spline, n int) []float64 {
	if len(spline) < 1 || n < 0 {
		return nil
	}
	result := make([]float64, n)
	if n < 2 {
		// n == 0: nothing to sample; n == 1: the step below would divide by
		// zero, so sample the start of the range.
		if n == 1 {
			result[0] = spline.Eval(spline[0].Start)
		}
		return result
	}
	start := spline[0].Start
	end := spline[len(spline)-1].End
	dur := end.Sub(start)
	// n-1 intervals between n samples, so the last sample lands on end.
	step := time.Duration(float64(dur) / float64(n-1))
	for i := range result {
		result[i] = spline.Eval(start.Add(step * time.Duration(i)))
	}
	return result
}
// curveBetween fits the cubic connecting (time1, h1) to (time2, h2) with zero
// derivative at both endpoints. The x coordinate is seconds relative to
// time1 (so t1 is always 0 and t2 is the span in seconds); the coefficient
// formulas below are the general solution kept in full form.
func curveBetween(time1 time.Time, h1 float64, time2 time.Time, h2 float64) Curve {
	t1 := 0.0
	t2 := xrel(time1, time2)
	denominator := math.Pow(t1-t2, 3.0)
	a := (-2 * (h1 - h2)) / denominator
	b := (3 * (h1 - h2) * (t1 + t2)) / denominator
	c := (-6 * (h1 - h2) * t1 * t2) / denominator
	d := -1 * (-1*h2*math.Pow(t1, 3) + 3*h2*math.Pow(t1, 2)*t2 - 3*h1*t1*math.Pow(t2, 2) + h1*math.Pow(t2, 3)) / denominator
	curve := Curve{
		Start: time1,
		End:   time2,
		a:     a,
		b:     b,
		c:     c,
		d:     d,
	}
	return curve
}
// Eval evaluates the spline at time t by binary-searching for the curve whose
// [Start, End] interval contains t. It returns NaN when t is outside every
// curve.
//
// Bug fix: the original narrowed the search with left = mid, which never
// shrinks the interval once it reaches size one — so any t after a curve's
// End with the search pinned on that curve (e.g. any t past the spline's
// last End) spun forever. Advancing to mid+1 keeps the invariant that the
// containing curve, if any, lies in [left, right) and guarantees
// termination.
func (s Spline) Eval(t time.Time) float64 {
	left, right := 0, len(s)
	for left < right {
		mid := left + (right-left)/2
		switch {
		case t.Before(s[mid].Start):
			right = mid
		case t.After(s[mid].End):
			left = mid + 1
		default:
			return s[mid].Eval(t)
		}
	}
	// t is not covered by any curve.
	return math.NaN()
}
// Eval evaluates the cubic at time t, or returns NaN when t is outside
// [Start, End].
func (c Curve) Eval(t time.Time) float64 {
	if t.Before(c.Start) || t.After(c.End) {
		return math.NaN()
	}
	// x is seconds since the curve's own start; see xrel for why coordinates
	// are kept relative.
	x := xrel(c.Start, t)
	return c.a*x*x*x + c.b*x*x + c.c*x + c.d
}
// xrel computes an x coordinate for t that is relative to origin.
// This reduces large floating point errors by moving x coordinates closer to
// the "origin" (just the start of a particular curve).
func xrel(origin time.Time, t time.Time) float64 {
return float64(t.Unix() - origin.Unix())
}
func (c Curve) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
_, err := fmt.Fprintf(&buf, `{"start":%d,"end":%d,"a":%g,"b":%g,"c":%g,"d":%g}`,
c.Start.Unix(), c.End.Unix(),
c.a, c.b, c.c, c.d)
return buf.Bytes(), err
} | pkg/noaa/splines/spline.go | 0.779741 | 0.437343 | spline.go | starcoder |
package pso
import (
"math"
"math/rand"
"time"
)
// Range of values: per-dimension lower and upper bounds for positions in the
// search space. min and max always have equal length (enforced by NewRange).
type Range struct {
	min []float64
	max []float64
}
// NewRange creates a new Range from the given per-dimension bounds. It panics
// when either slice is nil or their lengths differ.
func NewRange(min, max []float64) *Range {
	if min == nil {
		panic("min cannot be nil.")
	}
	if max == nil {
		panic("max cannot be nil.")
	}
	if len(min) != len(max) {
		panic("length of min and max have to be same.")
	}
	return &Range{min, max}
}
// In reports whether every component of vector lies within this range
// (inclusive on both ends). It panics when vector is nil or its length does
// not match the range's dimensionality.
func (r *Range) In(vector []float64) bool {
	if vector == nil {
		panic("vector cannot be nil")
	}
	if len(vector) != len(r.min) {
		panic("length of values have to be same with minx and max.")
	}
	for i, v := range vector {
		if v < r.min[i] || v > r.max[i] {
			return false
		}
	}
	return true
}
// Min returns a copy of the per-dimension lower bounds (nil when unset).
func (r *Range) Min() []float64 {
	if r.min == nil {
		return nil
	}
	out := make([]float64, len(r.min))
	copy(out, r.min)
	return out
}
// Max returns a copy of the per-dimension upper bounds (nil when unset).
func (r *Range) Max() []float64 {
	if r.max == nil {
		return nil
	}
	out := make([]float64, len(r.max))
	copy(out, r.max)
	return out
}
// Particle is a single member of the swarm searching for an optimum of the
// target function.
type Particle struct {
	// Current position in the solution space
	position []float64
	// Current velocity
	velocity []float64
	// range of valid positions
	valuesRange *Range
	// value of the target function at position (math.MaxFloat64 until first evaluated)
	evalValue float64
	// personal best position found by this particle so far
	best []float64
}
// NewParticle creates a particle at the given position and velocity inside
// valuesRange. position and velocity are defensively copied, the personal
// best starts at the initial position, and the evaluated value starts at
// math.MaxFloat64. It panics when position or velocity is nil or their
// lengths differ.
func NewParticle(position, velocity []float64, valuesRange *Range) *Particle {
	if position == nil {
		panic("position cannot be nil.")
	}
	if velocity == nil {
		panic("velocity cannot be nil.")
	}
	if len(position) != len(velocity) {
		panic("length of position and velocity have to be same.")
	}
	pos := make([]float64, len(position))
	copy(pos, position)
	vel := make([]float64, len(velocity))
	copy(vel, velocity)
	best := make([]float64, len(position))
	copy(best, position)
	return &Particle{pos, vel, valuesRange, math.MaxFloat64, best}
}
// Get the position of particle on the solution space.
// NOTE(review): this returns the internal slice itself (no copy), unlike
// Best/Range.Min/Range.Max which copy — callers can mutate particle state
// through it. Confirm whether that aliasing is intended.
func (p *Particle) Position() []float64 {
	return p.position
}

// Get the velocity of the particle.
// NOTE(review): returns the internal slice itself (no copy); see Position.
func (p *Particle) Velocity() []float64 {
	return p.velocity
}

// Get the range of valid positions.
func (p *Particle) Range() *Range {
	return p.valuesRange
}

// Get the value of the target function at the current position.
func (p *Particle) EvalValue() float64 {
	return p.evalValue
}
// Best returns a copy of the particle's personal best position, or nil when
// it is unset.
//
// Bug fix: the original allocated the copy but never copied p.best into it,
// so callers always received a zero-filled slice.
func (p *Particle) Best() []float64 {
	if p.best == nil {
		return nil
	}
	cpy := make([]float64, len(p.best))
	copy(cpy, p.best)
	return cpy
}
// Do a step of the particle.
func (p *Particle) Step(f TargetFunc, param *Param, globalBest []float64) {
switch {
case f == nil:
panic("f cannot be nil.")
case param == nil:
panic("param cannot be nil.")
case globalBest == nil:
panic("globalBest cannot be nil.")
case len(globalBest) != len(p.position):
panic("length of particle position and globalBest have to be same")
}
oldPosition := make([]float64, len(p.position))
c1 := param.C1()
c2 := param.C2()
w := param.W()
copy(oldPosition, p.position)
// random generator
var rnd = rand.New(rand.NewSource(time.Now().UnixNano()))
for i := range p.position {
// move
p.position[i] += p.velocity[i]
// Random value
r1 := rnd.Float64()
r2 := rnd.Float64()
// update velocity
p.velocity[i] = w[i] * p.velocity[i] + r1 * c1[i] * (p.best[i] - p.position[i]) * r2 * c2[i] * (globalBest[i] - p.position[i])
}
// Over the range?
if !p.valuesRange.In(p.position) {
copy(p.position, oldPosition)
}
// Update best
p.evalValue = f(p.position)
bestValue := f(p.best)
if p.evalValue < bestValue {
copy(p.best, p.position)
}
} | particle.go | 0.715623 | 0.533762 | particle.go | starcoder |
package common
import (
"math/big"
)
// Polynomial with coefficients in Z_prime. Coefficients are given as
// [a_0, a_1, ..., a_degree] where the polynomial is
// p(x) = a_0 + a_1 * x + ... + a_degree * x^degree.
type Polynomial struct {
	// coefficients[i] is the coefficient of x^i; length is degree+1.
	coefficients []*big.Int
	// degree of the polynomial.
	degree int
	prime *big.Int // coefficients are in Z_prime
}
// NewRandomPolynomial builds a polynomial of the given degree whose
// degree+1 coefficients are drawn uniformly from Z_prime.
func NewRandomPolynomial(degree int, prime *big.Int) (*Polynomial, error) {
	coefficients := make([]*big.Int, 0, degree+1)
	for i := 0; i <= degree; i++ {
		// Each coefficient has to be < prime.
		coefficients = append(coefficients, GetRandomInt(prime))
	}
	return &Polynomial{
		coefficients: coefficients,
		degree:       degree,
		prime:        prime,
	}, nil
}
// SetCoefficient overwrites the coefficient of x^coeff_ind. It panics when
// coeff_ind is outside [0, degree].
func (polynomial *Polynomial) SetCoefficient(coeff_ind int, coefficient *big.Int) {
	polynomial.coefficients[coeff_ind] = coefficient
}
// GetValues evaluates the polynomial at every given point and returns a map
// keyed by the point pointers themselves.
func (polynomial *Polynomial) GetValues(points []*big.Int) map[*big.Int]*big.Int {
	values := make(map[*big.Int]*big.Int, len(points))
	for _, point := range points {
		values[point] = polynomial.GetValue(point)
	}
	return values
}
// GetValue evaluates the polynomial at the given point, modulo prime.
func (polynomial *Polynomial) GetValue(point *big.Int) *big.Int {
	result := big.NewInt(0)
	term := new(big.Int)
	for i, coeff := range polynomial.coefficients {
		// term = coeff * point^i (mod prime for the power).
		term.Exp(point, big.NewInt(int64(i)), polynomial.prime)
		term.Mul(coeff, term)
		result.Add(result, term)
	}
	return result.Mod(result, polynomial.prime)
}
// Given degree+1 points which are on the polynomial, LagrangeInterpolation computes p(a).
func LagrangeInterpolation(a *big.Int, points map[*big.Int]*big.Int, prime *big.Int) *big.Int {
value := big.NewInt(0)
for key, val := range points {
numerator := big.NewInt(1)
denominator := big.NewInt(1)
t := new(big.Int)
for key1 := range points {
if key == key1 {
continue
}
t.Sub(a, key1)
numerator.Mul(numerator, t)
numerator.Mod(numerator, prime)
t.Sub(key, key1)
denominator.Mul(denominator, t)
denominator.Mod(denominator, prime)
}
t1 := new(big.Int)
denominator_inv := new(big.Int)
denominator_inv.ModInverse(denominator, prime)
t1.Mul(numerator, denominator_inv)
t1.Mod(t1, prime)
t2 := new(big.Int)
// (prime + value + t1 * val) % prime
t2.Mul(val, t1)
t2.Add(t2, prime)
t2.Add(t2, prime)
value.Add(value, t2)
value.Add(value, prime)
}
value.Mod(value, prime)
return value
} | common/polynomials.go | 0.796411 | 0.781997 | polynomials.go | starcoder |
package gomaasapi
import (
"encoding/json"
"errors"
"fmt"
)
// JSONObject is a wrapper around a JSON structure which provides
// methods to extract data from that structure.
// A JSONObject provides a simple structure consisting of the data types
// defined in JSON: string, number, object, list, and bool. To get the
// value you want out of a JSONObject, you must know (or figure out) which
// kind of value you have, and then call the appropriate Get*() method to
// get at it. Reading an item as the wrong type will return an error.
// For instance, if your JSONObject consists of a number, call GetFloat64()
// to get the value as a float64. If it's a list, call GetArray() to get
// a slice of JSONObjects. To read any given item from the slice, you'll
// need to "Get" that as the right type as well.
// There is one exception: a MAASObject is really a special kind of map,
// so you can read it as either.
// Reading a null item is also an error. So before you try obj.Get*(),
// first check obj.IsNil().
type JSONObject struct {
	// Parsed value. May actually be any of the types a JSONObject can
	// wrap, except raw bytes. If the object can only be interpreted
	// as raw bytes, this will be nil.
	value interface{}
	// Raw bytes, if this object was parsed directly from an API response.
	// Is nil for sub-objects found within other objects. An object that
	// was parsed directly from a response can be both raw bytes and some
	// other value at the same time.
	// For example, "[]" looks like a JSON list, so you can read it as an
	// array. But it may also be the raw contents of a file that just
	// happens to look like JSON, and so you can read it as raw bytes as
	// well.
	bytes []byte
	// Client for further communication with the API.
	client Client
	// Is this a JSON null?
	isNull bool
}
// Our JSON processor distinguishes a MAASObject from a jsonMap by the fact
// that it contains a key "resource_uri". (A regular map might contain the
// same key through sheer coincidence, but never mind: you can still treat
// it as a jsonMap and never notice the difference.)
const resourceURI = "resource_uri"
// maasify converts an untyped json.Unmarshal result into a JSONObject.
// It recurses into maps and arrays, deep-copying them so that every nested
// value is itself a JSONObject. It panics on any type json.Unmarshal would
// never produce.
func maasify(client Client, value interface{}) JSONObject {
	if value == nil {
		return JSONObject{isNull: true}
	}
	switch typed := value.(type) {
	case string, float64, bool:
		return JSONObject{value: value}
	case map[string]interface{}:
		wrapped := make(map[string]JSONObject, len(typed))
		for key, item := range typed {
			wrapped[key] = maasify(client, item)
		}
		return JSONObject{value: wrapped, client: client}
	case []interface{}:
		wrapped := make([]JSONObject, len(typed))
		for index, item := range typed {
			wrapped[index] = maasify(client, item)
		}
		return JSONObject{value: wrapped}
	}
	panic(fmt.Sprintf("Unknown JSON type, can't be converted to JSONObject: %v", value))
}
// Parse turns a JSON blob into a JSONObject. Input that is not valid JSON
// is kept as raw binary data rather than treated as an error; a nil input
// panics. Any other unmarshalling error is returned as-is.
func Parse(client Client, input []byte) (JSONObject, error) {
	if input == nil {
		panic(errors.New("Parse() called with nil input"))
	}
	var parsed interface{}
	if err := json.Unmarshal(input, &parsed); err != nil {
		switch err.(type) {
		case *json.InvalidUTF8Error, *json.SyntaxError:
			// This isn't JSON. Treat it as raw binary data.
			return JSONObject{value: nil, client: client, bytes: input}, nil
		default:
			return JSONObject{}, err
		}
	}
	obj := maasify(client, parsed)
	obj.bytes = input
	return obj, nil
}
// JSONObjectFromStruct serializes a struct to indented JSON and parses the
// result back into a JSONObject.
func JSONObjectFromStruct(client Client, input interface{}) (JSONObject, error) {
	serialized, err := json.MarshalIndent(input, "", " ")
	if err != nil {
		return JSONObject{}, err
	}
	return Parse(client, serialized)
}
// failConversion builds the error reported when a JSONObject is read as a
// type it does not hold.
func failConversion(wantedType string, obj JSONObject) error {
	return fmt.Errorf("Requested %v, got %T.", wantedType, obj.value)
}
// MarshalJSON tells the standard json package how to serialize a JSONObject
// back to JSON: nulls serialize as "null", everything else as indented JSON.
func (obj JSONObject) MarshalJSON() ([]byte, error) {
	if !obj.IsNil() {
		return json.MarshalIndent(obj.value, "", " ")
	}
	return json.Marshal(nil)
}
// Compile-time check: with MarshalJSON, JSONObject implements json.Marshaler.
var _ json.Marshaler = (*JSONObject)(nil)
// IsNil tells you whether a JSONObject is a JSON "null."
// There is one irregularity. If the original JSON blob was actually raw
// data, not JSON, then its IsNil will return false because the object
// contains the binary data as a non-nil value. But, if the original JSON
// blob consisted of a null, then IsNil returns true even though you can
// still retrieve binary data from it.
func (obj JSONObject) IsNil() bool {
	switch {
	case obj.value != nil:
		return false
	case obj.bytes == nil:
		return true
	default:
		// This may be a JSON null; leading/trailing whitespace makes the
		// raw bytes unreliable, so trust the flag set at parse time.
		return obj.isNull
	}
}
// GetString retrieves the object's value as a string. If the value wasn't
// a JSON string, that's an error.
func (obj JSONObject) GetString() (string, error) {
	if s, ok := obj.value.(string); ok {
		return s, nil
	}
	return "", failConversion("string", obj)
}
// GetFloat64 retrieves the object's value as a float64. If the value wasn't
// a JSON number, that's an error.
func (obj JSONObject) GetFloat64() (float64, error) {
	if f, ok := obj.value.(float64); ok {
		return f, nil
	}
	return 0, failConversion("float64", obj)
}
// GetMap retrieves the object's value as a map. If the value wasn't a JSON
// object, that's an error.
func (obj JSONObject) GetMap() (map[string]JSONObject, error) {
	if m, ok := obj.value.(map[string]JSONObject); ok {
		return m, nil
	}
	return nil, failConversion("map", obj)
}
// GetArray retrieves the object's value as an array. If the value wasn't a
// JSON list, that's an error.
func (obj JSONObject) GetArray() ([]JSONObject, error) {
	if a, ok := obj.value.([]JSONObject); ok {
		return a, nil
	}
	return nil, failConversion("array", obj)
}
// GetBool retrieves the object's value as a bool. If the value wasn't a JSON
// bool, that's an error.
func (obj JSONObject) GetBool() (bool, error) {
	if b, ok := obj.value.(bool); ok {
		return b, nil
	}
	return false, failConversion("bool", obj)
}
// GetBytes retrieves the object's value as raw bytes. A JSONObject that was
// parsed from the original input (as opposed to one that's embedded in
// another JSONObject) can contain both the raw bytes and the parsed JSON
// value, but either can be the case without the other.
// If this object wasn't parsed directly from the original input, that's an
// error.
// If the object was parsed from an original input that just said "null", then
// IsNil will return true but the raw bytes are still available from GetBytes.
func (obj JSONObject) GetBytes() ([]byte, error) {
if obj.bytes == nil {
return nil, failConversion("bytes", obj)
}
return obj.bytes, nil
} | vendor/github.com/juju/gomaasapi/jsonobject.go | 0.666388 | 0.464112 | jsonobject.go | starcoder |
package gremlingo
// TraversalStrategies is interceptor methods to alter the execution of the
// traversal (e.g. query re-writing). It currently carries no state.
type TraversalStrategies struct {
}
// GraphTraversalSource can be used to start GraphTraversal.
type GraphTraversalSource struct {
	// graph the traversals operate on.
	graph *Graph
	// traversalStrategies applied to spawned traversals.
	traversalStrategies *TraversalStrategies
	// bytecode accumulates the source-level instructions.
	bytecode *bytecode
	// remoteConnection used to submit traversals for execution.
	remoteConnection *DriverRemoteConnection
	// graphTraversal is updated in place by WithRemote when present.
	graphTraversal *GraphTraversal
}
// NewGraphTraversalSource creates a graph traversal source, the primary DSL
// of the Gremlin traversal machine.
func NewGraphTraversalSource(graph *Graph, traversalStrategies *TraversalStrategies, bytecode *bytecode, remoteConnection *DriverRemoteConnection) *GraphTraversalSource {
	return &GraphTraversalSource{
		graph:               graph,
		traversalStrategies: traversalStrategies,
		bytecode:            bytecode,
		remoteConnection:    remoteConnection,
	}
}
// NewDefaultGraphTraversalSource creates a GraphTraversalSource with no
// graph, strategies, or existing traversal — just an empty bytecode.
func NewDefaultGraphTraversalSource() *GraphTraversalSource {
	return &GraphTraversalSource{bytecode: newBytecode(nil)}
}
// GetBytecode gets the traversal bytecode associated with this graph
// traversal source.
func (gts *GraphTraversalSource) GetBytecode() *bytecode {
	return gts.bytecode
}
// GetGraphTraversal spawns a new graph traversal seeded with a copy of this
// source's bytecode, strategies, and remote connection.
func (gts *GraphTraversalSource) GetGraphTraversal() *GraphTraversal {
	return NewGraphTraversal(gts.graph, gts.traversalStrategies, newBytecode(gts.bytecode), gts.remoteConnection)
}
// GetTraversalStrategies gets the graph traversal strategies associated with
// this graph traversal source.
func (gts *GraphTraversalSource) GetTraversalStrategies() *TraversalStrategies {
	return gts.traversalStrategies
}
// clone returns a copy of this source with its own bytecode copy, so the
// With* helpers never mutate the receiver's instruction list.
func (gts *GraphTraversalSource) clone() *GraphTraversalSource {
	return NewGraphTraversalSource(gts.graph, gts.traversalStrategies, newBytecode(gts.bytecode), gts.remoteConnection)
}
// WithBulk allows for control of bulking operations.
// It returns nil when recording the source instruction fails.
func (gts *GraphTraversalSource) WithBulk(args ...interface{}) *GraphTraversalSource {
	s := gts.clone()
	if err := s.bytecode.addSource("withBulk", args...); err != nil {
		return nil
	}
	return s
}
// WithPath adds a path to be used throughout the life of a spawned Traversal.
func (gts *GraphTraversalSource) WithPath(args ...interface{}) *GraphTraversalSource {
	s := gts.clone()
	s.bytecode.addSource("withPath", args...)
	return s
}
// WithSack adds a sack to be used throughout the life of a spawned Traversal.
func (gts *GraphTraversalSource) WithSack(args ...interface{}) *GraphTraversalSource {
	s := gts.clone()
	s.bytecode.addSource("withSack", args...)
	return s
}
// WithSideEffect adds a side effect to be used throughout the life of a
// spawned Traversal.
func (gts *GraphTraversalSource) WithSideEffect(args ...interface{}) *GraphTraversalSource {
	s := gts.clone()
	s.bytecode.addSource("withSideEffect", args...)
	return s
}
// WithStrategies adds an arbitrary collection of TraversalStrategies
// instances to the traversal source.
func (gts *GraphTraversalSource) WithStrategies(args ...interface{}) *GraphTraversalSource {
	s := gts.clone()
	s.bytecode.addSource("withStrategies", args...)
	return s
}
// WithoutStrategies removes an arbitrary collection of TraversalStrategies
// instances from the traversal source.
func (gts *GraphTraversalSource) WithoutStrategies(args ...interface{}) *GraphTraversalSource {
	s := gts.clone()
	s.bytecode.addSource("withoutStrategies", args...)
	return s
}
// With provides a configuration to a traversal in the form of a key value pair.
// NOTE(review): this records a "withStrategies" source instruction, not a
// "with" instruction — confirm this is the intended wire representation.
func (gts *GraphTraversalSource) With(key interface{}, value interface{}) *GraphTraversalSource {
	source := gts.clone()
	source.bytecode.addSource("withStrategies", key, value)
	return source
}
// WithRemote adds a remote to be used throughout the life of a spawned
// Traversal. Unlike the other With* helpers it mutates the receiver (and
// any previously spawned traversal) before returning a clone.
func (gts *GraphTraversalSource) WithRemote(remoteConnection *DriverRemoteConnection) *GraphTraversalSource {
	gts.remoteConnection = remoteConnection
	if gts.graphTraversal != nil {
		gts.graphTraversal.remote = remoteConnection
	}
	return gts.clone()
}
// E starts the traversal by reading edges from the graph.
func (gts *GraphTraversalSource) E(args ...interface{}) *GraphTraversal {
	t := gts.GetGraphTraversal()
	t.bytecode.addStep("E", args...)
	return t
}
// V starts the traversal by reading vertices from the graph.
func (gts *GraphTraversalSource) V(args ...interface{}) *GraphTraversal {
	t := gts.GetGraphTraversal()
	t.bytecode.addStep("V", args...)
	return t
}
// AddE starts the traversal by adding an Edge.
func (gts *GraphTraversalSource) AddE(args ...interface{}) *GraphTraversal {
	t := gts.GetGraphTraversal()
	t.bytecode.addStep("addE", args...)
	return t
}
// AddV starts the traversal by adding a Vertex.
func (gts *GraphTraversalSource) AddV(args ...interface{}) *GraphTraversal {
	t := gts.GetGraphTraversal()
	t.bytecode.addStep("addV", args...)
	return t
}
// Inject starts the traversal with arbitrary injected objects.
func (gts *GraphTraversalSource) Inject(args ...interface{}) *GraphTraversal {
	t := gts.GetGraphTraversal()
	t.bytecode.addStep("inject", args...)
	return t
}
// Io adds the io steps to start the traversal.
func (gts *GraphTraversalSource) Io(args ...interface{}) *GraphTraversal {
traversal := gts.GetGraphTraversal()
traversal.bytecode.addStep("io", args...)
return traversal
} | gremlin-go/driver/graphTraversalSource.go | 0.891841 | 0.515376 | graphTraversalSource.go | starcoder |
package search
import (
"database/sql"
"math"
"strconv"
"github.com/GaryBoone/GoStats/stats"
"github.com/kellydunn/golang-geo"
)
// fixFeatures normalizes a feature map to exactly the six known feature
// names: known names keep their supplied value (defaulting to 0.0 when
// absent) and unknown names are dropped.
func fixFeatures(features map[string]float64) map[string]float64 {
	names := []string{"nearby", "accessible", "delicious", "accommodating", "affordable", "atmospheric"}
	fixed := make(map[string]float64, len(names))
	for _, name := range names {
		// A missing key yields the zero value, matching the old defaults.
		fixed[name] = features[name]
	}
	return fixed
}
// fixModes normalizes a mode map to the six known feature names. Each name
// defaults to modeTypeProd and is overridden only when the supplied map has
// a string for it that parses to a valid mode.
func fixModes(modes map[string]string) map[string]modeType {
	fixed := map[string]modeType{
		"nearby":        modeTypeProd,
		"accessible":    modeTypeProd,
		"delicious":     modeTypeProd,
		"accommodating": modeTypeProd,
		"affordable":    modeTypeProd,
		"atmospheric":   modeTypeProd}
	for name := range fixed {
		raw, ok := modes[name]
		if !ok {
			continue
		}
		if mode, err := parseModeType(raw); err == nil {
			fixed[name] = mode
		}
	}
	return fixed
}
// semanticSimilarity returns the dot product of two feature maps over the
// keys they have in common.
func semanticSimilarity(features1 map[string]float64, features2 map[string]float64) float64 {
	total := 0.0
	for key, left := range features1 {
		right, shared := features2[key]
		if !shared {
			continue
		}
		total += left * right
	}
	return total
}
// semanticCompare scores features2 against features1: "prod" features add
// their product, "dist" features add 1 minus their absolute difference.
// Keys with any other mode contribute nothing.
func semanticCompare(features1 map[string]float64, features2 map[string]float64, modes map[string]modeType) float64 {
	total := 0.0
	for key, left := range features1 {
		right := features2[key]
		switch modes[key] {
		case modeTypeDist:
			total += 1 - math.Abs(left-right)
		case modeTypeProd:
			total += left * right
		}
	}
	return total
}
// walkMatches invokes callback for every entry whose semantic score against
// the given features reaches minScore.
func walkMatches(entries []record, features map[string]float64, modes map[string]modeType, minScore float64, callback func(record, float64)) {
	for _, candidate := range entries {
		score := semanticCompare(features, candidate.features, modes)
		if score >= minScore {
			callback(candidate, score)
		}
	}
}
// statRecords sums the Compatibility of every entry that matches the given
// features at minScore, returning that sum and the number of matches.
func statRecords(entries []record, features map[string]float64, modes map[string]modeType, minScore float64) (float64, int) {
	total := 0.0
	matches := 0
	walkMatches(entries, features, modes, minScore, func(entry record, score float64) {
		total += entry.Compatibility
		matches++
	})
	return total, matches
}
// stepRange divides [min, max] into `steps` equal buckets, walking from the
// top down, and invokes callback with each bucket's midpoint.
func stepRange(min, max float64, steps int, callback func(float64)) {
	width := (max - min) / float64(steps)
	for step := 0; step < steps; step++ {
		hi := max - width*float64(step)
		lo := hi - width
		callback((lo + hi) / 2)
	}
}
// findRecords returns every entry matching the given features at minScore,
// with each returned copy's Score field set to its semantic score.
func findRecords(entries []record, features map[string]float64, modes map[string]modeType, minScore float64) []record {
	var matches []record
	walkMatches(entries, features, modes, minScore, func(entry record, score float64) {
		entry.Score = score
		matches = append(matches, entry)
	})
	return matches
}
// project sweeps the named feature across [-1, 1] in `steps` buckets and,
// for each sampled value, records the total compatibility and match count
// of the entries that would match at that setting.
func project(entries []record, features map[string]float64, modes map[string]modeType, featureName string, minScore float64, steps int) []projection {
	// Work on a private copy so the caller's feature map is untouched.
	sampleFeatures := make(map[string]float64)
	for key, value := range features {
		sampleFeatures[key] = value
	}
	var projections []projection
	stepRange(-1.0, 1.0, steps, func(sample float64) {
		// Swap the sampled value into the feature map...
		sample, sampleFeatures[featureName] = sampleFeatures[featureName], sample
		compatibility, count := statRecords(entries, sampleFeatures, modes, minScore)
		// ...then swap it back; after this, `sample` again holds the
		// sampled value, which is recorded in the projection below.
		sample, sampleFeatures[featureName] = sampleFeatures[featureName], sample
		projections = append(projections, projection{compatibility, count, sample})
	})
	return projections
}
// computeRecordGeo derives the "nearby" and "accessible" features for each
// entry from its distance to the user and to its closest station.
func computeRecordGeo(entries []record, context queryContext) {
	var dist stats.Stats
	for index := range entries {
		entry := &entries[index]
		if context.geo != nil {
			userPoint := geo.NewPoint(context.geo.Latitude, context.geo.Longitude)
			// Bug fix: the original built the entry's point from the
			// *user's* longitude, so east-west displacement was ignored.
			entryPoint := geo.NewPoint(entry.Geo.Latitude, entry.Geo.Longitude)
			entry.DistanceToUser = userPoint.GreatCircleDistance(entryPoint)
		}
		dist.Update(entry.DistanceToUser)
	}
	distRange := dist.Max() - dist.Min()
	distMean := dist.Mean()
	for index := range entries {
		entry := &entries[index]
		// nearby: user distance normalized around the mean (closer => higher).
		var nearby float64
		if distRange > 0.0 {
			nearby = -((entry.DistanceToUser - distMean) / distRange)
		}
		// accessible: station distance relative to walking range, clamped
		// to [-1, 1]; pinned to -1 when no walking range is configured.
		var accessible float64
		if context.walkingDist <= 0 {
			accessible = -1.0
		} else {
			accessible = 1.0 - entry.DistanceToStn/context.walkingDist
			accessible = math.Max(accessible, -1.0)
			accessible = math.Min(accessible, 1.0)
		}
		entry.features["nearby"] = nearby
		entry.features["accessible"] = accessible
	}
}
// computeRecordCompat sets each entry's Compatibility to the mean semantic
// similarity between the user's profile and the category profiles attached
// to the entry's access history. Entries without history keep the zero value.
func computeRecordCompat(db *sql.DB, entries []record, context queryContext) error {
	for i := range entries {
		if err := computeEntryCompat(db, &entries[i], context.profile); err != nil {
			return err
		}
	}
	return nil
}

// computeEntryCompat computes the mean profile similarity for one entry.
// Extracted so each query's Close runs when the entry is done; the original
// deferred every Rows.Close inside the loop, holding all result sets open
// until the whole batch finished.
func computeEntryCompat(db *sql.DB, entry *record, profile map[string]float64) error {
	historyRows, err := db.Query("SELECT id FROM history WHERE reviewId = (?)", entry.Id)
	if err != nil {
		return err
	}
	defer historyRows.Close()
	var (
		groupSum   float64
		groupCount int
	)
	for historyRows.Next() {
		var historyId int
		if err := historyRows.Scan(&historyId); err != nil {
			return err
		}
		recordProfile, err := fetchHistoryProfile(db, historyId)
		if err != nil {
			return err
		}
		groupSum += semanticSimilarity(recordProfile, profile)
		groupCount++
	}
	if err := historyRows.Err(); err != nil {
		return err
	}
	if groupCount > 0 {
		entry.Compatibility = groupSum / float64(groupCount)
	}
	return nil
}

// fetchHistoryProfile loads the categoryId -> categoryValue profile recorded
// for a single history row; keys are the decimal category ids.
func fetchHistoryProfile(db *sql.DB, historyId int) (map[string]float64, error) {
	groupRows, err := db.Query("SELECT categoryId, categoryValue FROM historyGroups WHERE historyId = (?)", historyId)
	if err != nil {
		return nil, err
	}
	defer groupRows.Close()
	profile := make(map[string]float64)
	for groupRows.Next() {
		var (
			categoryId    int
			categoryValue float64
		)
		if err := groupRows.Scan(&categoryId, &categoryValue); err != nil {
			return nil, err
		}
		profile[strconv.Itoa(categoryId)] = categoryValue
	}
	if err := groupRows.Err(); err != nil {
		return nil, err
	}
	return profile, nil
}
func fetchRecords(db *sql.DB, context queryContext) ([]record, error) {
rows, err := db.Query("SELECT name, address, delicious, accommodating, affordable, atmospheric, latitude, longitude, closestStnDist, closestStnName, accessCount, id FROM reviews")
if err != nil {
return nil, err
}
defer rows.Close()
var entries []record
for rows.Next() {
var (
name, address, closestStn string
delicious, accommodating, affordable, atmospheric float64
latitude, longitude, distanceToStn float64
accessCount, id int
)
rows.Scan(
&name,
&address,
&delicious,
&accommodating,
&affordable,
&atmospheric,
&latitude,
&longitude,
&distanceToStn,
&closestStn,
&accessCount,
&id,
)
entry := record{
Name: name,
Address: address,
DistanceToStn: distanceToStn,
ClosestStn: closestStn,
AccessCount: accessCount,
Geo: geoData{latitude, longitude},
Id: id,
}
entry.features = map[string]float64{
"delicious": delicious,
"accommodating": accommodating,
"affordable": affordable,
"atmospheric": atmospheric,
}
entries = append(entries, entry)
}
if err := rows.Err(); err != nil {
return nil, err
}
computeRecordGeo(entries, context)
if err := computeRecordCompat(db, entries, context); err != nil {
return nil, err
}
return entries, nil
} | util.go | 0.694303 | 0.479747 | util.go | starcoder |
package analysis
import (
"fmt"
"strings"
"github.com/google/gapid/gapil/semantic"
)
// Compile-time checks that *EnumValue satisfies the Value and SetRelational
// interfaces.
var (
	_ = Value(&EnumValue{})
	_ = SetRelational(&EnumValue{})
)
// Labels is a map of value to name.
type Labels map[uint64]string

// Merge copies every entry of o into l, overwriting entries whose keys
// already exist in l.
func (l Labels) Merge(o Labels) {
	for key, name := range o {
		l[key] = name
	}
}
// EnumValue is an implementation of Value that represents all the possible
// values of an enumerator.
type EnumValue struct {
	// Ty is the semantic enum type the values belong to.
	Ty *semantic.Enum
	// Numbers is the set of possible numeric values.
	Numbers *UintValue
	// Labels maps known numeric values to display names.
	Labels Labels
}
// Print returns a textual representation of the value.
// The results argument is unused; Print delegates to String.
func (v *EnumValue) Print(results *Results) string {
	return v.String()
}
// String renders the set of possible values as a bracketed list. Values with
// labels use them; others print as biased hex. Ranges of ten or more values
// are abbreviated as "first, ..., last".
func (v *EnumValue) String() string {
	bias := uintBias(v.Ty)
	name := func(i uint64) string {
		if label, ok := v.Labels[i]; ok {
			return label
		}
		return fmt.Sprintf("%#x", bias(i))
	}
	var parts []string
	for _, r := range v.Numbers.Ranges {
		if r.End-r.Start < 10 {
			for i := r.Start; i != r.End; i++ {
				parts = append(parts, name(i))
			}
		} else {
			parts = append(parts, name(r.Start), "...", name(r.End-1))
		}
	}
	return fmt.Sprintf("[%v]", strings.Join(parts, ", "))
}
// Type returns the semantic enum type of the value represented by v.
func (v *EnumValue) Type() semantic.Type {
	return v.Ty
}
// GreaterThan returns the possibility of v being greater than o,
// which must be of type *EnumValue.
func (v *EnumValue) GreaterThan(o Value) Possibility {
	other := o.(*EnumValue)
	return v.Numbers.GreaterThan(other.Numbers)
}
// GreaterEqual returns the possibility of v being greater than or equal to
// o, which must be of type *EnumValue.
func (v *EnumValue) GreaterEqual(o Value) Possibility {
	other := o.(*EnumValue)
	return v.Numbers.GreaterEqual(other.Numbers)
}
// LessThan returns the possibility of v being less than o,
// which must be of type *EnumValue.
func (v *EnumValue) LessThan(o Value) Possibility {
	other := o.(*EnumValue)
	return v.Numbers.LessThan(other.Numbers)
}
// LessEqual returns the possibility of v being less than or equal to o,
// which must be of type *EnumValue.
func (v *EnumValue) LessEqual(o Value) Possibility {
	other := o.(*EnumValue)
	return v.Numbers.LessEqual(other.Numbers)
}
// SetGreaterThan returns a new value holding the possible values in v that
// are greater than the lowest in o, which must be of type *EnumValue.
func (v *EnumValue) SetGreaterThan(o Value) Value {
	other := o.(*EnumValue)
	return &EnumValue{
		Ty:      v.Ty,
		Numbers: v.Numbers.SetGreaterThan(other.Numbers).(*UintValue),
		Labels:  v.joinLabels(other),
	}
}
// SetGreaterEqual returns a new value holding the possible values in v that
// are greater than or equal to the lowest in o (an *EnumValue).
func (v *EnumValue) SetGreaterEqual(o Value) Value {
	other := o.(*EnumValue)
	return &EnumValue{
		Ty:      v.Ty,
		Numbers: v.Numbers.SetGreaterEqual(other.Numbers).(*UintValue),
		Labels:  v.joinLabels(other),
	}
}
// SetLessThan returns a new value holding the possible values in v that
// are less than the highest in o, which must be of type *EnumValue.
func (v *EnumValue) SetLessThan(o Value) Value {
	other := o.(*EnumValue)
	return &EnumValue{
		Ty:      v.Ty,
		Numbers: v.Numbers.SetLessThan(other.Numbers).(*UintValue),
		Labels:  v.joinLabels(other),
	}
}
// SetLessEqual returns a new value holding the possible values in v that
// are less than or equal to the highest in o (an *EnumValue).
func (v *EnumValue) SetLessEqual(o Value) Value {
	other := o.(*EnumValue)
	return &EnumValue{
		Ty:      v.Ty,
		Numbers: v.Numbers.SetLessEqual(other.Numbers).(*UintValue),
		Labels:  v.joinLabels(other),
	}
}
// Equivalent returns true iff v and o are equivalent.
// Unlike Equals() which returns the possibility of two values being equal,
// Equivalent() returns true iff the sets of possible values (and labels) are
// exactly equal. o must be of type *EnumValue.
func (v *EnumValue) Equivalent(o Value) bool {
	if v == o {
		return true
	}
	other := o.(*EnumValue)
	switch {
	case !v.Numbers.Equivalent(other.Numbers):
		return false
	case len(v.Labels) != len(other.Labels):
		return false
	}
	for key, label := range v.Labels {
		if other.Labels[key] != label {
			return false
		}
	}
	return true
}
// Equals returns the possibility of v being equal to o,
// which must be of type *EnumValue.
func (v *EnumValue) Equals(o Value) Possibility {
	if v == o && v.Valid() {
		return True
	}
	other := o.(*EnumValue)
	return v.Numbers.Equals(other.Numbers)
}
// Valid returns true if there is any possibility of this value equaling
// any other (i.e. the underlying number set is non-empty).
func (v *EnumValue) Valid() bool {
	return v.Numbers.Valid()
}
// Union (∪) returns the values that are found in v or o,
// which must be of type *EnumValue.
func (v *EnumValue) Union(o Value) Value {
	if v == o {
		return v
	}
	other := o.(*EnumValue)
	return &EnumValue{
		Ty:      v.Ty,
		Numbers: v.Numbers.Union(other.Numbers).(*UintValue),
		Labels:  v.joinLabels(other),
	}
}
// Intersect (∩) returns the values that are found in both v and o,
// which must be of type *EnumValue.
func (v *EnumValue) Intersect(o Value) Value {
	if v == o {
		return v
	}
	other := o.(*EnumValue)
	return &EnumValue{
		Ty:      v.Ty,
		Numbers: v.Numbers.Intersect(other.Numbers).(*UintValue),
		Labels:  v.joinLabels(other),
	}
}
// Difference (\) returns the values that are found in v but not found in o,
// which must be of type *EnumValue.
func (v *EnumValue) Difference(o Value) Value {
	other := o.(*EnumValue)
	return &EnumValue{
		Ty:      v.Ty,
		Numbers: v.Numbers.Difference(other.Numbers).(*UintValue),
		Labels:  v.joinLabels(other),
	}
}
// Clone returns a deep copy of v with a unique pointer.
func (v *EnumValue) Clone() Value {
	labels := make(Labels, len(v.Labels))
	labels.Merge(v.Labels)
	return &EnumValue{
		Ty:      v.Ty,
		Numbers: v.Numbers.Clone().(*UintValue),
		Labels:  labels,
	}
}
func (v *EnumValue) joinLabels(o *EnumValue) Labels {
out := make(Labels, len(v.Labels)+len(o.Labels))
out.Merge(v.Labels)
out.Merge(o.Labels)
return out
} | gapil/analysis/enum_value.go | 0.817793 | 0.453988 | enum_value.go | starcoder |
package hammurabi
import (
"bufio"
"fmt"
"strconv"
"strings"
"github.com/pkg/errors"
)
const (
	// requiredInput is the number of whitespace-separated fields expected
	// from the player each turn: lands to buy, bushels to feed, lands to seed.
	requiredInput int = 3
)
const (
	// intro is the banner printed at game start; its four %d verbs take the
	// term length, bushels per person, acres per person, bushels per acre.
	// Fixed typos from the original: "dispsense" -> "dispense",
	// "Gain" -> "Grain", "mark price" -> "market price".
	intro = `
Congratulations, you are the newest ruler of ancient Samaria, elected for %d-year term of office. Your duties are to dispense food,
direct farming, and buy and sell land as needed to support your people. Watch out for rat infestations and the plague! Grain is the general
currency, measured in bushels. The following will help you in your decisions:
- Each person needs at least %d bushels of grain per year to survive.
- Each person can farm at most %d acres of land.
- It takes %d bushel of grain to farm an acre of land.
- The market price for land fluctuates yearly.
Rule wisely and you will be showered with appreciation at the end of your term. Rule poorly and you will be kicked out of office!
`
)
// InteractiveHammurabi represents the minimal interface for an interactive
// Hammurabi game: the core Hammurabi rules plus console presentation and
// input parsing.
type InteractiveHammurabi interface {
	// DisplayIntro prints the introductory banner.
	DisplayIntro(year int) error
	// DisplayGameState prints the current state and the last turn's delta.
	DisplayGameState(year int) error
	// ReadActionInput parses the player's next action from reader.
	ReadActionInput(reader *bufio.Reader) (*GameAction, error)
	Hammurabi
}
// NewInteractiveHammurabi creates a new game with the maximum number of
// years, aka turns.
func NewInteractiveHammurabi(maxYear int) InteractiveHammurabi {
	// The concrete *game type implements both the Hammurabi rules and the
	// interactive console helpers.
	return newGame(maxYear)
}
// DisplayIntro prints the introduction text of the game. It returns an
// error when year is outside [1, g.year].
func (g *game) DisplayIntro(year int) error {
	// Bug fix: the original tested `year < 1 && year > g.year`, which can
	// never be true, so invalid years were silently accepted.
	if year < 1 || year > g.year {
		return &valueOutOfRange{kind: "year", reason: fmt.Sprintf("Should be within range [%d, %d].", 1, g.year)}
	}
	fmt.Printf(intro, year, bushelsPerPerson, landsPerPerson, bushelsPerLand)
	return nil
}
// DisplayGameState prints a textual report of the game state and the
// previous turn's delta. It returns an error when year is outside
// [1, g.year].
func (g *game) DisplayGameState(year int) error {
	// Bug fix: the original tested `year < 1 && year > g.year`, which can
	// never be true, so invalid years were silently accepted.
	if year < 1 || year > g.year {
		return &valueOutOfRange{kind: "year", reason: fmt.Sprintf("Should be within range [%d, %d].", 1, g.year)}
	}
	// Snapshot the previous delta and the current state.
	delta := g.delta
	state := g.state
	// Display general information
	fmt.Println()
	fmt.Println("Hammurabi: I beg to report to you,")
	fmt.Printf("In Year %d, %d people starved.\n", year, delta.PeopleStarved)
	fmt.Printf("%d people came to the city.\n", delta.PeopleAdded)
	fmt.Printf("The city population is now %d.\n", state.Population)
	fmt.Printf("The city now owns %d acres.\n", state.Lands)
	fmt.Printf("You harvested %d bushels per acre.\n", state.LandProfit)
	if delta.HasRat {
		fmt.Printf("Rats ate %d bushels.\n", delta.BushelsInfested)
	}
	if delta.HasPlague {
		fmt.Printf("Plague killed %d people.\n", delta.PeopleKilled)
	}
	fmt.Printf("You now have %d bushels in store.\n", state.Bushels)
	fmt.Printf("Land is trading at %d bushels per acre.\n", state.LandPrice)
	// No error
	return nil
}
// ReadActionInput prompts for and reads one line from reader, parsing it
// into a GameAction of the form "<LandsToBuy> <BushelsToFeed> <LandsToSeed>".
// On success the action is also stored on the game; on any failure the
// game's stored action is left unchanged and err is non-nil (note that a
// partially filled action may still be returned alongside a parse error).
func (g *game) ReadActionInput(reader *bufio.Reader) (action *GameAction, err error) {
	fmt.Println()
	fmt.Println("Input your action with the following format:")
	fmt.Println("[LandsToBuy] [BushelsToFeed] [LandsToSeed]")
	text, err := reader.ReadString('\n')
	if err != nil {
		return
	}
	// Require exactly three whitespace-separated fields.
	input := strings.Fields(text)
	if len(input) != requiredInput {
		err = &invalidInput{}
		return
	}
	// Parse each field as an integer, wrapping parse failures for context.
	action = &GameAction{}
	action.LandsToBuy, err = strconv.Atoi(input[0])
	if err != nil {
		err = errors.Wrap(err, "validation failed")
		return
	}
	action.BushelsToFeed, err = strconv.Atoi(input[1])
	if err != nil {
		err = errors.Wrap(err, "validation failed")
		return
	}
	action.LandsToSeed, err = strconv.Atoi(input[2])
	if err != nil {
		err = errors.Wrap(err, "validation failed")
		return
	}
	// All fields parsed: record the action on the game.
	g.action = action
	return
}
package pathbuilding
import "sort"
// A TrustGraph is abstractly a directed graph (potentially with cycles). It represents the trust relationship between
// entities where an arrow represents a certificate signed by the source entity for the destination entity. A
// TrustGraph can also label some edges as "invalid" meaning that there is a certificate but it should be considered
// invalid, e.g. because it is expired.
//
// NOTE(review): validity is not stored on the struct itself — invalid edges
// are supplied per-query to Reachable.
type TrustGraph struct {
	name string
	nodes []string
	edges []Edge
}
// An Edge in a TrustGraph, directed from Source to Destination.
type Edge struct {
	Source string
	Destination string
}

// Equals reports whether e and other connect the same ordered pair of nodes.
func (e *Edge) Equals(other *Edge) bool {
	if e.Source != other.Source {
		return false
	}
	return e.Destination == other.Destination
}

// MemberOf reports whether an edge equal to e appears in s.
func (e *Edge) MemberOf(s []Edge) bool {
	for i := range s {
		if e.Equals(&s[i]) {
			return true
		}
	}
	return false
}
// NewGraph creates a TrustGraph instance with the given edges, where all
// edges are considered valid. Node names are collected from both endpoints
// of every edge and stored sorted; the edge slice is defensively copied.
func NewGraph(name string, edges []Edge) *TrustGraph {
	names := NewStringSet()
	for _, e := range edges {
		names.Add(e.Source)
		names.Add(e.Destination)
	}
	sortedNodes := names.Values()
	sort.Strings(sortedNodes)
	edgeCopy := make([]Edge, len(edges))
	copy(edgeCopy, edges)
	return &TrustGraph{
		name: name,
		nodes: sortedNodes,
		edges: edgeCopy,
	}
}
// Name returns the graph's descriptive name.
func (g *TrustGraph) Name() string {
	return g.name
}
// NodeNames returns a slice of all the names of nodes in the graph, sorted.
// A copy is returned so callers cannot mutate the graph's internal state,
// matching the defensive copy already made by GetAllEdges.
func (g *TrustGraph) NodeNames() []string {
	names := make([]string, len(g.nodes))
	copy(names, g.nodes)
	return names
}
// EdgeCount returns the number of edges in the graph (including both valid and invalid edges)
func (g *TrustGraph) EdgeCount() uint {
	return uint(len(g.edges))
}
// GetAllEdges returns a defensive copy of the edges from this graph, in
// insertion order.
func (g *TrustGraph) GetAllEdges() []Edge {
	out := make([]Edge, 0, len(g.edges))
	for _, e := range g.edges {
		out = append(out, e)
	}
	return out
}
// stringInSlice reports whether needle occurs in haystack.
func stringInSlice(haystack []string, needle string) bool {
	for i := range haystack {
		if haystack[i] == needle {
			return true
		}
	}
	return false
}
// Reachable returns a path if there is a path in the graph from the src node to the dst node, following only valid
// edges (i.e. edges not present in invalidEdges). If there is no path, this returns nil.
// The returned path lists the nodes from src to dst inclusive, in visit order.
func (g *TrustGraph) Reachable(invalidEdges []Edge, src string, dst string) []string {
	// Depth-first search. `path` holds the nodes on the current branch and
	// doubles as the cycle guard.
	var dfsIterate func(path []string, start string) []string
	dfsIterate = func(path []string, start string) []string {
		// Node already on this branch: cycle, abandon the branch.
		if stringInSlice(path, start) {
			return nil
		}
		newPath := append(path, start)
		if start == dst {
			return newPath
		}
		// Explore valid outgoing edges in stored (insertion) order;
		// the first successful branch wins.
		for _, edge := range g.edges {
			if edge.Source != start {
				continue
			}
			if edge.MemberOf(invalidEdges) {
				continue
			}
			nextNode := edge.Destination
			foundPath := dfsIterate(newPath, nextNode)
			if foundPath != nil {
				return foundPath
			}
		}
		return nil
	}
	return dfsIterate(nil, src)
}
// LINEAR_TRUST_GRAPH is the simplest chain: Trust Anchor -> ICA -> EE.
var LINEAR_TRUST_GRAPH = NewGraph("LINEAR_TRUST_GRAPH", []Edge{
	{"ICA", "EE"},
	{"Trust Anchor", "ICA"},
})
/*
FIGURE_SEVEN models the PKI from RFC 4158, Figure 7:
https://datatracker.ietf.org/doc/html/rfc4158#section-2.3
             +---------+
             |  Trust  |
             |  Anchor |
             +---------+
              |       |
              v       v
           +---+    +---+
           | A |<-->| C |
           +---+    +---+
              |      |
              | +---+ |
              +>| B |<+
                +---+
                  |
                  v
                +----+
                | EE |
                +----+
*/
var FIGURE_SEVEN = NewGraph("FIGURE_SEVEN", []Edge{
	{"B", "EE"},
	{"C", "B"},
	{"A", "B"},
	{"C", "A"},
	{"A", "C"},
	{"Trust Anchor", "C"},
	{"Trust Anchor", "A"},
})
// TWO_ROOTS models a single intermediate CA cross-signed by two root CAs.
var TWO_ROOTS = NewGraph("TWO_ROOTS", []Edge{
	{"ICA", "EE"},
	{"Root1", "ICA"},
	{"Root2", "ICA"},
})
/*
BRIDGE_CA_PKI models the bridge-CA topology from RFC 4158, Section 2.4:
https://datatracker.ietf.org/doc/html/rfc4158#section-2.4
   +---+    +---+
   | F |--->| H |
   +---+    +---+
    ^  ^     ^
    |   \     \
    |    \     \
    |     v     v
    |    +---+  +---+
    |    | G |--->| I |
    |    +---+  +---+
    |           ^
    |          /
    |         /
+------+     +-----------+     +------+   +---+   +---+
| TA W |<----->| Bridge CA |<------>| TA X |-->| L |-->| M |
+------+     +-----------+     +------+   +---+   +---+
      ^         ^                \    \
     /           \                \    \
    /             \                \    \
   v               v                v    v
+------+        +------+          +---+  +---+
| TA Y |        | TA Z |          | J |  | N |
+------+        +------+          +---+  +---+
  /  \           /    \             |      |
 /    \         /      \            |      |
/      \       /        \          v      v
v       v     v          v       +---+  +----+
+---+  +---+ +---+     +---+     | K |  | EE |
| A |<--->| C | | O |  | P |     +---+  +----+
+---+  +---+ +---+     +---+
  \     /    /  \        \
   \   /    /    \        \
    \ /    v      v        v
     v v  +---+  +---+   +---+
   +---+  | Q |  | R |   | S |
   | B |  +---+  +---+   +---+
   +---+           |
    /\             |
   /  \            |
  v    v           v
+---+ +---+      +---+
| E | | D |      | T |
+---+ +---+      +---+
*/
var BRIDGE_CA_PKI = NewGraph("BRIDGE_CA_PKI", []Edge{
	{"F", "H"},
	{"F", "G"},
	{"G", "F"},
	{"H", "I"},
	{"I", "H"},
	{"G", "I"},
	{"TA W", "F"},
	{"TA W", "G"},
	{"J", "K"},
	{"N", "EE"},
	{"L", "N"},
	{"L", "M"},
	{"TA X", "J"},
	{"TA X", "L"},
	{"B", "E"},
	{"B", "D"},
	{"A", "B"},
	{"C", "B"},
	{"A", "C"},
	{"C", "A"},
	{"TA Y", "A"},
	{"TA Y", "C"},
	{"R", "S"},
	{"O", "R"},
	{"O", "Q"},
	{"P", "S"},
	{"TA Z", "O"},
	{"TA Z", "P"},
	{"TA W", "Bridge CA"},
	{"Bridge CA", "TA W"},
	{"TA X", "Bridge CA"},
	{"Bridge CA", "TA X"},
	{"TA Y", "Bridge CA"},
	{"Bridge CA", "TA Y"},
	{"TA Z", "Bridge CA"},
	{"Bridge CA", "TA Z"},
})
// ALL_TRUST_GRAPHS collects every fixture graph, e.g. for table-driven tests.
var ALL_TRUST_GRAPHS = []*TrustGraph{
	TWO_ROOTS,
	LINEAR_TRUST_GRAPH,
	FIGURE_SEVEN,
	BRIDGE_CA_PKI,
} | pathbuilding/trust_graph.go | 0.772659 | 0.425486 | trust_graph.go | starcoder |
package templates
const JsIndex = `var DS = require('dslink');
// creates a node with an action on it
var Increment = DS.createNode({
onInvoke: function(columns) {
// get current value of the link
var previous = link.val('/counter');
// set new value by adding an amount to the previous amount
link.val('/counter', previous + parseInt(columns.amount));
}
});
// Process the arguments and initializes the default nodes.
var link = new DS.LinkProvider(process.argv.slice(2), 'template-javascript-', {
defaultNodes: {
// counter is a value node, it holds the value of our counter
counter: {
$type: 'int',
'?value': 0
},
// increment is an action node, it will increment /counter
// by the specified amount
increment: {
// references the increment profile, which makes this node an instance of
// our Increment class
$is: 'increment',
$invokable: 'write',
// $params is the parameters that are passed to onInvoke
$params: [
{
name: 'amount',
type: 'int',
default: 1
}
]
}
},
// register our custom node here as a profile
// when we use $is with increment, it
// creates our Increment node
profiles: {
increment: function(path, provider) {
return new Increment(path, provider);
}
}
});
// Connect to the broker.
// link.connect() returns a Promise.
link.connect().catch(function(e) {
console.log(e.stack);
});
`
const JsInstall = `var fs = require('fs'),
path = require('path'),
crypto = require('crypto'),
child = require('child_process');
function npmInstall() {
var MD5_PATH = path.join(__dirname, ".dslink.md5");
var file = fs.readFileSync(path.join(__dirname, "package.json"));
var md5 = "";
if(fs.existsSync(MD5_PATH)) {
md5 = fs.readFileSync(MD5_PATH).toString("utf8");
}
var hash = crypto.createHash("md5");
hash.update(file);
var base = hash.digest("base64");
if(base !== md5) {
fs.writeFileSync(MD5_PATH, base);
var npm = child.exec("npm install --production");
console.log("running npm install");
npm.stdout.on('data', function(data) {
console.log(data);
});
}
}
npmInstall();
`
const JsPackageJson = `{
"name": "dslink-{{.Lang}}-{{.Name}}",
"version": "0.0.1",
"description": "A template to kickstart creating a DSLink using the JavaScript SDK.",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"repository": {
"type": "git",
"url": "https://github.com/IOT-DSA/dslink-javascript-template.git"
},
"author": "",
"license": "Apache",
"bugs": {
"url": "<Bugs Url>"
},
"homepage": "<Homepage Url>",
"dependencies": {
"dslink": "^1.0.0"
}
}
` | templates/js_templates.go | 0.504883 | 0.409752 | js_templates.go | starcoder |
package codec
import (
"math"
"github.com/fileformats/graphics/jt/model"
)
// DeeringCodec decodes normals compressed with Deering's sextant/octant
// scheme into unit vectors, using a precomputed sin/cos lookup table.
type DeeringCodec struct {
	lookupTable *deeringLookupTable
	numBits float64
}
// NewDeeringCodec returns a codec configured for numBits of quantization,
// with its trigonometric lookup table precomputed.
func NewDeeringCodec(numBits int) *DeeringCodec {
	codec := &DeeringCodec{}
	codec.numBits = float64(numBits)
	codec.lookupTable = newDeeringLookupTable()
	return codec
}
// deeringCode bundles the four fields of a packed Deering normal code.
// NOTE(review): not referenced elsewhere in this file — possibly used by
// other files in the package; confirm before removing.
type deeringCode struct {
	sextant int64
	octant int64
	theta int64
	psi int64
}
// deeringLookupTable caches sin/cos of the quantized theta/psi angles so
// decompression avoids per-vector trigonometry. Entries are indexed by the
// quantized angle shifted into an nBits index space (see lookupThetaPsi).
type deeringLookupTable struct {
	nBits float64
	cosTheta []float64
	sinTheta []float64
	cosPsi []float64
	sinPsi []float64
}
// ToVector3D reconstructs the vector encoded by the given Deering
// (sextant, octant, theta, psi) code.
//
// A base vector is looked up in the canonical sextant/octant, then its
// coordinates are permuted according to the sextant and sign-flipped
// according to the octant bits (x=0x4, y=0x2, z=0x1 — a cleared bit
// negates that axis).
func (c *DeeringCodec) ToVector3D(sextant, octant, theta, psi uint32) model.Vector3D {
	// Lazily initialize, in case the codec was zero-constructed rather
	// than built via NewDeeringCodec.
	if c.lookupTable == nil {
		c.lookupTable = newDeeringLookupTable()
	}
	if c.numBits == 0 {
		c.numBits = 6 // fallback quantization width
	}
	// Odd sextants offset theta by one step — part of the Deering fold;
	// TODO confirm against the reference decoder.
	theta += sextant & 1
	cosTheta, sinTheta, cosPsi, sinPsi := c.lookupTable.lookupThetaPsi(float64(theta), float64(psi), c.numBits)
	vector := model.Vector3D{
		X: float32(cosTheta * cosPsi),
		Y: float32(sinPsi),
		Z: float32(sinTheta * cosPsi),
	}
	// Undo the canonical-sextant axis permutation.
	switch sextant {
	case 0:
	case 1:
		vector.Z, vector.X = vector.X, vector.Z
	case 2:
		vector.Z, vector.X, vector.Y = vector.X, vector.Y, vector.Z
	case 3:
		vector.Y, vector.X = vector.X, vector.Y
	case 4:
		vector.Y, vector.Z, vector.X = vector.X, vector.Y, vector.Z
	case 5:
		vector.Z, vector.Y = vector.Y, vector.Z
	}
	// Apply octant sign bits: cleared bit => negative axis.
	if octant & 0x4 == 0 {
		vector.X = -vector.X
	}
	if octant & 0x2 == 0 {
		vector.Y = -vector.Y
	}
	if octant & 0x1 == 0 {
		vector.Z = -vector.Z
	}
	return vector
}
// lookupThetaPsi returns cos/sin of theta and psi for a code quantized with
// `count` bits. Codes are left-shifted into the table's nBits index space;
// the 0xFFFFFFFF mask emulates 32-bit wraparound on 64-bit ints.
// Assumes count <= nBits and in-range indices — TODO confirm at call sites.
func (tbl *deeringLookupTable) lookupThetaPsi(theta, psi, count float64) (cosTheta float64, sinTheta float64, cosPsi float64, sinPsi float64) {
	offset := uint(tbl.nBits - count)
	offTheta := (int(theta) << offset) & 0xFFFFFFFF
	offPsi := (int(psi) << offset) & 0xFFFFFFFF
	return tbl.cosTheta[offTheta], tbl.sinTheta[offTheta], tbl.cosPsi[offPsi], tbl.sinPsi[offPsi]
}
// newDeeringLookupTable precomputes 257-entry (i = 0..256) sin/cos tables
// for an 8-bit quantization of theta and psi. psiMax (~0.61548 rad) bounds
// psi; theta is derived via asin(tan(...)) — presumably the sextant fold
// angle of the Deering scheme, TODO confirm derivation.
func newDeeringLookupTable() *deeringLookupTable {
	tbl := &deeringLookupTable{
		nBits: 8,
		cosTheta: []float64{},
		sinTheta: []float64{},
		cosPsi: []float64{},
		sinPsi: []float64{},
	}
	var tblSize float64 = 256
	psiMax := 0.615479709
	for i := 0; i <= int(tblSize); i++ {
		theta := math.Asin(math.Tan(psiMax * (tblSize - float64(i)) / tblSize))
		psi := psiMax * (float64(i) / tblSize)
		tbl.cosTheta = append(tbl.cosTheta, math.Cos(theta))
		tbl.sinTheta = append(tbl.sinTheta, math.Sin(theta))
		tbl.cosPsi = append(tbl.cosPsi, math.Cos(psi))
		tbl.sinPsi = append(tbl.sinPsi, math.Sin(psi))
	}
	return tbl
} | jt/codec/deering_codec.go | 0.594787 | 0.512266 | deering_codec.go | starcoder |
package strings
import (
"reflect"
"sort"
"strings"
)
// Set is a representation of a set of strings.
// If you have a []string that you want to do a lot of
// set operations on, prefer using this type.
// If you only have a one-off usage, use SliceContains.
// Membership is keyed presence; stored values are always true.
type Set map[string]bool
// Equal reports whether expect and actual contain exactly the same strings
// (including duplicates), without regard to order. Two empty slices —
// nil or not — are considered equal.
func Equal(expect, actual []string) bool {
	if len(expect) == 0 || len(actual) == 0 {
		return len(expect) == 0 && len(actual) == 0
	}
	// Sort copies so the inputs are left untouched.
	se := append([]string(nil), expect...)
	sa := append([]string(nil), actual...)
	sort.Strings(se)
	sort.Strings(sa)
	if len(se) != len(sa) {
		return false
	}
	for i := range se {
		if se[i] != sa[i] {
			return false
		}
	}
	return true
}
// Contains reports whether item is an element of list.
// If you expect to do this a lot, prefer converting
// to a Set. This is fine for one-offs.
func Contains(list []string, item string) bool {
	for i := range list {
		if list[i] == item {
			return true
		}
	}
	return false
}
// New returns a new Set containing the given strings.
func New(ss ...string) Set {
	result := make(Set, len(ss))
	for i := range ss {
		result[ss[i]] = true
	}
	return result
}
// Intersect returns a new Set holding the strings present in both s and s2.
// Neither receiver nor argument is modified.
func (s Set) Intersect(s2 Set) Set {
	result := make(Set)
	for member := range s {
		if !s2[member] {
			continue
		}
		result[member] = true
	}
	return result
}
// AddSet modifies s in-place to include all the elements of addSet,
// returning s to allow chaining.
func (s Set) AddSet(addSet Set) Set {
	// Idiomatic single-variable range; `for val, _ := range` is flagged
	// by gofmt -s / staticcheck.
	for val := range addSet {
		s[val] = true
	}
	return s
}
// AddSlice modifies s in-place to include all the elements of slice,
// returning s to allow chaining.
func (s Set) AddSlice(slice []string) Set {
	for i := range slice {
		s[slice[i]] = true
	}
	return s
}
// Add modifies s in-place to include str, returning s to allow chaining.
func (s Set) Add(str string) Set {
	s[str] = true
	return s
}
// RemoveSlice modifies s in-place to remove all the elements of slice,
// returning s to allow chaining.
func (s Set) RemoveSlice(slice []string) Set {
	for i := range slice {
		delete(s, slice[i])
	}
	return s
}
// RemoveSet modifies s in-place to remove all the elements of removeSet,
// returning s to allow chaining.
func (s Set) RemoveSet(removeSet Set) Set {
	// Idiomatic single-variable range; `for val, _ := range` is flagged
	// by gofmt -s / staticcheck.
	for val := range removeSet {
		delete(s, val)
	}
	return s
}
// Remove modifies s in-place to remove str. Deleting a missing key is a
// no-op. Unlike the other mutators, it does not return s.
func (s Set) Remove(str string) {
	delete(s, str)
}
// Contains reports whether val is a member of s.
// Safe on a nil Set: indexing a nil map yields false.
func (s Set) Contains(val string) bool {
	return s[val]
}
// IsEmpty reports whether the set is empty.
func (s Set) IsEmpty() bool {
	return len(s) == 0
}
// ToSlice returns a slice representation of s, in nondeterministic
// map-iteration order. Returns nil for an empty set, matching the
// original's never-appended `var keys []string` behavior.
func (s Set) ToSlice() []string {
	if len(s) == 0 {
		return nil
	}
	// Pre-size to avoid repeated slice growth.
	keys := make([]string, 0, len(s))
	for k := range s {
		keys = append(keys, k)
	}
	return keys
}
// String returns a string representation of s: members joined by commas,
// in nondeterministic map-iteration order.
func (s Set) String() string {
	return strings.Join(s.ToSlice(), ",")
} | shipshape/util/strings/strings.go | 0.81468 | 0.459864 | strings.go | starcoder |
package bridge
import (
"log"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/storage"
"github.com/rapidloop/sop/model"
"github.com/rapidloop/sop/sopdb"
)
// Closer is an object that has a Close() method that needs to be called to
// free up resources. It mirrors io.Closer and is used here to track
// cleanup of iterators, series, and series sets.
type Closer interface {
	Close() error
}
//------------------------------------------------------------------------------
/*
// SeriesIterator iterates over the data of a time series.
type SeriesIterator interface {
// Seek advances the iterator forward to the value at or after
// the given timestamp.
Seek(t int64) bool
// At returns the current timestamp/value pair.
At() (t int64, v float64)
// Next advances the iterator by one.
Next() bool
// Err returns the current error.
Err() error
}
*/
// SeriesIterator iterates over the data of a time series.
// It adapts a sopdb.Iterator (uint64 timestamps) to the Prometheus
// storage iterator contract (int64 timestamps).
type SeriesIterator struct {
	id int // series ID within the TSDB
	tsdb sopdb.TSDB // backing time-series database
	it sopdb.Iterator // underlying iterator positioned on series id
}

// NewSeriesIterator opens an iterator over the given series in tsdb.
func NewSeriesIterator(seriesID int, tsdb sopdb.TSDB) *SeriesIterator {
	return &SeriesIterator{
		id: seriesID,
		tsdb: tsdb,
		it: tsdb.Iterate(seriesID),
	}
}

// Seek advances the iterator forward to the value at or after
// the given timestamp.
func (s *SeriesIterator) Seek(t int64) bool {
	return s.it.Seek(uint64(t))
}

// At returns the current timestamp/value pair.
// The underlying uint64 timestamp is converted to int64.
func (s *SeriesIterator) At() (t int64, v float64) {
	var tt uint64
	tt, v = s.it.At()
	t = int64(tt)
	return
}

// Next advances the iterator by one.
func (s *SeriesIterator) Next() bool {
	return s.it.Next()
}

// Err returns the current error. Always nil here: the underlying
// sopdb.Iterator exposes no error in this adapter.
func (s *SeriesIterator) Err() error {
	return nil
}

// Close frees up allocated resources by closing the underlying iterator.
func (s *SeriesIterator) Close() error {
	s.it.Close()
	return nil
}
//------------------------------------------------------------------------------
/*
// Series represents a single time series.
type Series interface {
// Labels returns the complete set of labels identifying the series.
Labels() labels.Labels
// Iterator returns a new iterator of the data of the series.
Iterator() SeriesIterator
}
*/
// Series represents a single time series.
// Iterators created via Iterator are tracked in closers so that Close can
// release them all.
type Series struct {
	tsdb sopdb.TSDB
	labs labels.Labels
	id int
	closers []Closer
}

// NewSeries builds a Series for the given metric/ID pair, converting the
// sop metric's label-value pairs into a Prometheus labels.Labels.
func NewSeries(metric model.Metric, seriesID int, tsdb sopdb.TSDB) *Series {
	m := make(map[string]string)
	for _, lv := range metric {
		m[lv.Name] = lv.Value
	}
	return &Series{
		tsdb: tsdb,
		labs: labels.FromMap(m),
		id: seriesID,
	}
}

// Labels returns the complete set of labels identifying the series.
func (s *Series) Labels() labels.Labels {
	return s.labs
}

// Iterator returns a new iterator of the data of the series.
// The iterator is recorded so Close can free it later.
func (s *Series) Iterator() storage.SeriesIterator {
	sit := NewSeriesIterator(s.id, s.tsdb)
	s.closers = append(s.closers, sit)
	return sit
}

// Close releases every iterator handed out by Iterator.
func (s *Series) Close() error {
	for _, c := range s.closers {
		c.Close()
	}
	return nil
}
//------------------------------------------------------------------------------
// SeriesSet contains a set of series, backed by parallel slices of
// metrics and series IDs. index starts at -1 so the Next/At protocol
// visits every element.
type SeriesSet struct {
	metrics []model.Metric
	ids []int
	index int
	tsdb sopdb.TSDB
	err error
	closers []Closer
}

// NewSeriesSet pairs metrics with their series IDs; err, if non-nil, is
// surfaced via Err. Panics if the slices differ in length (programmer
// error at the call site).
func NewSeriesSet(metrics []model.Metric, ids []int, tsdb sopdb.TSDB,
	err error) *SeriesSet {
	if len(metrics) != len(ids) {
		panic("metrics and ids are of unequal cardinality")
	}
	return &SeriesSet{
		metrics: metrics,
		ids: ids,
		tsdb: tsdb,
		index: -1,
		err: err,
	}
}

// Next advances to the next series, reporting whether one exists.
func (ss *SeriesSet) Next() bool {
	if (ss.index+1) >= 0 && (ss.index+1) < len(ss.metrics) {
		ss.index++
		return true
	}
	return false
}

// At returns the series at the current position, constructing it lazily
// and tracking it for Close. Returns nil once the set is exhausted.
func (ss *SeriesSet) At() storage.Series {
	if ss.index >= len(ss.metrics) {
		return nil
	}
	series := NewSeries(ss.metrics[ss.index], ss.ids[ss.index], ss.tsdb)
	ss.closers = append(ss.closers, series)
	return series
}

// Err returns the error captured at construction time, if any.
func (ss *SeriesSet) Err() error {
	return ss.err
}

// Close releases every Series handed out by At.
func (ss *SeriesSet) Close() error {
	for _, c := range ss.closers {
		c.Close()
	}
	return nil
}
//------------------------------------------------------------------------------
// Querier provides reading access to time series data, translating
// Prometheus label matchers into sopdb index queries.
type Querier struct {
	indexdb sopdb.IndexDB
	tsdb sopdb.TSDB
	closers []Closer
}

// NewQuerier wraps the given index and time-series databases.
func NewQuerier(indexdb sopdb.IndexDB, tsdb sopdb.TSDB) *Querier {
	return &Querier{
		indexdb: indexdb,
		tsdb: tsdb,
	}
}

// Select returns a set of series that matches the given label matchers.
// On any failure an empty SeriesSet carrying the error is returned.
func (q *Querier) Select(mm ...*labels.Matcher) storage.SeriesSet {
	// compile matchers to a label expression
	terms := make([]model.LabelOp, len(mm))
	for i, m := range mm {
		terms[i].Name = m.Name
		terms[i].Value = m.Value
		terms[i].Op = int(m.Type) // just happen to have the same values.. :-)
	}
	var expr model.LabelExpr
	if err := expr.CompileFromTerms(terms); err != nil {
		log.Printf("bad query %v: %v", terms, err)
		return NewSeriesSet(nil, nil, q.tsdb, err)
	}
	// execute query against the index to get matching series IDs
	ids, err := q.indexdb.Query(expr)
	if err != nil {
		log.Printf("query failed: %v: %v", expr, err)
		return NewSeriesSet(nil, nil, q.tsdb, err)
	}
	// resolve each id back to its full metric (label set)
	metrics := make([]model.Metric, len(ids))
	for i, id := range ids {
		metrics[i], err = q.indexdb.Lookup(id)
		if err != nil {
			log.Printf("failed to lookup id %d: %v", id, err)
			return NewSeriesSet(nil, nil, q.tsdb, err)
		}
	}
	s := NewSeriesSet(metrics, ids, q.tsdb, nil)
	q.closers = append(q.closers, s)
	return s
}

// LabelValues returns all potential values for a label name.
// Unimplemented: always panics.
func (q *Querier) LabelValues(name string) ([]string, error) {
	panic("Not Implemented!")
}

// Close releases the resources of the Querier, including every SeriesSet
// handed out by Select.
func (q *Querier) Close() error {
	for _, c := range q.closers {
		c.Close()
	}
	return nil
}
//------------------------------------------------------------------------------
/*
// Queryable allows opening a storage querier.
type Queryable interface {
Querier(mint, maxt int64) (storage.Querier, error)
}
*/
//------------------------------------------------------------------------------
// QueryEngine wires a sopdb.DB into a Prometheus PromQL engine; the
// QueryEngine itself serves as the Queryable passed to promql.NewEngine.
type QueryEngine struct {
	db sopdb.DB
	e *promql.Engine
}

// NewQueryEngine constructs the engine over the given database.
func NewQueryEngine(db sopdb.DB) *QueryEngine {
	self := &QueryEngine{db: db}
	self.e = promql.NewEngine(self, nil)
	return self
}

// Querier returns a storage.Querier over the database.
// Note: mint/maxt are currently ignored — the whole range is exposed.
func (q *QueryEngine) Querier(mint, maxt int64) (storage.Querier, error) {
	return NewQuerier(q.db.Index(), q.db.TS()), nil
}

// PromEngine exposes the underlying PromQL engine.
func (q *QueryEngine) PromEngine() *promql.Engine {
	return q.e
} | bridge/storage.go | 0.739328 | 0.433262 | storage.go | starcoder |
package values
import "image/color"
// Color is the application color palette. Populate it with
// DefualtThemeColors (light theme) and optionally override a subset with
// DarkThemeColors.
type Color struct {
	Primary color.NRGBA
	Primary50 color.NRGBA
	PrimaryHighlight color.NRGBA
	// text colors
	Text color.NRGBA // default color #091440
	InvText color.NRGBA // inverted default color #ffffff
	GrayText1 color.NRGBA // darker shade #3D5873
	GrayText2 color.NRGBA // lighter shade of GrayText1 #596D81
	GrayText3 color.NRGBA // lighter shade of GrayText2 #8997A5 (hint)
	GrayText4 color.NRGBA // lighter shade of GrayText3 ##C4CBD2
	GreenText color.NRGBA // green text #41BE53
	// background colors
	Background color.NRGBA
	Black color.NRGBA
	BlueProgressTint color.NRGBA
	Danger color.NRGBA
	DeepBlue color.NRGBA
	LightBlue color.NRGBA
	LightBlue2 color.NRGBA
	LightBlue3 color.NRGBA
	LightBlue4 color.NRGBA
	LightBlue5 color.NRGBA
	LightBlue6 color.NRGBA
	Gray1 color.NRGBA
	Gray2 color.NRGBA
	Gray3 color.NRGBA
	Gray4 color.NRGBA
	Gray5 color.NRGBA
	Green50 color.NRGBA
	Green500 color.NRGBA
	Orange color.NRGBA
	Orange2 color.NRGBA
	Orange3 color.NRGBA
	OrangeRipple color.NRGBA
	Success color.NRGBA
	Success2 color.NRGBA
	Surface color.NRGBA
	SurfaceHighlight color.NRGBA
	Turquoise100 color.NRGBA
	Turquoise300 color.NRGBA
	Turquoise700 color.NRGBA
	Turquoise800 color.NRGBA
	Yellow color.NRGBA
	White color.NRGBA
}
// DarkThemeColors overrides the subset of the palette that differs in dark
// mode (primary, text, and several background colors). Fields not assigned
// here keep whatever values they already hold.
func (c *Color) DarkThemeColors() {
	c.Primary = rgb(0x57B6FF)
	// text colors
	c.Text = argb(0x99FFFFFF)
	c.GrayText1 = argb(0xDEFFFFFF)
	c.GrayText2 = argb(0x99FFFFFF)
	c.GrayText3 = argb(0x61FFFFFF)
	c.GrayText4 = argb(0x61FFFFFF)
	// background colors
	c.DeepBlue = argb(0x99FFFFFF)
	c.Gray1 = argb(0x99FFFFFF)
	c.Gray2 = rgb(0x3D3D3D)
	c.Gray3 = rgb(0x8997a5)
	c.Gray4 = rgb(0x121212)
	c.Gray5 = rgb(0x363636)
	c.Surface = rgb(0x252525)
}
// DefualtThemeColors fills the entire palette with the light-theme values
// and returns c to allow chaining.
// NOTE(review): the name misspells "Default" but is exported and must be
// kept for backward compatibility with existing callers.
func (c *Color) DefualtThemeColors() *Color {
	c.Primary = rgb(0x2970ff)
	c.Primary50 = rgb(0xE3F2FF)
	c.PrimaryHighlight = rgb(0x1B41B3)
	// text colors
	c.Text = rgb(0x091440)
	c.InvText = rgb(0xffffff)
	c.GrayText1 = rgb(0x3d5873)
	c.GrayText2 = rgb(0x596D81)
	c.GrayText3 = rgb(0x8997a5) //hint
	c.GrayText4 = rgb(0xc4cbd2)
	c.GreenText = rgb(0x41BE53)
	// background colors
	c.Background = argb(0x22444444)
	c.Black = rgb(0x000000)
	c.BlueProgressTint = rgb(0x73d7ff)
	c.Danger = rgb(0xed6d47)
	c.DeepBlue = rgb(0x091440)
	c.LightBlue = rgb(0xe4f6ff)
	c.LightBlue2 = rgb(0x75D8FF)
	c.LightBlue3 = rgb(0xBCE8FF)
	c.LightBlue4 = rgb(0xBBDEFF)
	c.LightBlue5 = rgb(0x70CBFF)
	c.LightBlue6 = rgb(0x4B91D8)
	c.Gray1 = rgb(0x3d5873) // darkest gray #3D5873 (icon color)
	c.Gray2 = rgb(0xe6eaed) // light 0xe6eaed
	c.Gray3 = rgb(0xc4cbd2) // InactiveGray #C4CBD2
	c.Gray4 = rgb(0xf3f5f6) //active n light gray combined f3f5f6
	c.Gray5 = rgb(0xf3f5f6)
	c.Green50 = rgb(0xE8F7EA)
	c.Green500 = rgb(0x41BE53)
	c.Orange = rgb(0xD34A21)
	c.Orange2 = rgb(0xF8E8E7)
	c.Orange3 = rgb(0xF8CABC)
	c.OrangeRipple = rgb(0xD32F2F)
	c.Success = rgb(0x41bf53)
	c.Success2 = rgb(0xE1F8EF)
	c.Surface = rgb(0xffffff)
	c.Turquoise100 = rgb(0xB6EED7)
	c.Turquoise300 = rgb(0x2DD8A3)
	c.Turquoise700 = rgb(0x00A05F)
	c.Turquoise800 = rgb(0x008F52)
	c.Yellow = rgb(0xffc84e)
	c.White = rgb(0xffffff)
	return c
}
// rgb builds a fully-opaque color (alpha 0xff) from a 0xRRGGBB value.
func rgb(c uint32) color.NRGBA {
	return argb(c | 0xff000000)
}
// argb unpacks a 0xAARRGGBB value into its four color.NRGBA channels.
func argb(c uint32) color.NRGBA {
	return color.NRGBA{A: uint8(c >> 24), R: uint8(c >> 16), G: uint8(c >> 8), B: uint8(c)}
} | ui/values/colors.go | 0.577734 | 0.439567 | colors.go | starcoder |
package httptesting
import (
"net/http"
"github.com/golib/assert"
)
// AssertStatus asserts that the response status code is equal to value.
// The boolean result reports whether the assertion held.
func (r *Request) AssertStatus(status int) bool {
	return assert.EqualValues(r.t, status, r.Response.StatusCode,
		"Expected response status code of %d, but got %d",
		status,
		r.Response.StatusCode,
	)
}

// AssertOK asserts that the response status code is 200.
func (r *Request) AssertOK() bool {
	return r.AssertStatus(http.StatusOK)
}

// AssertForbidden asserts that the response status code is 403.
func (r *Request) AssertForbidden() bool {
	return r.AssertStatus(http.StatusForbidden)
}

// AssertNotFound asserts that the response status code is 404.
func (r *Request) AssertNotFound() bool {
	return r.AssertStatus(http.StatusNotFound)
}

// AssertInternalError asserts that the response status code is 500.
func (r *Request) AssertInternalError() bool {
	return r.AssertStatus(http.StatusInternalServerError)
}
// AssertHeader asserts that the response includes named header with value.
// Lookup uses Header.Get, so the header name is canonicalized.
func (r *Request) AssertHeader(name, value string) bool {
	actual := r.Response.Header.Get(name)
	return assert.EqualValues(r.t, value, actual,
		"Expected response header contains %s of %s, but got %s",
		http.CanonicalHeaderKey(name),
		value,
		actual,
	)
}

// AssertContentType asserts that the response includes Content-Type header with value.
func (r *Request) AssertContentType(contentType string) bool {
	return r.AssertHeader("Content-Type", contentType)
}

// AssertExistHeader asserts that the response includes named header,
// regardless of its value.
func (r *Request) AssertExistHeader(name string) bool {
	name = http.CanonicalHeaderKey(name)
	_, ok := r.Response.Header[name]
	if !ok {
		assert.Fail(r.t, "Response header: "+name+" (*required)",
			"Expected response header includes %s",
			name,
		)
	}
	return ok
}

// AssertNotExistHeader asserts that the response does not include named header.
func (r *Request) AssertNotExistHeader(name string) bool {
	name = http.CanonicalHeaderKey(name)
	_, ok := r.Response.Header[name]
	if ok {
		assert.Fail(r.t, "Response header: "+name+" (*not required)",
			"Expected response header does not include %s",
			name,
		)
	}
	return !ok
}
// AssertEmpty asserts that the response body is empty.
func (r *Request) AssertEmpty() bool {
	return assert.Empty(r.t, string(r.ResponseBody))
}

// AssertNotEmpty asserts that the response body is not empty.
func (r *Request) AssertNotEmpty() bool {
	return assert.NotEmpty(r.t, string(r.ResponseBody))
}

// AssertContains asserts that the response body contains the string.
func (r *Request) AssertContains(s string) bool {
	return assert.Contains(r.t, string(r.ResponseBody), s,
		"Expected response body contains %q",
		s,
	)
}

// AssertNotContains asserts that the response body does not contain the string.
func (r *Request) AssertNotContains(s string) bool {
	return assert.NotContains(r.t, string(r.ResponseBody), s,
		"Expected response body does not contain %q",
		s,
	)
}

// AssertMatch asserts that the response body matches the regular expression.
func (r *Request) AssertMatch(re string) bool {
	return assert.Match(r.t, re, r.ResponseBody,
		"Expected response body matches regexp %q",
		re,
	)
}

// AssertNotMatch asserts that the response body does not match the regular expression.
func (r *Request) AssertNotMatch(re string) bool {
	return assert.NotMatch(r.t, re, r.ResponseBody,
		"Expected response body does not match regexp %q",
		re,
	)
}
// AssertContainsJSON asserts that the response body contains JSON value of the key.
func (r *Request) AssertContainsJSON(key string, value interface{}) bool {
	return assert.ContainsJSON(r.t, string(r.ResponseBody), key, value)
}

// AssertNotContainsJSON asserts that the response body does not contain JSON value of the key.
func (r *Request) AssertNotContainsJSON(key string) bool {
	return assert.NotContainsJSON(r.t, string(r.ResponseBody), key)
} | request_assertions.go | 0.732018 | 0.466663 | request_assertions.go | starcoder |
package common
import "math"
// GraphNode is a Node implementation holding a grid location and the
// neighboring nodes it is connected to. Note: its methods use value
// receivers, so mutations are only visible through the returned copies.
type GraphNode struct {
	location Location
	edges []Node
}
// newNode creates an unconnected GraphNode at the given location.
func newNode(location Location) Node {
	return GraphNode{
		location: location,
		edges:    make([]Node, 0),
	}
}
// getLocation returns the node's grid location.
func (g GraphNode) getLocation() Location {
	return g.location
}
// isNeighbor reports whether that is exactly one grid step away from g
// along a single axis: one of |dx|, |dy|, |dz| must equal gridStep and the
// other two must be zero. A nil argument is never a neighbor.
func (g GraphNode) isNeighbor(that Node) bool {
	if that == nil {
		return false
	}
	x1, y1, z1 := that.getLocation().As3DCoordinates()
	x2, y2, z2 := g.getLocation().As3DCoordinates()
	// Differences are computed in int64 first to avoid unsigned wraparound.
	dx := math.Abs(float64(int64(x1) - int64(x2)))
	dy := math.Abs(float64(int64(y1) - int64(y2)))
	dz := math.Abs(float64(int64(z1) - int64(z2)))
	switch {
	case dx == gridStep:
		return dy == 0 && dz == 0
	case dy == gridStep:
		return dx == 0 && dz == 0
	default:
		return dz == gridStep && dx == 0 && dy == 0
	}
}
// connect adds an undirected edge between g and that. Because GraphNode
// methods use value receivers, the updated copies of both nodes are
// returned alongside a success flag; callers must keep the returned nodes.
// Fails when that is nil, the nodes are not grid neighbors, or the edge
// already exists.
func (g GraphNode) connect(that Node) (bool, Node, Node) {
	if that == nil {
		return false, g, that
	}
	wasSuccessful := g.isNeighbor(that)
	// Reject duplicate edges.
	for _, ed := range g.edges {
		if ed.compare(that) {
			wasSuccessful = false
		}
	}
	if wasSuccessful {
		// Tentatively append, let `that` add the reciprocal edge (which
		// yields an updated copy of that), then replace our tentative
		// entry with that updated copy.
		g.edges = append(g.edges, that)
		_, that, _ = that.connect(g)
		g.edges = append(g.edges[:len(g.edges)-1], that)
	}
	return wasSuccessful, g, that
}
// disconnect removes the edge between g and that (and the reciprocal edge
// on that's side), returning the updated copies of both nodes along with a
// success flag.
//
// Bug fix: the original removed the element from g.edges while still
// ranging over it, which can skip or re-visit entries once the backing
// array shifts. Since connect refuses duplicate edges, at most one match
// exists, so we remove it and return immediately. The reciprocal call
// passes the already-updated g, so the recursion terminates after one
// round trip.
func (g GraphNode) disconnect(that Node) (bool, Node, Node) {
	if that == nil {
		return false, g, that
	}
	for i, ed := range g.edges {
		if ed.compare(that) {
			g.edges = append(g.edges[:i], g.edges[i+1:]...)
			_, that, _ = that.disconnect(g)
			return true, g, that
		}
	}
	return false, g, that
}
// getConnected returns a copy of the node's neighbor list.
func (g GraphNode) getConnected() []Node {
	out := make([]Node, 0, len(g.edges))
	out = append(out, g.edges...)
	return out
}
// compare reports whether g and that occupy the same grid location.
// A nil argument is never equal.
func (g GraphNode) compare(that Node) bool {
	if that == nil {
		return false
	}
	return g.getLocation().Compare(that.getLocation())
}
// hardCompare reports whether g and that occupy the same location AND have
// the same set of neighbors (compared by location, order-independent).
// Quadratic in the neighbor count.
func (g GraphNode) hardCompare(that Node) bool {
	if !g.compare(that) {
		return false
	}
	thisConnected := g.getConnected()
	thatConnected := that.getConnected()
	if len(thisConnected) != len(thatConnected) {
		return false
	}
	// every neighbor of g must appear among that's neighbors
	isEqual := true
	for _, thisConnectedNode := range thisConnected {
		equalFound := false
		for _, thatConnectedNode := range thatConnected {
			if (thisConnectedNode).compare(thatConnectedNode) {
				equalFound = true
			}
		}
		if !equalFound {
			isEqual = false
		}
	}
	return isEqual
} | common/graphNode.go | 0.651133 | 0.434701 | graphNode.go | starcoder |
package types
type DataInputAvailability int
// Matcher describes a type that can produce a match result from a set of matching data.
type Matcher interface {
Match(MatchingData) (Result, error)
}
// OnMatch is a node in the match tree, either describing an action (leaf node) or
// the start of a subtree (internal node).
type OnMatch struct {
Matcher Matcher
Action Action
}
// Action describes an opaque action that is the final result of a match. Implementations would likely
// need to cast this to a more appropriate type.
type Action interface{}
// MatchingData describes an opaque set of input data.
type MatchingData interface{}
// Result describes the result of evaluating the match tree.
type Result struct {
// MatchResult is the final result, if NeedMoreData is false. This can be nil if the match tree completed
// but no action was resolved.
MatchResult *OnMatch
// NeedMoreData specified whether the match tree failed to resolve due to input data not being available yet.
// This can imply that as more data is made available, a match might be found.
NeedMoreData bool
}
const (
// NotAvailable indicates that the data input is not available.
NotAvailable DataInputAvailability = iota
// MoreDataMightBeAvailable indicates that there might be more data available.
MoreDataMightBeAvailable DataInputAvailability = iota
// AllDataAvailable indicates that all data is present, no more data will be added.
AllDataAvailable DataInputAvailability = iota
)
// DataInputResult describes the result of evaluating a DataInput.
type DataInputResult struct {
// Availability describes the kind of data availability the associated data has.
Availability DataInputAvailability
// Data is the resulting data. This might be nil if the data is not available or if the
// backing data is available but the requested value does not.
Data *string
}
// DataInput describes a type that can extract an input value from the MatchingData.
type DataInput interface {
Input(MatchingData) (DataInputResult, error)
} | xdsmatcher/pkg/matcher/types/types.go | 0.670393 | 0.498535 | types.go | starcoder |
package Euler2D
import (
"fmt"
"math"
"sort"
"sync"
"github.com/notargets/gocfd/DG2D"
"github.com/notargets/gocfd/utils"
)
// VertexToElement maps vertices to elements: each entry is
// [vertexID, elementID, threadID]. It implements sort.Interface, ordering
// by vertex ID only.
type VertexToElement [][3]int32 // Vertex id is the first int32, element ID is the next, threadID third

func (ve VertexToElement) Len() int { return len(ve) }
func (ve VertexToElement) Swap(i, j int) { ve[i], ve[j] = ve[j], ve[i] }
func (ve VertexToElement) Less(i, j int) bool { return ve[i][0] < ve[j][0] }

// Sort orders the entries in-place by ascending vertex ID.
func (ve VertexToElement) Sort() { sort.Sort(ve) }
// NewVertexToElement builds the vertex-to-element adjacency list from a
// [Kmax,3] element-to-vertex matrix (triangles only). Entries come back
// sorted by vertex ID with thread IDs initialized to zero. Panics when the
// matrix does not have exactly three vertex columns.
func NewVertexToElement(EtoV utils.Matrix) (VtoE VertexToElement) {
	Kmax, Nverts := EtoV.Dims()
	if Nverts != 3 {
		panic(fmt.Errorf("EtoV should have dimensions [Kmax,3] was [%d,%d]", Kmax, Nverts))
	}
	VtoE = make(VertexToElement, Kmax*3)
	for k := 0; k < Kmax; k++ {
		for i := 0; i < 3; i++ {
			// One triple per (element, local vertex) pair.
			VtoE[3*k+i] = [3]int32{int32(EtoV.At(k, i)), int32(k), 0}
		}
	}
	VtoE.Sort()
	return
}
// Shard splits the (vertex-sorted) adjacency list into pm.ParallelDegree
// per-thread lists, remapping each global element ID into a (local index,
// thread ID) pair under pm. After a shard reaches the approximate bucket
// size, appending continues until the current vertex's run of entries ends,
// so all entries for one vertex land in the same shard.
func (ve VertexToElement) Shard(pm *PartitionMap) (veSharded []VertexToElement) {
	var (
		NPar             = pm.ParallelDegree
		lve              = len(ve)
		VertexPartitions = NewPartitionMap(NPar, lve) // This has to be re-done to honor vertex grouping
		ib               int
		vNum             int32
	)
	veSharded = make([]VertexToElement, NPar)
	approxBucketSize := VertexPartitions.GetBucketDimension(0)
	// getShardedPair rewrites a (vertex, globalElement, _) triple into
	// (vertex, localElement, threadID) using the solution partition map.
	getShardedPair := func(vve [3]int32, pm *PartitionMap) (vves [3]int32) {
		nodeIDSharded, _, threadID := pm.GetLocalK(int(vve[1]))
		vves = [3]int32{vve[0], int32(nodeIDSharded), int32(threadID)}
		return
	}
	_ = getShardedPair
	for np := 0; np < NPar; np++ {
		// Fill this shard up to the approximate bucket size.
		for i := 0; i < approxBucketSize; i++ {
			veSharded[np] = append(veSharded[np], getShardedPair(ve[ib], pm))
			//veSharded[np] = append(veSharded[np], ve[ib])
			ib++
			if ib == lve {
				return
			}
		}
		// Keep appending until the run of entries for the current vertex is
		// exhausted, so a vertex never straddles two shards.
		vNum = ve[ib][0]
		for ib < lve && ve[ib][0] == vNum {
			veSharded[np] = append(veSharded[np], getShardedPair(ve[ib], pm))
			//veSharded[np] = append(veSharded[np], ve[ib])
			ib++
			if ib == lve {
				return
			}
		}
	}
	return
}
// ScalarDissipation holds the state for shock-capturing artificial viscosity:
// a per-element scalar viscosity from a modal shock sensor, aggregated to mesh
// vertices and optionally interpolated back to a continuous field.
type ScalarDissipation struct {
	VtoE                       []VertexToElement // Sharded vertex to element map, [2] is [vertID, ElementID_Sharded]
	EtoV                       []utils.Matrix    // Sharded Element to Vertex map, Kx3
	Epsilon                    []utils.Matrix    // Sharded Np x Kmax, Interpolated from element vertices
	EpsilonScalar              [][]float64       // Sharded scalar value of dissipation, one per element
	DissDOF, DissDOF2, DissDiv []utils.Matrix    // Sharded NpFlux x Kmax, DOF for Gradient calculation using RT
	DissX, DissY               []utils.Matrix    // Sharded NpFlux x Kmax, X and Y derivative of dissipation field
	EpsVertex                  []float64         // NVerts x 1, Aggregated (Max) of epsilon surrounding each vertex, Not sharded
	PMap                       *PartitionMap     // Partition map for the solution shards in K
	U, UClipped                []utils.Matrix    // Sharded scratch areas for assembly and testing of solution values
	Clipper                    utils.Matrix      // Matrix used to clip the topmost mode from the solution polynomial, used in shockfinder
	dfr                        *DG2D.DFR2D       // Element/topology definitions used throughout
	S0, Kappa                  float64           // Shock sensor ramp center and half-width (see CalculateElementViscosity)
	BaryCentricCoords          utils.Matrix      // A thruple(lam0,lam1,lam2) for interpolation for each interior point, Npx3
	VertexEpsilonValues        []utils.Matrix    // Sharded 3 x Kmax scratch of per-element vertex viscosities
}
// NewScalarDissipation allocates all sharded working storage for artificial
// dissipation over the partition map pm, builds the vertex adjacency shards,
// the barycentric interpolation stencil and the mode-clipping matrix.
// A nonzero kappa overrides the default sensor ramp half-width of 2.
func NewScalarDissipation(kappa float64, dfr *DG2D.DFR2D, pm *PartitionMap) (sd *ScalarDissipation) {
	var (
		NPar   = pm.ParallelDegree
		el     = dfr.SolutionElement
		Np     = el.Np
		NpFlux = dfr.FluxElement.Np
		order  = float64(el.N)
		NVerts = dfr.VX.Len()
	)
	_ = order // vestigial suppression: order is in fact used in S0 below
	sd = &ScalarDissipation{
		EpsVertex:           make([]float64, NVerts),
		EpsilonScalar:       make([][]float64, NPar),    // Viscosity, constant over the element
		Epsilon:             make([]utils.Matrix, NPar), // Epsilon field, expressed over solution points
		VertexEpsilonValues: make([]utils.Matrix, NPar), // Epsilon field, expressed over solution points
		DissDOF:             make([]utils.Matrix, NPar),
		DissDOF2:            make([]utils.Matrix, NPar),
		DissDiv:             make([]utils.Matrix, NPar),
		DissX:               make([]utils.Matrix, NPar),
		DissY:               make([]utils.Matrix, NPar),
		VtoE:                NewVertexToElement(dfr.Tris.EToV).Shard(pm),
		PMap:                pm,
		dfr:                 dfr,
		// Sharded working matrices
		U:        make([]utils.Matrix, NPar),
		UClipped: make([]utils.Matrix, NPar),
		// Sensor ramp center scales as 1/p^4 with the polynomial order.
		S0:    1.0 / math.Pow(order, 4.),
		Kappa: 2.,
		//Kappa: 0.25,
		//S0: 10.,
	}
	sd.EtoV = sd.shardEtoV(dfr.Tris.EToV)
	sd.createInterpolationStencil()
	if kappa != 0. {
		sd.Kappa = kappa
	}
	// Size every per-thread matrix to that thread's bucket dimension.
	for np := 0; np < NPar; np++ {
		sd.U[np] = utils.NewMatrix(Np, 1)
		sd.UClipped[np] = utils.NewMatrix(Np, 1)
		Kmax := pm.GetBucketDimension(np)
		sd.Epsilon[np] = utils.NewMatrix(NpFlux, Kmax)
		sd.VertexEpsilonValues[np] = utils.NewMatrix(3, Kmax)
		sd.EpsilonScalar[np] = make([]float64, Kmax)
		sd.DissDOF[np] = utils.NewMatrix(NpFlux, Kmax)
		sd.DissDOF2[np] = utils.NewMatrix(NpFlux, Kmax)
		sd.DissDiv[np] = utils.NewMatrix(Np, Kmax)
		sd.DissX[np] = utils.NewMatrix(NpFlux, Kmax)
		sd.DissY[np] = utils.NewMatrix(NpFlux, Kmax)
	}
	/*
		The "Clipper" matrix drops the last mode from the polynomial and forms an alternative field of values at the node
		points based on a polynomial with one less term. In other words, if we have a polynomial of degree "p", expressed
		as values at Np node points, multiplying the Node point values vector by Clipper produces an alternative version
		of the node values based on truncating the last polynomial mode.
	*/
	{
		data := make([]float64, Np)
		for i := 0; i < Np; i++ {
			if i != Np-1 {
				data[i] = 1.
			} else {
				data[i] = 0.
			}
		}
		diag := utils.NewDiagMatrix(Np, data)
		// Clipper = V * diag(1,...,1,0) * Vinv: to modal space, zero top mode, back.
		sd.Clipper = sd.dfr.SolutionElement.V.Mul(diag).Mul(sd.dfr.SolutionElement.Vinv)
	}
	return
}
// shardEtoV partitions the global element-to-vertex matrix into one
// [bucketLen,3] matrix per thread, following the bucket ranges of the
// partition map. Panics when a bucket's row count disagrees with its
// declared dimension.
func (sd *ScalarDissipation) shardEtoV(EtoV utils.Matrix) (ev []utils.Matrix) {
	pm := sd.PMap
	ev = make([]utils.Matrix, pm.ParallelDegree)
	for np := 0; np < pm.ParallelDegree; np++ {
		bucketLen := pm.GetBucketDimension(np)
		ev[np] = utils.NewMatrix(bucketLen, 3)
		kmin, kmax := pm.GetBucketRange(np)
		row := 0
		for k := kmin; k < kmax; k++ {
			for j := 0; j < 3; j++ {
				ev[np].Set(row, j, EtoV.At(k, j))
			}
			row++
		}
		if row != bucketLen {
			panic(fmt.Errorf("dimension incorrect, should be %d, is %d", bucketLen, row))
		}
	}
	return
}
// propagateEpsilonMaxToVertices aggregates per-element viscosity onto shared
// vertices, keeping the maximum over all elements touching each vertex. The
// VtoE shard is sorted by vertex ID, so entries for one vertex are contiguous:
// the first entry seen resets the vertex value, later ones take a running max.
// NOTE(review): sd.EpsVertex is written without locking from one goroutine per
// shard — this presumably relies on Shard keeping all of a vertex's entries in
// a single shard; confirm against VertexToElement.Shard.
func (sd *ScalarDissipation) propagateEpsilonMaxToVertices(myThread int, wg *sync.WaitGroup) {
	var (
		VtoE = sd.VtoE[myThread]
		max  = math.Max
	)
	oldVert := -1
	for _, val := range VtoE {
		vert, k, threadID := int(val[0]), int(val[1]), int(val[2])
		if oldVert == vert { // we're in the middle of processing this vert, update normally
			sd.EpsVertex[vert] = max(sd.EpsVertex[vert], sd.EpsilonScalar[threadID][k])
		} else { // we're on a new vertex, reset the vertex value
			sd.EpsVertex[vert] = sd.EpsilonScalar[threadID][k]
			oldVert = vert
		}
	}
	wg.Done()
}
// ContinuityLevel selects how the artificial-viscosity field is represented
// across elements when dissipation is added.
type ContinuityLevel uint8

const (
	// No uses a single scalar viscosity per element (discontinuous field).
	No ContinuityLevel = iota
	// C0 interpolates vertex-aggregated viscosity into a continuous field.
	C0
	// C1 is declared but not handled by AddDissipation's switch.
	C1
)
// AddDissipation adds the divergence of the artificial-dissipation flux
// epsilon*Grad(U) to RHSQ for all four conserved variables on this thread's
// elements, using the RT (Raviart-Thomas) flux element for the divergence.
func (sd *ScalarDissipation) AddDissipation(c *Euler, cont ContinuityLevel, myThread int, Jinv, Jdet utils.Matrix, Q, RHSQ [4]utils.Matrix) {
	/*
		The dissipation term is in the form:
		diss = epsilon*Grad(U)
		dU/dT = -Div(Flux) + Div(diss)
		dU/dT = -Div(Flux) + Div(epsilon*Grad(U))
		dU/dT = -(Div(Flux) - Div(epsilon*Grad(U)))
		dU/dT = -Div(Flux - epsilon*Grad(U))
	*/
	var (
		dfr                 = sd.dfr
		Kmax                = sd.PMap.GetBucketDimension(myThread)
		NpInt, NpFlux       = dfr.FluxElement.Nint, dfr.FluxElement.Np
		KmaxGlobal          = sd.PMap.MaxIndex
		EpsilonScalar       = sd.EpsilonScalar[myThread]
		Epsilon             = sd.Epsilon[myThread]
		DOF, DOF2           = sd.DissDOF[myThread], sd.DissDOF2[myThread]
		DIV                 = sd.DissDiv[myThread]
		DissX, DissY        = sd.DissX[myThread], sd.DissY[myThread]
		EtoV                = sd.EtoV[myThread]
		VertexEpsilonValues = sd.VertexEpsilonValues[myThread]
	)
	if cont == C0 {
		// Interpolate epsilon within each element: gather the three vertex
		// viscosities per element, then apply the barycentric stencil.
		for k := 0; k < Kmax; k++ {
			tri := EtoV.DataP[3*k : 3*k+3]
			v := [3]int{int(tri[0]), int(tri[1]), int(tri[2])}
			for vert := 0; vert < 3; vert++ {
				ind := k + vert*Kmax
				VertexEpsilonValues.DataP[ind] = sd.EpsVertex[v[vert]]
			}
		}
		sd.BaryCentricCoords.Mul(VertexEpsilonValues, Epsilon)
	}
	// Loop over the four conserved variables.
	for n := 0; n < 4; n++ {
		//c.GetSolutionGradient(myThread, n, Q, DissX, DissY, DOF, DOF2)
		c.GetSolutionGradientUsingRTElement(myThread, n, Q, DissX, DissY, DOF, DOF2)
		// Scale the gradient by viscosity: constant per element (No) or the
		// C0-interpolated pointwise field.
		switch cont {
		case No:
			for k := 0; k < Kmax; k++ {
				for i := 0; i < NpFlux; i++ {
					ind := k + Kmax*i
					DissX.DataP[ind] *= EpsilonScalar[k] // Scalar viscosity, constant within each k'th element
					DissY.DataP[ind] *= EpsilonScalar[k]
				}
			}
		case C0:
			DissX.ElMul(Epsilon)
			DissY.ElMul(Epsilon)
		}
		/*
			Add the DissX and DissY to the RT_DOF using the contravariant transform for the interior
			and IInII for the edges
		*/
		var (
			DiXd, DiYd = DissX.DataP, DissY.DataP
			NpEdge     = dfr.FluxElement.Nedge
			DOFd       = DOF.DataP
		)
		for k := 0; k < Kmax; k++ {
			var (
				JdetD   = Jdet.DataP[k]
				JinvD   = Jinv.DataP[4*k : 4*(k+1)]
				IInIId  = dfr.IInII.DataP
				kGlobal = sd.PMap.GetGlobalK(k, myThread)
			)
			// Interior points: contravariant transform via Jdet * Jinv.
			for i := 0; i < NpInt; i++ {
				ind := k + Kmax*i
				ind2 := k + Kmax*(i+NpInt)
				DOFd[ind] = JdetD * (JinvD[0]*DiXd[ind] + JinvD[1]*DiYd[ind])
				DOFd[ind2] = JdetD * (JinvD[2]*DiXd[ind] + JinvD[3]*DiYd[ind])
			}
			// Edge points: project onto face normals, scaled by IInII.
			for edgeNum := 0; edgeNum < 3; edgeNum++ {
				var (
					fInd  = kGlobal + KmaxGlobal*edgeNum
					IInII = IInIId[fInd]
					nx    = dfr.FaceNorm[0].DataP[fInd]
					ny    = dfr.FaceNorm[1].DataP[fInd]
					shift = NpEdge * edgeNum
				)
				for i := 0; i < NpEdge; i++ {
					ind := k + (2*NpInt+i+shift)*Kmax
					DOFd[ind] = (nx*DiXd[ind] + ny*DiYd[ind]) * IInII
				}
			}
		}
		// Divergence of the RT DOF, then accumulate into the RHS scaled by 1/Jdet.
		sd.dfr.FluxElement.DivInt.Mul(DOF, DIV)
		for k := 0; k < Kmax; k++ {
			var (
				oojd = 1. / Jdet.DataP[k]
			)
			for i := 0; i < NpInt; i++ {
				ind := k + i*Kmax
				RHSQ[n].DataP[ind] += oojd * DIV.DataP[ind]
			}
		}
	}
}
// linearInterpolateEpsilon fills this thread's Epsilon field by linearly
// interpolating the three vertex viscosities over each element's reference
// (r,s) coordinates at the flux-element points.
func (sd *ScalarDissipation) linearInterpolateEpsilon(myThread int) {
	var (
		dfr          = sd.dfr
		Epsilon      = sd.Epsilon[myThread]
		EtoV         = sd.EtoV[myThread]
		NpFlux, KMax = dfr.FluxElement.Np, sd.PMap.GetBucketDimension(myThread)
		R, S         = dfr.FluxElement.R, dfr.FluxElement.S
	)
	// vertLinear evaluates the plane through the three vertex values f at
	// reference coordinates (r,s), with the unit triangle spanning [-1,1].
	vertLinear := func(r, s float64, f [3]float64) (fi float64) {
		var (
			rLen, sLen     = 2., 2.
			drFrac, dsFrac = (r - (-1.)) / rLen, (s - (-1.)) / sLen
			dr, ds         = f[1] - f[0], f[2] - f[0]
		)
		fi = dr*drFrac + ds*dsFrac + f[0]
		return
	}
	// Interpolate epsilon within each element
	for k := 0; k < KMax; k++ {
		tri := EtoV.Row(k).DataP
		v := [3]int{int(tri[0]), int(tri[1]), int(tri[2])}
		eps := [3]float64{sd.EpsVertex[v[0]], sd.EpsVertex[v[1]], sd.EpsVertex[v[2]]}
		for i := 0; i < NpFlux; i++ {
			ind := k + KMax*i
			Epsilon.DataP[ind] = vertLinear(R.DataP[i], S.DataP[i], eps)
		}
	}
}
// baryCentricInterpolateEpsilon fills this thread's Epsilon field using the
// precomputed barycentric weights: eps(point) = lam0*e0 + lam1*e1 + lam2*e2
// of the element's three vertex viscosities.
// NOTE(review): this iterates SolutionElement.Np rows while Epsilon is
// allocated with FluxElement.Np rows — only the first Np rows are written;
// confirm that is intended.
func (sd *ScalarDissipation) baryCentricInterpolateEpsilon(myThread int) {
	var (
		dfr      = sd.dfr
		Np, KMax = dfr.SolutionElement.Np, sd.PMap.GetBucketDimension(myThread)
		Epsilon  = sd.Epsilon[myThread]
		EtoV     = sd.EtoV[myThread]
	)
	// Interpolate epsilon within each element
	for k := 0; k < KMax; k++ {
		tri := EtoV.Row(k).DataP
		v := [3]int{int(tri[0]), int(tri[1]), int(tri[2])}
		eps := [3]float64{sd.EpsVertex[v[0]], sd.EpsVertex[v[1]], sd.EpsVertex[v[2]]}
		for i := 0; i < Np; i++ {
			ind := k + KMax*i
			bcc := sd.BaryCentricCoords.Row(i).DataP
			Epsilon.DataP[ind] = bcc[0]*eps[0] + bcc[1]*eps[1] + bcc[2]*eps[2]
		}
	}
}
// GetScalarEpsilonPlotField overwrites each shard's Epsilon matrix with its
// per-element scalar viscosity (broadcast to every row) and recombines the
// shards into one matrix for plotting. Note: this clobbers any interpolated
// Epsilon content.
func (sd *ScalarDissipation) GetScalarEpsilonPlotField(c *Euler) (fld utils.Matrix) {
	for np := 0; np < sd.PMap.ParallelDegree; np++ {
		rows, cols := sd.Epsilon[np].Dims()
		for k := 0; k < cols; k++ {
			constEps := sd.EpsilonScalar[np][k]
			for i := 0; i < rows; i++ {
				sd.Epsilon[np].DataP[k+cols*i] = constEps
			}
		}
	}
	return c.RecombineShardsK(sd.Epsilon)
}
// GetC0EpsilonPlotField recombines the sharded (already interpolated) Epsilon
// fields into a single matrix for plotting.
func (sd *ScalarDissipation) GetC0EpsilonPlotField(c *Euler) (fld utils.Matrix) {
	return c.RecombineShardsK(sd.Epsilon)
}
// CalculateElementViscosity evaluates a modal shock sensor per element from
// the density field and maps it through a smooth sinusoidal ramp onto a
// scalar viscosity in [0, eps0], then aggregates the per-element values to
// mesh vertices (max) in a second parallel pass.
func (sd *ScalarDissipation) CalculateElementViscosity(Qall [][4]utils.Matrix) {
	var (
		wg  = sync.WaitGroup{}
		dfr = sd.dfr
	)
	for np := 0; np < sd.PMap.ParallelDegree; np++ {
		wg.Add(1)
		go func(myThread int) {
			var (
				Rho        = Qall[myThread][0]
				Eps        = sd.EpsilonScalar[myThread]
				Kmax       = sd.PMap.GetBucketDimension(myThread)
				U          = sd.U[myThread]
				UClipped   = sd.UClipped[myThread]
				KMaxGlobal = sd.PMap.MaxIndex
				Order      = float64(sd.dfr.N)
			)
			/*
				Eps0 wants to be (h/p) and is supposed to be proportional to cell width
				Something like this for the "h" quantity seems right
				Np1 = c.dfr.N + 1
				Np12 = float64(Np1 * Np1)
				edgeLen = e.GetEdgeLength()
				fs := 0.5 * Np12 * edgeLen / Jdet[bn].DataP[k]
			*/
			for k := 0; k < Kmax; k++ {
				// Get edges for this element
				kGlobal := sd.PMap.GetGlobalK(k, myThread)
				var maxEdgeLen float64
				maxEdgeLen = -1
				// Use the longest edge (via IInII) as the cell-width proxy "h".
				for edgeNum := 0; edgeNum < 3; edgeNum++ {
					ind := kGlobal + KMaxGlobal*edgeNum
					edgeLen := dfr.IInII.DataP[ind]
					if edgeLen > maxEdgeLen {
						maxEdgeLen = edgeLen
					}
				}
				var (
					eps0        = maxEdgeLen / Order
					Se          = math.Log10(sd.moment(k, Kmax, U, UClipped, Rho))
					left, right = sd.S0 - sd.Kappa, sd.S0 + sd.Kappa
					oo2kappa    = 0.5 / sd.Kappa
				)
				// Smooth activation: 0 below the band, eps0 above it, and a
				// sinusoidal ramp inside [S0-Kappa, S0+Kappa].
				switch {
				case Se < left:
					Eps[k] = 0.
				case Se >= left && Se <= right:
					Eps[k] = 0.5 * eps0 * (1. + math.Sin(math.Pi*oo2kappa*(Se-sd.S0)))
				case Se > right:
					Eps[k] = eps0
				}
			}
			wg.Done()
		}(np)
	}
	wg.Wait()
	// Second pass: fold element viscosities into shared vertex maxima.
	for np := 0; np < sd.PMap.ParallelDegree; np++ {
		wg.Add(1)
		go sd.propagateEpsilonMaxToVertices(np, &wg)
	}
	wg.Wait()
}
// moment returns the relative energy of the highest solution mode for element
// k: the sum over nodes of ((q - qClipped)/q)^2, where qClipped is the
// solution with its top polynomial mode removed via the Clipper matrix.
// U and UClipped are caller-supplied Np x 1 scratch matrices.
// NOTE(review): the division assumes nonzero density at every node — confirm.
func (sd *ScalarDissipation) moment(k, Kmax int, U, UClipped, Rho utils.Matrix) (m float64) {
	var (
		Np            = sd.dfr.SolutionElement.Np
		UD, UClippedD = U.DataP, UClipped.DataP
	)
	// Gather element k's density values (stride Kmax) into the scratch vector.
	for i := 0; i < Np; i++ {
		ind := k + i*Kmax
		U.DataP[i] = Rho.DataP[ind]
	}
	/*
		Evaluate the L2 moment of (q - qalt) over the element, where qalt is the truncated version of q
		Here we don't bother using quadrature, we do a simple sum
	*/
	UClipped = sd.Clipper.Mul(U, UClipped)
	for i := 0; i < Np; i++ {
		t1 := UD[i] - UClippedD[i]
		m += t1 * t1 / (UD[i] * UD[i])
	}
	return
}
func (sd *ScalarDissipation) createInterpolationStencil() {
var (
Np = sd.dfr.FluxElement.Np
R, S = sd.dfr.FluxElement.R, sd.dfr.FluxElement.S
RRinv utils.Matrix
err error
)
sd.BaryCentricCoords = utils.NewMatrix(Np, 3)
// Set up unit triangle matrix with vertices in order
RR := utils.NewMatrix(3, 3)
RR.Set(0, 0, 1.)
RR.Set(0, 1, 1.)
RR.Set(0, 2, 1.)
RR.Set(1, 0, -1)
RR.Set(2, 0, -1)
RR.Set(1, 1, 1)
RR.Set(2, 1, -1)
RR.Set(1, 2, -1)
RR.Set(2, 2, 1)
if RRinv, err = RR.Inverse(); err != nil {
panic(err)
}
C := utils.NewMatrix(3, 1)
C.DataP[0] = 1
for i := 0; i < Np; i++ {
C.DataP[1] = R.DataP[i]
C.DataP[2] = S.DataP[i]
LAM := RRinv.Mul(C)
for ii := 0; ii < 3; ii++ {
sd.BaryCentricCoords.DataP[ii+3*i] = LAM.DataP[ii]
}
}
} | model_problems/Euler2D/dissipation.go | 0.600423 | 0.427188 | dissipation.go | starcoder |
package pg_query
import "encoding/json"
/* ----------------
* ArrayRef: describes an array subscripting operation
*
* An ArrayRef can describe fetching a single element from an array,
* fetching a subarray (array slice), storing a single element into
* an array, or storing a slice. The "store" cases work with an
* initial array value and a source value that is inserted into the
* appropriate part of the array; the result of the operation is an
* entire new modified array value.
*
* If reflowerindexpr = NIL, then we are fetching or storing a single array
* element at the subscripts given by refupperindexpr. Otherwise we are
* fetching or storing an array slice, that is a rectangular subarray
* with lower and upper bounds given by the index expressions.
* reflowerindexpr must be the same length as refupperindexpr when it
* is not NIL.
*
* In the slice case, individual expressions in the subscript lists can be
* NULL, meaning "substitute the array's current lower or upper bound".
*
* Note: the result datatype is the element type when fetching a single
* element; but it is the array type when doing subarray fetch or either
* type of store.
*
* Note: for the cases where an array is returned, if refexpr yields a R/W
* expanded array, then the implementation is allowed to modify that object
* in-place and return the same object.)
* ----------------
*/
// ArrayRef is the parse-tree node for an array subscripting operation; see
// the extended commentary above for fetch vs. store and slice semantics.
type ArrayRef struct {
	Xpr Node `json:"xpr"`
	Refarraytype Oid `json:"refarraytype"` /* type of the array proper */
	Refelemtype Oid `json:"refelemtype"` /* type of the array elements */
	Reftypmod int32 `json:"reftypmod"` /* typmod of the array (and elements too) */
	Refcollid Oid `json:"refcollid"` /* OID of collation, or InvalidOid if none */
	Refupperindexpr List `json:"refupperindexpr"` /* expressions that evaluate to upper
	 * array indexes */
	Reflowerindexpr List `json:"reflowerindexpr"` /* expressions that evaluate to lower
	 * array indexes, or NIL for single array
	 * element */
	Refexpr Node `json:"refexpr"` /* the expression that evaluates to an array
	 * value */
	Refassgnexpr Node `json:"refassgnexpr"` /* expression for the source value, or NULL if
	 * fetch */
}
// MarshalJSON encodes the node wrapped under an "ArrayRef" key. The local
// alias type strips the MarshalJSON method so the marshal does not recurse.
func (node ArrayRef) MarshalJSON() ([]byte, error) {
	type ArrayRefMarshalAlias ArrayRef
	wrapper := map[string]interface{}{
		"ArrayRef": (*ArrayRefMarshalAlias)(&node),
	}
	return json.Marshal(wrapper)
}
func (node *ArrayRef) UnmarshalJSON(input []byte) (err error) {
var fields map[string]json.RawMessage
err = json.Unmarshal(input, &fields)
if err != nil {
return
}
if fields["xpr"] != nil {
node.Xpr, err = UnmarshalNodeJSON(fields["xpr"])
if err != nil {
return
}
}
if fields["refarraytype"] != nil {
err = json.Unmarshal(fields["refarraytype"], &node.Refarraytype)
if err != nil {
return
}
}
if fields["refelemtype"] != nil {
err = json.Unmarshal(fields["refelemtype"], &node.Refelemtype)
if err != nil {
return
}
}
if fields["reftypmod"] != nil {
err = json.Unmarshal(fields["reftypmod"], &node.Reftypmod)
if err != nil {
return
}
}
if fields["refcollid"] != nil {
err = json.Unmarshal(fields["refcollid"], &node.Refcollid)
if err != nil {
return
}
}
if fields["refupperindexpr"] != nil {
node.Refupperindexpr.Items, err = UnmarshalNodeArrayJSON(fields["refupperindexpr"])
if err != nil {
return
}
}
if fields["reflowerindexpr"] != nil {
node.Reflowerindexpr.Items, err = UnmarshalNodeArrayJSON(fields["reflowerindexpr"])
if err != nil {
return
}
}
if fields["refexpr"] != nil {
node.Refexpr, err = UnmarshalNodeJSON(fields["refexpr"])
if err != nil {
return
}
}
if fields["refassgnexpr"] != nil {
node.Refassgnexpr, err = UnmarshalNodeJSON(fields["refassgnexpr"])
if err != nil {
return
}
}
return
} | nodes/array_ref.go | 0.688887 | 0.558869 | array_ref.go | starcoder |
package vector
import (
"bytes"
"io"
"os"
"sync/atomic"
"github.com/RoaringBitmap/roaring/roaring64"
"github.com/matrixorigin/matrixone/pkg/container/nulls"
"github.com/matrixorigin/matrixone/pkg/container/types"
gvec "github.com/matrixorigin/matrixone/pkg/container/vector"
"github.com/matrixorigin/matrixone/pkg/encoding"
"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/buffer/base"
"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/common"
"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/container"
)
// StdVectorConstructor adapts NewStdVectorNode to the generic memory-node
// constructor signature.
func StdVectorConstructor(vf common.IVFile, useCompress bool, freeFunc base.MemoryFreeFunc) base.IMemoryNode {
	node := NewStdVectorNode(vf, useCompress, freeFunc)
	return node
}
// NewStdVector builds a fixed-width vector of type t, backed by a buffer from
// the global memory pool sized for capacity elements. Data starts at zero
// length with the full byte capacity reserved.
func NewStdVector(t types.Type, capacity uint64) *StdVector {
	byteCap := capacity * uint64(t.Size)
	node := common.GPool.Alloc(byteCap)
	v := &StdVector{
		BaseVector: BaseVector{
			Type:  t,
			VMask: &nulls.Nulls{},
		},
	}
	v.MNode = node
	v.Data = node.Buf[:0:byteCap]
	return v
}
// NewStdVectorNode builds a file-backed vector memory node; the data buffer
// is allocated later via PlacementNew.
func NewStdVectorNode(vf common.IVFile, useCompress bool, freeFunc base.MemoryFreeFunc) base.IMemoryNode {
	node := &StdVector{
		BaseVector:  BaseVector{VMask: &nulls.Nulls{}},
		Data:        make([]byte, 0),
		File:        vf,
		UseCompress: useCompress,
		FreeFunc:    freeFunc,
	}
	return node
}
// NewEmptyStdVector builds a zero-length, unbacked vector with an empty null
// mask; its contents are expected to come from Unmarshal/ReadFrom.
func NewEmptyStdVector() *StdVector {
	v := &StdVector{
		BaseVector: BaseVector{VMask: &nulls.Nulls{}},
		Data:       make([]byte, 0),
	}
	return v
}
// PlacementNew (re)allocates the backing buffer from the global pool, sized
// to the file's uncompressed (origin) size, and resets Data to zero length
// over that capacity. Any previously held pool node is freed first.
func (v *StdVector) PlacementNew(t types.Type) {
	v.Type = t
	capacity := uint64(v.File.Stat().OriginSize())
	if v.MNode != nil {
		common.GPool.Free(v.MNode)
	}
	v.MNode = common.GPool.Alloc(capacity)
	v.Data = v.MNode.Buf[:0:capacity]
}
// GetType identifies this implementation as the standard (fixed-width) vector.
func (v *StdVector) GetType() container.VectorType {
	return container.StdVec
}
// Close returns the pooled memory node (if any) to the pool and drops the
// null-mask and data references. The pool node is nilled out after the free
// so a subsequent Close or FreeMemory cannot return the same node twice.
func (v *StdVector) Close() error {
	if v.MNode != nil {
		common.GPool.Free(v.MNode)
		v.MNode = nil
	}
	v.VMask = nil
	v.Data = nil
	return nil
}
// Capacity reports how many elements of the vector's type fit in the backing buffer.
func (v *StdVector) Capacity() int {
	return cap(v.Data) / int(v.Type.Size)
}

// dataBytes reports the byte capacity of the backing buffer.
func (v *StdVector) dataBytes() int {
	return cap(v.Data)
}
// FreeMemory returns the pooled node (if any) to the global pool and invokes
// the registered free callback.
// NOTE(review): MNode is not nilled out after the free, so overlapping release
// paths could free the same node twice — confirm GPool.Free tolerates this.
func (v *StdVector) FreeMemory() {
	if v.MNode != nil {
		common.GPool.Free(v.MNode)
	}
	if v.FreeFunc != nil {
		v.FreeFunc(v)
	}
}
// GetMemorySize reports the number of bytes currently holding data, under the
// read lock.
func (v *StdVector) GetMemorySize() uint64 {
	v.RLock()
	size := uint64(len(v.Data))
	v.RUnlock()
	return size
}

// GetMemoryCapacity reports the on-disk footprint: the compressed size when
// compression is in use, otherwise the original (uncompressed) size.
func (v *StdVector) GetMemoryCapacity() uint64 {
	stat := v.File.Stat()
	if v.UseCompress {
		return uint64(stat.Size())
	}
	return uint64(stat.OriginSize())
}
// SetValue overwrites the element at idx with val (which must match the
// vector's type), clearing any null mark on that slot. Returns
// ErrVecInvalidOffset for out-of-range idx, ErrVecWriteRo for read-only
// vectors and ErrVecTypeNotSupport for unsupported element types.
// The twelve per-type cases previously each duplicated the copy/return tail;
// the value is now encoded once and copied in a single place.
func (v *StdVector) SetValue(idx int, val interface{}) error {
	if idx >= v.Length() || idx < 0 {
		return ErrVecInvalidOffset
	}
	if v.IsReadonly() {
		return ErrVecWriteRo
	}
	v.Lock()
	defer v.Unlock()
	// The slot is receiving a concrete value: clear its null flag.
	if v.VMask != nil && v.VMask.Np != nil && v.VMask.Np.Contains(uint64(idx)) {
		v.VMask.Np.Flip(uint64(idx), uint64(idx))
	}
	var data []byte
	switch v.Type.Oid {
	case types.T_int8:
		data = encoding.EncodeInt8(val.(int8))
	case types.T_int16:
		data = encoding.EncodeInt16(val.(int16))
	case types.T_int32:
		data = encoding.EncodeInt32(val.(int32))
	case types.T_int64:
		data = encoding.EncodeInt64(val.(int64))
	case types.T_uint8:
		data = encoding.EncodeUint8(val.(uint8))
	case types.T_uint16:
		data = encoding.EncodeUint16(val.(uint16))
	case types.T_uint32:
		data = encoding.EncodeUint32(val.(uint32))
	case types.T_uint64:
		data = encoding.EncodeUint64(val.(uint64))
	case types.T_float32:
		data = encoding.EncodeFloat32(val.(float32))
	case types.T_float64:
		data = encoding.EncodeFloat64(val.(float64))
	// case types.T_decimal:
	case types.T_date:
		data = encoding.EncodeDate(val.(types.Date))
	case types.T_datetime:
		data = encoding.EncodeDatetime(val.(types.Datetime))
	default:
		return ErrVecTypeNotSupport
	}
	start := idx * int(v.Type.Size)
	copy(v.Data[start:start+int(v.Type.Size)], data)
	return nil
}
// GetValue decodes and returns the element at idx. The read lock is taken
// only while the byte slice is extracted, and only for mutable vectors
// (read-only vectors can no longer change underneath us).
// NOTE(review): T_decimal64 is supported by appendWithOffset but not decoded
// here — confirm whether that omission is intentional.
func (v *StdVector) GetValue(idx int) (interface{}, error) {
	if idx >= v.Length() || idx < 0 {
		return nil, ErrVecInvalidOffset
	}
	if !v.IsReadonly() {
		v.RLock()
	}
	start := idx * int(v.Type.Size)
	data := v.Data[start : start+int(v.Type.Size)]
	if !v.IsReadonly() {
		v.RUnlock()
	}
	switch v.Type.Oid {
	case types.T_int8:
		return encoding.DecodeInt8(data), nil
	case types.T_int16:
		return encoding.DecodeInt16(data), nil
	case types.T_int32:
		return encoding.DecodeInt32(data), nil
	case types.T_int64:
		return encoding.DecodeInt64(data), nil
	case types.T_uint8:
		return encoding.DecodeUint8(data), nil
	case types.T_uint16:
		return encoding.DecodeUint16(data), nil
	case types.T_uint32:
		return encoding.DecodeUint32(data), nil
	case types.T_uint64:
		return encoding.DecodeUint64(data), nil
	case types.T_float32:
		return encoding.DecodeFloat32(data), nil
	case types.T_float64:
		return encoding.DecodeFloat64(data), nil
	// case types.T_decimal:
	case types.T_date:
		return encoding.DecodeDate(data), nil
	case types.T_datetime:
		return encoding.DecodeDatetime(data), nil
	default:
		return nil, ErrVecTypeNotSupport
	}
}
// Append appends n values from vals (a typed slice matching the vector type)
// and refreshes the packed StatMask: the position bits carry the new logical
// length, and the vector becomes read-only once the buffer is full.
func (v *StdVector) Append(n int, vals interface{}) error {
	if v.IsReadonly() {
		return ErrVecWriteRo
	}
	v.Lock()
	defer v.Unlock()
	err := v.appendWithOffset(0, n, vals)
	if err != nil {
		return err
	}
	// Rebuild the status word: keep the flag bits, refresh the position field.
	mask := v.StatMask & (^container.PosMask)
	pos := uint64(len(v.Data)/int(v.Type.Size)) & container.PosMask
	mask = mask | pos
	if len(v.Data) == cap(v.Data) {
		mask = mask | container.ReadonlyMask
	}
	atomic.StoreUint64(&v.StatMask, mask)
	return nil
}
// appendWithOffset encodes vals[offset:offset+n] into bytes and appends them
// to the data buffer. The caller must hold the write lock and must have
// verified capacity; exceeding capacity returns ErrVecInvalidOffset.
func (v *StdVector) appendWithOffset(offset, n int, vals interface{}) error {
	var data []byte
	switch v.Type.Oid {
	case types.T_int8:
		data = encoding.EncodeInt8Slice(vals.([]int8)[offset : offset+n])
	case types.T_int16:
		data = encoding.EncodeInt16Slice(vals.([]int16)[offset : offset+n])
	case types.T_int32:
		data = encoding.EncodeInt32Slice(vals.([]int32)[offset : offset+n])
	case types.T_int64:
		data = encoding.EncodeInt64Slice(vals.([]int64)[offset : offset+n])
	case types.T_uint8:
		data = encoding.EncodeUint8Slice(vals.([]uint8)[offset : offset+n])
	case types.T_uint16:
		data = encoding.EncodeUint16Slice(vals.([]uint16)[offset : offset+n])
	case types.T_uint32:
		data = encoding.EncodeUint32Slice(vals.([]uint32)[offset : offset+n])
	case types.T_uint64:
		data = encoding.EncodeUint64Slice(vals.([]uint64)[offset : offset+n])
	case types.T_decimal64:
		data = encoding.EncodeDecimal64Slice(vals.([]types.Decimal64)[offset : offset+n])
	case types.T_float32:
		data = encoding.EncodeFloat32Slice(vals.([]float32)[offset : offset+n])
	case types.T_float64:
		data = encoding.EncodeFloat64Slice(vals.([]float64)[offset : offset+n])
	case types.T_date:
		data = encoding.EncodeDateSlice(vals.([]types.Date)[offset : offset+n])
	case types.T_datetime:
		data = encoding.EncodeDatetimeSlice(vals.([]types.Datetime)[offset : offset+n])
	default:
		return ErrVecTypeNotSupport
	}
	// The backing buffer must never grow: appending past cap would reallocate
	// away from the pooled node.
	if len(v.Data)+len(data) > cap(v.Data) {
		return ErrVecInvalidOffset
	}
	v.Data = append(v.Data, data...)
	return nil
}
// AppendVector appends as many values of vec (starting at offset) as fit in
// this vector's remaining capacity, carrying over null marks, and returns the
// number of values appended. The StatMask is refreshed afterwards: position
// bits carry the new length, the vector becomes read-only when full, and the
// null flag is set when any nulls are present.
func (v *StdVector) AppendVector(vec *gvec.Vector, offset int) (n int, err error) {
	if offset < 0 || offset >= gvec.Length(vec) {
		return n, ErrVecInvalidOffset
	}
	if v.IsReadonly() {
		return 0, ErrVecWriteRo
	}
	v.Lock()
	defer v.Unlock()
	// Clamp the copy count to the remaining capacity of this vector.
	n = v.Capacity() - v.Length()
	if n > gvec.Length(vec)-offset {
		n = gvec.Length(vec) - offset
	}
	startRow := v.Length()
	err = v.appendWithOffset(offset, n, vec.Col)
	if err != nil {
		return n, err
	}
	if vec.Nsp.Np != nil {
		// Carry over null marks for exactly the n rows that were appended.
		// (Previously this iterated gvec.Length(vec) rows, marking nulls past
		// the appended range and probing source positions beyond its end.)
		for row := startRow; row < startRow+n; row++ {
			if nulls.Contains(vec.Nsp, uint64(offset+row-startRow)) {
				nulls.Add(v.VMask, uint64(row))
			}
		}
	}
	mask := v.StatMask & (^container.PosMask)
	pos := uint64(len(v.Data)/int(v.Type.Size)) & container.PosMask
	mask = mask | pos
	if len(v.Data) == cap(v.Data) {
		mask = mask | container.ReadonlyMask
	}
	if nulls.Any(v.VMask) {
		mask = mask | container.HasNullMask
	}
	atomic.StoreUint64(&v.StatMask, mask)
	return n, err
}
// SliceReference returns a read-only view over rows [start, end) sharing the
// backing bytes (no copy). The source must already be read-only; the view's
// null mask is a windowed copy of the source's.
func (v *StdVector) SliceReference(start, end int) (container.IVectorReader, error) {
	if !v.IsReadonly() {
		return nil, ErrVecNotRo
	}
	startIdx := start * int(v.Type.Size)
	endIdx := end * int(v.Type.Size)
	// New status word: read-only with the view's row count in the position bits.
	mask := container.ReadonlyMask | (uint64(end-start) & container.PosMask)
	vec := &StdVector{
		BaseVector: BaseVector{
			Type: v.Type,
		},
		Data: v.Data[startIdx:endIdx],
	}
	if v.VMask.Np != nil {
		vmask := nulls.Range(v.VMask, uint64(start), uint64(end), &nulls.Nulls{})
		vec.VMask = vmask
		if nulls.Any(vmask) {
			mask = mask | container.HasNullMask
		}
	} else {
		vec.VMask = &nulls.Nulls{}
	}
	vec.StatMask = mask
	// Full slice expression pins capacity so the view cannot grow into the parent.
	vec.Data = vec.Data[:len(vec.Data):len(vec.Data)]
	return vec, nil
}
// func (v *StdVector) SetNull(idx int) error {
// v.Lock()
// mask := atomic.LoadUint64(&v.StatMask)
// if mask&ReadonlyMask != 0 {
// return VecWriteRoErr
// }
// pos := mask | PosMask
// if idx >= int(pos) {
// return VecInvalidOffsetErr
// }
// v.Unlock()
// newMask := mask | HasNullMask
// v.VMask.Add(uint64(idx))
// }
// GetLatestView returns a read-only snapshot of the rows present at call
// time, sharing the backing bytes. For mutable sources the null mask is
// range-copied under the read lock; for read-only sources the bitmap is cloned.
func (v *StdVector) GetLatestView() IVector {
	if !v.IsReadonly() {
		v.RLock()
		defer v.RUnlock()
	}
	mask := atomic.LoadUint64(&v.StatMask)
	endPos := int(mask & container.PosMask)
	endIdx := endPos * int(v.Type.Size)
	vec := &StdVector{
		BaseVector: BaseVector{
			StatMask: container.ReadonlyMask | mask,
			Type:     v.Type,
		},
		Data: v.Data[0:endIdx],
	}
	if mask&container.HasNullMask != 0 {
		if mask&container.ReadonlyMask == 0 {
			vec.VMask = nulls.Range(v.VMask, 0, uint64(endPos), &nulls.Nulls{})
		} else {
			vec.VMask = &nulls.Nulls{}
			vec.VMask.Np = v.VMask.Np.Clone()
		}
	} else {
		vec.VMask = &nulls.Nulls{}
	}
	// Pin capacity so the view cannot append into the parent's buffer.
	vec.Data = vec.Data[:len(vec.Data):len(vec.Data)]
	return vec
}
// Window returns a read-only view over rows [start, end) sharing the backing
// bytes, with end clamped to the current row count. The windowed null bitmap
// is now materialized regardless of the source's read-only state: previously
// the read-only-with-nulls path left the view's VMask nil, which risked nil
// dereferences downstream.
func (v *StdVector) Window(start, end uint32) IVector {
	if !v.IsReadonly() {
		v.RLock()
		defer v.RUnlock()
	}
	mask := atomic.LoadUint64(&v.StatMask)
	endPos := int(mask & container.PosMask)
	mask = mask & ^container.PosMask
	if end > uint32(endPos) {
		end = uint32(endPos)
	}
	// New status word carries the window's row count in the position bits.
	newPos := uint64(end-start) & container.PosMask
	newMask := mask | newPos
	startIdx := int(start) * int(v.Type.Size)
	endIdx := int(end) * int(v.Type.Size)
	vec := &StdVector{
		BaseVector: BaseVector{
			StatMask: container.ReadonlyMask | newMask,
			Type:     v.Type,
		},
		Data: v.Data[startIdx:endIdx],
	}
	if mask&container.HasNullMask != 0 {
		var np *roaring64.Bitmap
		if v.VMask != nil {
			np = common.BitMap64Window(v.VMask.Np, int(start), int(end))
		}
		vec.VMask = &nulls.Nulls{Np: np}
	} else {
		vec.VMask = &nulls.Nulls{}
	}
	// Pin capacity so the view cannot append into the parent's buffer.
	vec.Data = vec.Data[:len(vec.Data):len(vec.Data)]
	return vec
}
// CopyToVectorWithBuffer materializes a read-only StdVector into a general
// gvec.Vector by serializing [type][null size][null bitmap][data] into the
// caller-supplied deCompressed buffer and letting vec.Read parse it. The
// compressed buffer parameter is currently unused here.
func (v *StdVector) CopyToVectorWithBuffer(compressed *bytes.Buffer, deCompressed *bytes.Buffer) (*gvec.Vector, error) {
	if atomic.LoadUint64(&v.StatMask)&container.ReadonlyMask == 0 {
		return nil, ErrVecNotRo
	}
	nullSize := 0
	var nullbuf []byte
	var err error
	if nulls.Any(v.VMask) {
		nullbuf, err = v.VMask.Show()
		if err != nil {
			return nil, err
		}
		nullSize = len(nullbuf)
	}
	length := v.Length()
	vec := gvec.New(v.Type)
	// Layout: type header, 4-byte null size, null bitmap, raw element bytes.
	capacity := encoding.TypeSize + 4 + nullSize + length*int(v.Type.Size)
	deCompressed.Reset()
	if capacity > deCompressed.Cap() {
		deCompressed.Grow(capacity)
	}
	buf := deCompressed.Bytes()
	buf = buf[:capacity]
	dBuf := buf
	copy(dBuf, encoding.EncodeType(v.Type))
	dBuf = dBuf[encoding.TypeSize:]
	copy(dBuf, encoding.EncodeUint32(uint32(nullSize)))
	dBuf = dBuf[4:]
	if nullSize > 0 {
		copy(dBuf, nullbuf)
		dBuf = dBuf[nullSize:]
	}
	copy(dBuf, v.Data)
	err = vec.Read(buf)
	if err != nil {
		return nil, err
	}
	return vec, nil
}
// Clone deep-copies the vector: backing bytes, null bitmap, status mask and
// the type descriptor.
func (v *StdVector) Clone() *StdVector {
	data := make([]byte, len(v.Data))
	copy(data, v.Data)
	mask := &nulls.Nulls{}
	if np := v.VMask.Np; np != nil {
		mask.Np = np.Clone()
	}
	return &StdVector{
		Data: data,
		BaseVector: BaseVector{
			VMask:    mask,
			StatMask: v.StatMask,
			Type: types.Type{
				Oid:   v.Type.Oid,
				Size:  v.Type.Size,
				Width: v.Type.Width,
			},
		},
	}
}
// CopyToVector materializes a read-only StdVector into a general gvec.Vector,
// decoding the raw bytes into a typed column slice and copying the null mask.
// The null-mask copy was identical in all thirteen type cases and is now done
// once after the switch (the default case returns before reaching it); the
// float64 case's `copy(col[0:], ...)` spelling is normalized to `copy(col, ...)`.
func (v *StdVector) CopyToVector() (*gvec.Vector, error) {
	if atomic.LoadUint64(&v.StatMask)&container.ReadonlyMask == 0 {
		return nil, ErrVecNotRo
	}
	length := v.Length()
	vec := gvec.New(v.Type)
	vec.Data = v.Data
	switch v.Type.Oid {
	case types.T_int8:
		col := make([]int8, length)
		copy(col, encoding.DecodeInt8Slice(v.Data)[:length])
		vec.Col = col
	case types.T_int16:
		col := make([]int16, length)
		copy(col, encoding.DecodeInt16Slice(v.Data)[:length])
		vec.Col = col
	case types.T_int32:
		col := make([]int32, length)
		copy(col, encoding.DecodeInt32Slice(v.Data)[:length])
		vec.Col = col
	case types.T_int64:
		col := make([]int64, length)
		copy(col, encoding.DecodeInt64Slice(v.Data)[:length])
		vec.Col = col
	case types.T_uint8:
		col := make([]uint8, length)
		copy(col, encoding.DecodeUint8Slice(v.Data)[:length])
		vec.Col = col
	case types.T_uint16:
		col := make([]uint16, length)
		copy(col, encoding.DecodeUint16Slice(v.Data)[:length])
		vec.Col = col
	case types.T_uint32:
		col := make([]uint32, length)
		copy(col, encoding.DecodeUint32Slice(v.Data)[:length])
		vec.Col = col
	case types.T_uint64:
		col := make([]uint64, length)
		copy(col, encoding.DecodeUint64Slice(v.Data)[:length])
		vec.Col = col
	case types.T_decimal64:
		col := make([]types.Decimal64, length)
		copy(col, encoding.DecodeDecimal64Slice(v.Data)[:length])
		vec.Col = col
	case types.T_float32:
		col := make([]float32, length)
		copy(col, encoding.DecodeFloat32Slice(v.Data)[:length])
		vec.Col = col
	case types.T_float64:
		col := make([]float64, length)
		copy(col, encoding.DecodeFloat64Slice(v.Data)[:length])
		vec.Col = col
	case types.T_date:
		col := make([]types.Date, length)
		copy(col, encoding.DecodeDateSlice(v.Data)[:length])
		vec.Col = col
	case types.T_datetime:
		col := make([]types.Datetime, length)
		copy(col, encoding.DecodeDatetimeSlice(v.Data)[:length])
		vec.Col = col
	default:
		return nil, ErrVecTypeNotSupport
	}
	vec.Nsp = nulls.Range(v.VMask, uint64(0), uint64(length), &nulls.Nulls{})
	return vec, nil
}
// WriteTo serializes the vector via Marshal and writes the bytes to w,
// returning the number of bytes written and any marshal/write error.
func (vec *StdVector) WriteTo(w io.Writer) (n int64, err error) {
	var buf []byte
	if buf, err = vec.Marshal(); err != nil {
		return n, err
	}
	var written int
	written, err = w.Write(buf)
	return int64(written), err
}
// ReadFrom deserializes a vector produced by WriteTo/Marshal from r. The
// first 8 bytes of the stream carry the total serialized size; the whole
// payload (including that header) is then handed to Unmarshal. Returns the
// payload size consumed and any read/unmarshal error.
func (vec *StdVector) ReadFrom(r io.Reader) (n int64, err error) {
	capBuf := make([]byte, 8)
	// io.ReadFull guards against short reads: a bare Read may legally return
	// fewer than 8 bytes without an error.
	if _, err = io.ReadFull(r, capBuf); err != nil {
		return n, err
	}
	// TODO: will remove below os.File type check.
	// For seekable files, rewind so the size header is re-read as part of the
	// payload below.
	switch f := r.(type) {
	case *os.File:
		if _, err = f.Seek(0, io.SeekStart); err != nil {
			return n, err
		}
	}
	realSize := encoding.DecodeUint64(capBuf)
	buf := make([]byte, realSize)
	// Previously a bare Read: a short read here silently produced a corrupt,
	// zero-padded buffer. ReadFull turns truncated input into an explicit error.
	if _, err = io.ReadFull(r, buf); err != nil {
		return n, err
	}
	// Restore the size header for readers that could not be rewound.
	copy(buf, capBuf)
	err = vec.Unmarshal(buf)
	return int64(realSize), err
}
// Unmarshal decodes a vector from the layout produced by Marshal:
// capacity (8B) | stat mask (8B) | type | null-bitmap size (4B) | bitmap | data.
// A nil/empty input is a no-op.
func (vec *StdVector) Unmarshal(data []byte) error {
	if len(data) == 0 { // covers nil too; the former nil check was redundant
		return nil
	}
	buf := data
	vec.NodeCapacity = encoding.DecodeUint64(buf[:8])
	buf = buf[8:]
	vec.StatMask = encoding.DecodeUint64(buf[:8])
	buf = buf[8:]
	vec.Type = encoding.DecodeType(buf[:encoding.TypeSize])
	buf = buf[encoding.TypeSize:]
	nb := encoding.DecodeUint32(buf[:4])
	buf = buf[4:]
	if nb > 0 {
		if err := vec.VMask.Read(buf[:nb]); err != nil {
			return err
		}
		buf = buf[nb:]
	}
	if vec.MNode != nil {
		// Memory-node backed: copy into the vector's own storage.
		vec.Data = vec.Data[:len(buf)]
		copy(vec.Data, buf)
	} else {
		// Otherwise alias the remaining input; the caller must not mutate
		// data after this call.
		vec.Data = buf
	}
	return nil
}
// Marshal encodes the vector as: total length (8B, patched in at the end) |
// stat mask (8B) | type | null-bitmap size (4B) | bitmap | raw data. It also
// records the encoded size in vec.NodeCapacity.
func (vec *StdVector) Marshal() ([]byte, error) {
	var out bytes.Buffer
	// Reserve 8 bytes for the total length; overwritten below once known.
	out.Write(encoding.EncodeUint64(uint64(0)))
	out.Write(encoding.EncodeUint64(vec.StatMask))
	out.Write(encoding.EncodeType(vec.Type))
	mask, err := vec.VMask.Show()
	if err != nil {
		return nil, err
	}
	out.Write(encoding.EncodeUint32(uint32(len(mask))))
	if len(mask) > 0 {
		out.Write(mask)
	}
	out.Write(vec.Data)
	result := out.Bytes()
	copy(result, encoding.EncodeUint64(uint64(len(result))))
	vec.NodeCapacity = uint64(len(result))
	return result, nil
}
func (vec *StdVector) Reset() {
vec.Data = nil
} | pkg/vm/engine/tae/container/vector/stdvec.go | 0.509032 | 0.430626 | stdvec.go | starcoder |
package iso20022
// SwitchOrder2 is a transfer from one investment fund/fund class to another
// investment fund or investment fund class by the investor. A switch is
// composed of one or several subscription legs, and one or several
// redemption legs.
type SwitchOrder2 struct {
	// Date and time at which the order was placed by the investor.
	OrderDateTime *ISODateTime `xml:"OrdrDtTm,omitempty"`
	// Unique and unambiguous identifier for an order, as assigned by the instructing party.
	OrderReference *Max35Text `xml:"OrdrRef"`
	// Account between an investor(s) and a fund manager or a fund. The account can contain holdings in any investment fund or investment fund class managed (or distributed) by the fund manager, within the same fund family.
	InvestmentAccountDetails *InvestmentAccount13 `xml:"InvstmtAcctDtls"`
	// Amount of money used to derive the quantity of investment fund units to be redeemed.
	TotalRedemptionAmount *ActiveOrHistoricCurrencyAndAmount `xml:"TtlRedAmt,omitempty"`
	// Amount of money used to derive the quantity of investment fund units to be subscribed.
	TotalSubscriptionAmount *ActiveOrHistoricCurrencyAndAmount `xml:"TtlSbcptAmt,omitempty"`
	// Date on which the order expires.
	ExpiryDateTime *ISODateTime `xml:"XpryDtTm,omitempty"`
	// Additional amount of money paid by the investor in addition to the switch redemption amount.
	AdditionalCashIn *ActiveOrHistoricCurrencyAndAmount `xml:"AddtlCshIn,omitempty"`
	// Amount of money that results from a switch-out, that is not reinvested in another investment fund, and is repaid to the investor.
	ResultingCashOut *ActiveOrHistoricCurrencyAndAmount `xml:"RsltgCshOut,omitempty"`
	// Cancellation right of an investor with respect to an investment fund order.
	CancellationRight *CancellationRight1 `xml:"CxlRght,omitempty"`
	// Part of an investment fund switch order that is a redemption.
	RedemptionLegDetails []*SwitchRedemptionLegOrder2 `xml:"RedLegDtls"`
	// Part of an investment fund switch order that is a subscription.
	SubscriptionLegDetails []*SwitchSubscriptionLegOrder2 `xml:"SbcptLegDtls"`
	// Payment transaction resulting from the investment fund order execution.
	CashSettlementDetails *PaymentTransaction20 `xml:"CshSttlmDtls,omitempty"`
	// Information needed to process a currency exchange or conversion.
	ForeignExchangeDetails *ForeignExchangeTerms5 `xml:"FXDtls,omitempty"`
}
// SetOrderDateTime records the time at which the investor placed the order.
func (s *SwitchOrder2) SetOrderDateTime(value string) {
	s.OrderDateTime = (*ISODateTime)(&value)
}
// SetOrderReference sets the instructing party's unique order identifier.
func (s *SwitchOrder2) SetOrderReference(value string) {
	s.OrderReference = (*Max35Text)(&value)
}
// AddInvestmentAccountDetails initializes InvestmentAccountDetails and
// returns it for the caller to populate.
func (s *SwitchOrder2) AddInvestmentAccountDetails() *InvestmentAccount13 {
	s.InvestmentAccountDetails = new(InvestmentAccount13)
	return s.InvestmentAccountDetails
}
// SetTotalRedemptionAmount sets the redemption amount with its currency.
func (s *SwitchOrder2) SetTotalRedemptionAmount(value, currency string) {
	s.TotalRedemptionAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}
// SetTotalSubscriptionAmount sets the subscription amount with its currency.
func (s *SwitchOrder2) SetTotalSubscriptionAmount(value, currency string) {
	s.TotalSubscriptionAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}
// SetExpiryDateTime sets the date on which the order expires.
func (s *SwitchOrder2) SetExpiryDateTime(value string) {
	s.ExpiryDateTime = (*ISODateTime)(&value)
}
// SetAdditionalCashIn sets the extra cash paid by the investor on top of the
// switch redemption amount.
func (s *SwitchOrder2) SetAdditionalCashIn(value, currency string) {
	s.AdditionalCashIn = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}
// SetResultingCashOut sets the switch-out cash repaid to the investor rather
// than reinvested.
func (s *SwitchOrder2) SetResultingCashOut(value, currency string) {
	s.ResultingCashOut = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}
// AddCancellationRight initializes CancellationRight and returns it for the
// caller to populate.
func (s *SwitchOrder2) AddCancellationRight() *CancellationRight1 {
	s.CancellationRight = new(CancellationRight1)
	return s.CancellationRight
}
// AddRedemptionLegDetails appends a new, empty redemption leg and returns it
// for the caller to populate.
func (s *SwitchOrder2) AddRedemptionLegDetails() *SwitchRedemptionLegOrder2 {
	newValue := new(SwitchRedemptionLegOrder2) // gofmt: no space after new
	s.RedemptionLegDetails = append(s.RedemptionLegDetails, newValue)
	return newValue
}
// AddSubscriptionLegDetails appends a new, empty subscription leg and returns
// it for the caller to populate.
func (s *SwitchOrder2) AddSubscriptionLegDetails() *SwitchSubscriptionLegOrder2 {
	newValue := new(SwitchSubscriptionLegOrder2) // gofmt: no space after new
	s.SubscriptionLegDetails = append(s.SubscriptionLegDetails, newValue)
	return newValue
}
// AddCashSettlementDetails initializes CashSettlementDetails and returns it
// for the caller to populate.
func (s *SwitchOrder2) AddCashSettlementDetails() *PaymentTransaction20 {
	s.CashSettlementDetails = new(PaymentTransaction20)
	return s.CashSettlementDetails
}
func (s *SwitchOrder2) AddForeignExchangeDetails() *ForeignExchangeTerms5 {
s.ForeignExchangeDetails = new(ForeignExchangeTerms5)
return s.ForeignExchangeDetails
} | SwitchOrder2.go | 0.78403 | 0.439988 | SwitchOrder2.go | starcoder |
package main
import (
"fmt"
"log"
)
var AstOps = []string{"+", "-", "*", "/"}
// interpretAST walks the AST rooted at n and returns the generated source
// text for it. Despite the name, this acts as a code generator: each operator
// case delegates to a genXxx helper that returns a source fragment built from
// the already-generated left/right sub-trees. Unknown operators abort the
// process via log.Fatalf.
func interpretAST(n *AstNode) string {
	var leftval, rightval string
	// Generate the left and right sub-trees first; leaf cases below ignore
	// these and read the node directly.
	if n.left != nil {
		leftval = interpretAST(n.left)
	}
	if n.right != nil {
		rightval = interpretAST(n.right)
	}
	switch n.op {
	case A_ADD:
		return genAdd(leftval, rightval)
	case A_SUBTRACT:
		return genSub(leftval, rightval)
	case A_MULTIPLY:
		return genMul(leftval, rightval)
	case A_DIVIDE:
		return genDiv(leftval, rightval)
	case A_EQ:
		return genEq(leftval, rightval)
	case A_NEQ:
		return genNeq(leftval, rightval)
	case A_GT:
		return genGt(leftval, rightval)
	case A_GE:
		return genGe(leftval, rightval)
	case A_LT:
		return genLt(leftval, rightval)
	case A_LE:
		return genLe(leftval, rightval)
	case A_INTLIT:
		return genNumber(n)
	case A_IDENT:
		return genIdent(n)
	case A_ASSIGNVAL:
		return genAssignVal(n)
	case A_ASSIGN:
		return genAssign(leftval, rightval)
	case A_PRINT:
		return genPrint(leftval)
	case A_IF:
		return genIf(n)
	case A_WHILE:
		return genWhile(n)
	case A_FUNC:
		return genFunction(n)
	case A_FUNC_CALL:
		return genFuncCall(n)
	case A_GLUETO:
		// Statement sequencing: concatenate both fragments.
		return leftval + rightval + "\n"
	case A_NODE:
		return genNodeDeclaration(n)
	default:
		log.Fatalf("Unknown AST operator %d\n", n.op)
		// Unreachable (Fatalf exits), but satisfies the compiler's
		// missing-return analysis.
		panic("Unknown AST operator")
	}
}
// genNodeDeclaration emits a node declaration as a Go closure assigned with
// ":=", its body generated from the node's left sub-tree.
func genNodeDeclaration(node *AstNode) string {
	name := getSymbolFromAst(node).name
	body := interpretAST(node.left)
	return fmt.Sprintf("%v :=func(){\n", name) + fmt.Sprintf(" %v }\n", body)
}
// genFunction emits a function definition as a Go closure assigned with "=":
// local-variable declarations followed by the generated body.
func genFunction(node *AstNode) string {
	sym := getSymbolFromAst(node)
	locals := genAllLocalVariables(node.left)
	body := interpretAST(node.left)
	return fmt.Sprintf("%v =func(){\n", sym.name) + fmt.Sprintf(" %v \n %v }\n", locals, body)
}
// genFuncCall emits a call to a previously generated function.
func genFuncCall(node *AstNode) string {
	return fmt.Sprintf("%v()", getSymbolFromAst(node).name)
}
// genWhile emits a while loop as a Go "for cond { body }" statement; the
// condition comes from node.left and the body from node.mid.
func genWhile(node *AstNode) string {
	cond := interpretAST(node.left)
	body := interpretAST(node.mid)
	return fmt.Sprintf("for %v {\n", cond) + fmt.Sprintf(" %v }\n", body)
}
func genIf(node *AstNode) string {
ifhead := fmt.Sprintf("if %v {\n", interpretAST(node.left))
trueBody := fmt.Sprintf(" %v }\n", interpretAST(node.mid))
falseBody := ""
if node.right != nil {
falseBody = fmt.Sprintf("else {\n %v }\n", interpretAST(node.right))
}
return ifhead + trueBody + falseBody
} | 13_SymbolTables/compiler.go | 0.503662 | 0.436802 | compiler.go | starcoder |
package fuzzy
import (
"bytes"
"unicode"
"unicode/utf8"
"golang.org/x/text/runes"
"golang.org/x/text/transform"
"golang.org/x/text/unicode/norm"
)
// noopTransformer returns a transformer that passes text through unchanged.
func noopTransformer() transform.Transformer {
	return transform.Nop
}
// foldTransformer returns a transformer that lowercases every rune, giving
// case-insensitive comparisons.
func foldTransformer() transform.Transformer {
	return unicodeFoldTransformer{}
}
// normalizeTransformer returns a transformer that strips diacritics: NFD
// decomposition, removal of nonspacing marks (unicode.Mn), then NFC
// recomposition.
func normalizeTransformer() transform.Transformer {
	return transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)
}
// normalizedFoldTransformer combines diacritic stripping with lowercasing.
func normalizedFoldTransformer() transform.Transformer {
	return transform.Chain(normalizeTransformer(), foldTransformer())
}
// Match returns true if source matches target using a fuzzy-searching
// algorithm. Note that it doesn't implement Levenshtein distance (see
// RankMatch instead), but rather a simplified version where there's no
// approximation. The method will return true only if each character in the
// source can be found in the target and occurs after the preceding matches.
// Matching is case-sensitive; see MatchFold and MatchNormalized for relaxed
// variants.
func Match(source, target string) bool {
	return match(source, target, noopTransformer())
}
// MatchFold is a case-insensitive version of Match (runes are lowercased
// before comparison).
func MatchFold(source, target string) bool {
	return match(source, target, foldTransformer())
}
// MatchNormalized is a unicode-normalized version of Match (diacritics are
// stripped before comparison).
func MatchNormalized(source, target string) bool {
	return match(source, target, normalizeTransformer())
}
// MatchNormalizedFold is a unicode-normalized and case-insensitive version of
// Match.
func MatchNormalizedFold(source, target string) bool {
	return match(source, target, normalizedFoldTransformer())
}
// match reports whether every rune of the (transformed) source appears in the
// (transformed) target, in order. Equal strings short-circuit to true; a
// source longer than the target can never match.
func match(source, target string, transformer transform.Transformer) bool {
	source = stringTransform(source, transformer)
	target = stringTransform(target, transformer)
	diff := len(target) - len(source)
	if diff < 0 {
		return false
	}
	if diff == 0 && source == target {
		return true
	}
	for _, want := range source {
		found := false
		for i, got := range target {
			if want == got {
				// Consume the target up to and including this rune so later
				// source runes must occur after it.
				target = target[i+utf8.RuneLen(got):]
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}
// Find will return a list of strings in targets that fuzzy matches source.
// The order of targets is preserved in the result.
func Find(source string, targets []string) []string {
	return find(source, targets, noopTransformer())
}
// FindFold is a case-insensitive version of Find.
func FindFold(source string, targets []string) []string {
	return find(source, targets, foldTransformer())
}
// FindNormalized is a unicode-normalized (diacritic-insensitive) version of
// Find.
func FindNormalized(source string, targets []string) []string {
	return find(source, targets, normalizeTransformer())
}
// FindNormalizedFold is a unicode-normalized and case-insensitive version of
// Find.
func FindNormalizedFold(source string, targets []string) []string {
	return find(source, targets, normalizedFoldTransformer())
}
// find collects every target that fuzzy-matches source under the given
// transformer, preserving input order.
func find(source string, targets []string, transformer transform.Transformer) []string {
	var matched []string
	for _, candidate := range targets {
		if !match(source, candidate, transformer) {
			continue
		}
		matched = append(matched, candidate)
	}
	return matched
}
// RankMatch is similar to Match except it will measure the Levenshtein
// distance between the source and the target and return its result. If there
// was no match, it will return -1.
// Given the requirements of match, RankMatch only needs to perform a subset of
// the Levenshtein calculation, only deletions need be considered, required
// additions and substitutions would fail the match test.
func RankMatch(source, target string) int {
	return rank(source, target, noopTransformer())
}
// RankMatchFold is a case-insensitive version of RankMatch.
func RankMatchFold(source, target string) int {
	return rank(source, target, foldTransformer())
}
// RankMatchNormalized is a unicode-normalized version of RankMatch.
func RankMatchNormalized(source, target string) int {
	return rank(source, target, normalizeTransformer())
}
// RankMatchNormalizedFold is a unicode-normalized and case-insensitive version
// of RankMatch.
func RankMatchNormalizedFold(source, target string) int {
	return rank(source, target, normalizedFoldTransformer())
}
// rank returns the deletion-only Levenshtein distance between the transformed
// source and target, or -1 when source does not fuzzy-match target.
func rank(source, target string, transformer transform.Transformer) int {
	// Transform BEFORE comparing lengths, matching match(): normalization or
	// folding can change byte length (e.g. U+0130 folds from 2 bytes to 1),
	// so the former pre-transform length check could wrongly reject, and its
	// stale lenDiff made the equality fast-path unreachable in those cases.
	source = stringTransform(source, transformer)
	target = stringTransform(target, transformer)
	lenDiff := len(target) - len(source)
	if lenDiff < 0 {
		return -1
	}
	if lenDiff == 0 && source == target {
		return 0
	}
	runeDiff := 0
Outer:
	for _, r1 := range source {
		for i, r2 := range target {
			if r1 == r2 {
				target = target[i+utf8.RuneLen(r2):]
				continue Outer
			} else {
				// Each skipped target rune is one required deletion.
				runeDiff++
			}
		}
		return -1
	}
	// Count up remaining (unmatched trailing) runes of the target.
	runeDiff += utf8.RuneCountInString(target)
	return runeDiff
}
// RankFind is similar to Find, except it will also rank all matches using
// Levenshtein distance.
func RankFind(source string, targets []string) Ranks {
	return rankFind(source, targets, noopTransformer())
}
// RankFindFold is a case-insensitive version of RankFind.
func RankFindFold(source string, targets []string) Ranks {
	return rankFind(source, targets, foldTransformer())
}
// RankFindNormalized is a unicode-normalized version of RankFind.
func RankFindNormalized(source string, targets []string) Ranks {
	return rankFind(source, targets, normalizeTransformer())
}
// RankFindNormalizedFold is a unicode-normalized and case-insensitive version
// of RankFind.
func RankFindNormalizedFold(source string, targets []string) Ranks {
	return rankFind(source, targets, normalizedFoldTransformer())
}
// rankFind ranks every matching target by Levenshtein distance. Note the
// distance is computed on the untransformed strings, while matching uses the
// transformer.
func rankFind(source string, targets []string, transformer transform.Transformer) Ranks {
	var ranks Ranks
	for i, t := range targets {
		if !match(source, t, transformer) {
			continue
		}
		ranks = append(ranks, Rank{
			Source:        source,
			Target:        t,
			Distance:      LevenshteinDistance(source, t),
			OriginalIndex: i,
		})
	}
	return ranks
}
// Rank is a single fuzzy-match result with its Levenshtein distance.
type Rank struct {
	// Source is used as the source for matching.
	Source string
	// Target is the word matched against.
	Target string
	// Distance is the Levenshtein distance between Source and Target.
	Distance int
	// OriginalIndex is the location of Target in the original targets list.
	OriginalIndex int
}
type Ranks []Rank
// Len implements sort.Interface.
func (r Ranks) Len() int {
	return len(r)
}
// Swap implements sort.Interface.
func (r Ranks) Swap(i, j int) {
	r[i], r[j] = r[j], r[i]
}
// Less implements sort.Interface: smaller Levenshtein distance sorts first.
func (r Ranks) Less(i, j int) bool {
	return r[i].Distance < r[j].Distance
}
// stringTransform applies t to s, falling back to the untouched input when
// the transformation fails.
func stringTransform(s string, t transform.Transformer) string {
	out, _, err := transform.String(t, s)
	if err != nil {
		return s
	}
	return out
}
type unicodeFoldTransformer struct{}
// Transform implements transform.Transformer, writing a lowercased copy of
// src into dst and reporting ErrShortDst when dst is too small.
// NOTE(review): nSrc mirrors nDst even though the lowered byte length may
// differ from the source length, and src is decoded wholesale regardless of
// atEOF, so a multi-byte rune split across chunks could be mangled — confirm
// callers always pass complete input (transform.String via stringTransform
// does here).
func (unicodeFoldTransformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	runes := bytes.Runes(src)
	// Preallocate: the lowered text has exactly one rune per input rune,
	// avoiding repeated append growth on every call.
	lowered := make([]rune, 0, len(runes))
	for _, r := range runes {
		lowered = append(lowered, unicode.ToLower(r))
	}
	out := []byte(string(lowered))
	n := copy(dst, out)
	if n < len(out) {
		err = transform.ErrShortDst
	}
	return n, n, err
}
func (unicodeFoldTransformer) Reset() {} | fuzzy/fuzzy.go | 0.820073 | 0.440409 | fuzzy.go | starcoder |
package main
import (
"bufio"
"fmt"
"log"
"math/bits"
"math/rand"
"os"
"strconv"
"strings"
"time"
)
// THRESHOLD is the minimum shared-bit count for two vectors to be reported as
// the correlated pair.
const THRESHOLD = 70

// CUTOFF is the early-exit pruning bound used by Compare on the first words.
const CUTOFF = 15

// AMOUNT_OF_BITS is the number of bits each vector holds.
const AMOUNT_OF_BITS = 256

// AMOUNT_OF_SUBBUCKETS is the depth of the min-hash bucketing (permuted bit
// choices per bucket key).
const AMOUNT_OF_SUBBUCKETS = 5

// PERM_SIZE is the size of a single permutation: 64 instead of 256 because
// only the first uint64 word of each BitVector256 is hashed.
const PERM_SIZE = 64
// A BitVector256 is a 256-bit vector stored as four uint64 words (a..d),
// plus the vector's index in the original input slice so results can be
// reported in input order.
type BitVector256 struct {
	a uint64
	b uint64
	c uint64
	d uint64
	index int
}
// A Key used as key in a BucketMap: one permuted bit position per sub-bucket
// level, as produced by findSetBit.
type Key [AMOUNT_OF_SUBBUCKETS]uint8
// BucketMap groups vectors that share the same Key signature.
type BucketMap map[Key][]BitVector256
// Compare returns the similarity of two BitVectors, meaning the number of
// bits set in both vectors. As a pruning optimization it returns 0 (rather
// than the true, small count) as soon as the running total after the first
// or second word falls below CUTOFF / 2*CUTOFF.
func (b BitVector256) Compare(b1 BitVector256) int {
	similarity := 0
	similarity += bits.OnesCount64(b.a & b1.a)
	if similarity < CUTOFF {
		return 0
	}
	similarity += bits.OnesCount64(b.b & b1.b)
	if similarity < CUTOFF*2 {
		return 0
	}
	similarity += bits.OnesCount64(b.c & b1.c)
	similarity += bits.OnesCount64(b.d & b1.d)
	return similarity
}
// correlatedPair brute-forces all pairs in a cache-friendly scan and returns
// the original indices of the first pair whose similarity reaches THRESHOLD,
// or (-1, -1) when none exists.
func correlatedPair(vectors []BitVector256) (int, int) {
	for i := range vectors {
		a := vectors[i]
		for _, b := range vectors[i+1:] {
			if a.Compare(b) >= THRESHOLD {
				return a.index, b.index
			}
		}
	}
	return -1, -1
}
// compareInBuckets brute-forces each bucket independently and returns the
// first correlated pair found, or (-1, -1) when no bucket contains one.
func compareInBuckets(buckets BucketMap) (int, int) {
	for _, group := range buckets {
		if a, b := correlatedPair(group); a != -1 {
			return a, b
		}
	}
	return -1, -1
}
// findSetBit returns the first entry of permutation whose bit is set in the
// first word of vector, or 0 when none of the permuted positions is set.
func findSetBit(permutation [PERM_SIZE]uint8, vector BitVector256) uint8 {
	// BUG FIX: the loop previously ran to AMOUNT_OF_BITS (256), indexing past
	// the PERM_SIZE (64) entry permutation array and panicking with "index
	// out of range" whenever vector.a had no set bit. Only PERM_SIZE entries
	// exist, and each covers one bit of the 64-bit word being tested.
	for i := 0; i < PERM_SIZE; i++ {
		v := permutation[i]
		if vector.a&(1<<v) != 0 {
			return v
		}
	}
	// No set bit in the first word (e.g. an all-zero word); 0 acts as a
	// sentinel bucket key, as the original comment intended.
	return 0
}
// defaultPermutation returns the identity permutation [0, 1, ..., PERM_SIZE-1].
func defaultPermutation() [PERM_SIZE]uint8 {
	var perm [PERM_SIZE]uint8
	for i := range perm {
		perm[i] = uint8(i)
	}
	return perm
}
// generatePermutation Fisher-Yates-shuffles the array, similarly to how
// golang's libraries do it. The array is passed and returned by value, so the
// caller's copy is untouched.
func generatePermutation(slice [PERM_SIZE]uint8) [PERM_SIZE]uint8 {
	for i := range slice {
		j := rand.Intn(i + 1)
		slice[i], slice[j] = slice[j], slice[i]
	}
	return slice
}
// groupInBuckets hashes every vector into a bucket keyed by
// AMOUNT_OF_SUBBUCKETS independent min-hash signatures (the first permuted
// set bit of the vector's first word, per permutation).
func groupInBuckets(vectors []BitVector256) BucketMap {
	// NOTE(review): re-seeds the global source on every call; seeding once at
	// startup would be cleaner — confirm call frequency before changing.
	rand.Seed(time.Now().UTC().UnixNano())
	buckets := make(BucketMap)
	var permutations [AMOUNT_OF_SUBBUCKETS][PERM_SIZE]uint8
	// generate all the permutations once per pass
	defaultPermutation := defaultPermutation()
	for i := 0; i < AMOUNT_OF_SUBBUCKETS; i++ {
		permutations[i] = generatePermutation(defaultPermutation)
	}
	for _, v := range vectors {
		var key [AMOUNT_OF_SUBBUCKETS]uint8
		// create the key as an array of size AMOUNT_OF_SUBBUCKETS
		for j := 0; j < AMOUNT_OF_SUBBUCKETS; j++ {
			key[j] = findSetBit(permutations[j], v)
		}
		buckets[key] = append(buckets[key], v)
	}
	return buckets
}
// minHash performs a single randomized bucketing pass; it is not guaranteed
// to find the pair and returns (-1, -1) on a miss.
func minHash(vectors []BitVector256) (int, int) {
	return compareInBuckets(groupInBuckets(vectors))
}
// MinHash repeats randomized passes until a correlated pair is found and
// returns its indices (iterative form of the original retry recursion).
func MinHash(vectors []BitVector256) (int, int) {
	for {
		if a, b := minHash(vectors); a != -1 {
			return a, b
		}
	}
}
// readVectors loads up to vectorAmount 256-bit vectors from filename; each
// line holds four base-10 64-bit words. Any open or scan error is fatal.
func readVectors(filename string, vectorAmount int) []BitVector256 {
	vectors := make([]BitVector256, 0, vectorAmount)
	file, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	index := 0
	for scanner.Scan() {
		words := strings.Fields(scanner.Text())
		// NOTE(review): parse errors were and remain silently ignored; the
		// input format is assumed valid.
		a, _ := strconv.ParseInt(words[0], 10, 64)
		b, _ := strconv.ParseInt(words[1], 10, 64)
		c, _ := strconv.ParseInt(words[2], 10, 64)
		d, _ := strconv.ParseInt(words[3], 10, 64)
		vectors = append(vectors, BitVector256{uint64(a), uint64(b), uint64(c), uint64(d), index})
		index++
	}
	// BUG FIX: previously "if scanErr := scanner.Err(); err != nil" tested the
	// (always-nil) open error, so scanner errors were silently dropped.
	if scanErr := scanner.Err(); scanErr != nil {
		log.Fatal(scanErr)
	}
	return vectors
}
func main() {
filename := os.Args[1]
//longAmount, _ := strconv.Atoi(os.Args[2])
vectorAmount, _ := strconv.Atoi(os.Args[3])
vectors := readVectors(filename, vectorAmount)
// returns the indices of the correlated pair within vectors
low, high := MinHash(vectors)
fmt.Printf("%d %d\n", low, high)
} | CorrelatedPair/minhash.go | 0.632616 | 0.520253 | minhash.go | starcoder |
package mountlib
// "@" will be replaced by the command name, "|" will be replaced by backticks
var mountHelp = `
rclone @ allows Linux, FreeBSD, macOS and Windows to
mount any of Rclone's cloud storage systems as a file system with
FUSE.
First set up your remote using |rclone config|. Check it works with |rclone ls| etc.
On Linux and OSX, you can either run mount in foreground mode or background (daemon) mode.
Mount runs in foreground mode by default, use the |--daemon| flag to specify background mode.
You can only run mount in foreground mode on Windows.
In background mode rclone acts as a generic Unix mount program: the main program
starts, spawns a background rclone process to setup and maintain the mount, waits
until success or timeout, kills the child process if mount fails, and immediately
exits with appropriate return code.
On Linux/macOS/FreeBSD start the mount like this, where |/path/to/local/mount|
is an **empty** **existing** directory:
rclone @ remote:path/to/files /path/to/local/mount
On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
for details. If foreground mount is used interactively from a console window,
rclone will serve the mount and occupy the console so another window should be
used to work with the mount until rclone is interrupted e.g. by pressing Ctrl-C.
The following examples will mount to an automatically assigned drive,
to specific drive letter |X:|, to path |C:\path\parent\mount|
(where parent directory or drive must exist, and mount must **not** exist,
and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
the last example will mount as network share |\\cloud\remote| and map it to an
automatically assigned drive:
rclone @ remote:path/to/files *
rclone @ remote:path/to/files X:
rclone @ remote:path/to/files C:\path\parent\mount
rclone @ remote:path/to/files \\cloud\remote
When the program ends while in foreground mode, either via Ctrl+C or receiving
a SIGINT or SIGTERM signal, the mount should be automatically stopped.
When running in background mode the user will have to stop the mount manually:
# Linux
fusermount -u /path/to/local/mount
# OS X
umount /path/to/local/mount
The umount operation can fail, for example when the mountpoint is busy.
When that happens, it is the user's responsibility to stop the mount manually.
The size of the mounted file system will be set according to information retrieved
from the remote, the same as returned by the [rclone about](https://rclone.org/commands/rclone_about/)
command. Remotes with unlimited storage may report the used size only,
then an additional 1 PiB of free space is assumed. If the remote does not
[support](https://rclone.org/overview/#optional-features) the about feature
at all, then 1 PiB is set as both the total and the free size.
### Installing on Windows
To run rclone @ on Windows, you will need to
download and install [WinFsp](http://www.secfs.net/winfsp/).
[WinFsp](https://github.com/billziss-gh/winfsp) is an open source
Windows File System Proxy which makes it easy to write user space file
systems for Windows. It provides a FUSE emulation layer which rclone
uses combination with [cgofuse](https://github.com/billziss-gh/cgofuse).
Both of these packages are by <NAME> who was very helpful
during the implementation of rclone @ for Windows.
#### Mounting modes on windows
Unlike other operating systems, Microsoft Windows provides a different filesystem
type for network and fixed drives. It optimises access on the assumption fixed
disk drives are fast and reliable, while network drives have relatively high latency
and less reliability. Some settings can also be differentiated between the two types,
for example that Windows Explorer should just display icons and not create preview
thumbnails for image and video files on network drives.
In most cases, rclone will mount the remote as a normal, fixed disk drive by default.
However, you can also choose to mount it as a remote network drive, often described
as a network share. If you mount an rclone remote using the default, fixed drive mode
and experience unexpected program errors, freezes or other issues, consider mounting
as a network drive instead.
When mounting as a fixed disk drive you can either mount to an unused drive letter,
or to a path representing a **non-existent** subdirectory of an **existing** parent
directory or drive. Using the special value |*| will tell rclone to
automatically assign the next available drive letter, starting with Z: and moving backward.
Examples:
rclone @ remote:path/to/files *
rclone @ remote:path/to/files X:
rclone @ remote:path/to/files C:\path\parent\mount
rclone @ remote:path/to/files X:
Option |--volname| can be used to set a custom volume name for the mounted
file system. The default is to use the remote name and path.
To mount as network drive, you can add option |--network-mode|
to your @ command. Mounting to a directory path is not supported in
this mode, it is a limitation Windows imposes on junctions, so the remote must always
be mounted to a drive letter.
rclone @ remote:path/to/files X: --network-mode
A volume name specified with |--volname| will be used to create the network share path.
A complete UNC path, such as |\\cloud\remote|, optionally with path
|\\cloud\remote\madeup\path|, will be used as is. Any other
string will be used as the share part, after a default prefix |\\server\|.
If no volume name is specified then |\\server\share| will be used.
You must make sure the volume name is unique when you are mounting more than one drive,
or else the mount command will fail. The share name will treated as the volume label for
the mapped drive, shown in Windows Explorer etc, while the complete
|\\server\share| will be reported as the remote UNC path by
|net use| etc, just like a normal network drive mapping.
If you specify a full network share UNC path with |--volname|, this will implicitly
set the |--network-mode| option, so the following two examples have same result:
rclone @ remote:path/to/files X: --network-mode
rclone @ remote:path/to/files X: --volname \\server\share
You may also specify the network share UNC path as the mountpoint itself. Then rclone
will automatically assign a drive letter, same as with |*| and use that as
mountpoint, and instead use the UNC path specified as the volume name, as if it were
specified with the |--volname| option. This will also implicitely set
the |--network-mode| option. This means the following two examples have same result:
rclone @ remote:path/to/files \\cloud\remote
rclone @ remote:path/to/files * --volname \\cloud\remote
There is yet another way to enable network mode, and to set the share path,
and that is to pass the "native" libfuse/WinFsp option directly:
|--fuse-flag --VolumePrefix=\server\share|. Note that the path
must be with just a single backslash prefix in this case.
*Note:* In previous versions of rclone this was the only supported method.
[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)
See also [Limitations](#limitations) section below.
#### Windows filesystem permissions
The FUSE emulation layer on Windows must convert between the POSIX-based
permission model used in FUSE, and the permission model used in Windows,
based on access-control lists (ACL).
The mounted filesystem will normally get three entries in its access-control list (ACL),
representing permissions for the POSIX permission scopes: Owner, group and others.
By default, the owner and group will be taken from the current user, and the built-in
group "Everyone" will be used to represent others. The user/group can be customized
with FUSE options "UserName" and "GroupName",
e.g. |-o UserName=user123 -o GroupName="Authenticated Users"|.
The permissions on each entry will be set according to
[options](#options) |--dir-perms| and |--file-perms|,
which takes a value in traditional [numeric notation](https://en.wikipedia.org/wiki/File-system_permissions#Numeric_notation),
where the default corresponds to |--file-perms 0666 --dir-perms 0777|.
Note that the mapping of permissions is not always trivial, and the result
you see in Windows Explorer may not be exactly like you expected.
For example, when setting a value that includes write access, this will be
mapped to individual permissions "write attributes", "write data" and "append data",
but not "write extended attributes". Windows will then show this as basic
permission "Special" instead of "Write", because "Write" includes the
"write extended attributes" permission.
If you set POSIX permissions for only allowing access to the owner, using
|--file-perms 0600 --dir-perms 0700|, the user group and the built-in "Everyone"
group will still be given some special permissions, such as "read attributes"
and "read permissions", in Windows. This is done for compatibility reasons,
e.g. to allow users without additional permissions to be able to read basic
metadata about files like in UNIX. One case that may arise is that other programs
(incorrectly) interprets this as the file being accessible by everyone. For example
an SSH client may warn about "unprotected private key file".
WinFsp 2021 (version 1.9) introduces a new FUSE option "FileSecurity",
that allows the complete specification of file security descriptors using
[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
With this you can work around issues such as the mentioned "unprotected private key file"
by specifying |-o FileSecurity="D:P(A;;FA;;;OW)"|, for file all access (FA) to the owner (OW).
#### Windows caveats
Drives created as Administrator are not visible to other accounts,
not even an account that was elevated to Administrator with the
User Account Control (UAC) feature. A result of this is that if you mount
to a drive letter from a Command Prompt run as Administrator, and then try
to access the same drive from Windows Explorer (which does not run as
Administrator), you will not be able to see the mounted drive.
If you don't need to access the drive from applications running with
administrative privileges, the easiest way around this is to always
create the mount from a non-elevated command prompt.
To make mapped drives available to the user account that created them
regardless if elevated or not, there is a special Windows setting called
[linked connections](https://docs.microsoft.com/en-us/troubleshoot/windows-client/networking/mapped-drives-not-available-from-elevated-command#detail-to-configure-the-enablelinkedconnections-registry-entry)
that can be enabled.
It is also possible to make a drive mount available to everyone on the system,
by running the process creating it as the built-in SYSTEM account.
There are several ways to do this: One is to use the command-line
utility [PsExec](https://docs.microsoft.com/en-us/sysinternals/downloads/psexec),
from Microsoft's Sysinternals suite, which has option |-s| to start
processes as the SYSTEM account. Another alternative is to run the mount
command from a Windows Scheduled Task, or a Windows Service, configured
to run as the SYSTEM account. A third alternative is to use the
[WinFsp.Launcher infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Architecture).
Note that when running rclone as another user, it will not use
the configuration file from your profile unless you tell it to
with the [|--config|](https://rclone.org/docs/#config-config-file) option.
Read more in the [install documentation](https://rclone.org/install/).
Note that mapping to a directory path, instead of a drive letter,
does not suffer from the same limitations.
### Limitations
Without the use of |--vfs-cache-mode| this can only write files
sequentially, it can only seek when reading. This means that many
applications won't work with their files on an rclone mount without
|--vfs-cache-mode writes| or |--vfs-cache-mode full|.
See the [VFS File Caching](#vfs-file-caching) section for more info.
The bucket based remotes (e.g. Swift, S3, Google Compute Storage, B2,
Hubic) do not support the concept of empty directories, so empty
directories will have a tendency to disappear once they fall out of
the directory cache.
When mount is invoked on Unix with |--daemon|, the main rclone program
will wait until the background mount is ready until timeout specified by
the |--daemon-wait| flag. On Linux rclone will poll ProcFS to check status
so the flag sets the **maximum time to wait**. On macOS/BSD the time to wait
is constant and the check is performed only at the end of sleep so don't
set it too high...
Only supported on Linux, FreeBSD, OS X and Windows at the moment.
### rclone @ vs rclone sync/copy
File systems expect things to be 100% reliable, whereas cloud storage
systems are a long way from 100% reliable. The rclone sync/copy
commands cope with this with lots of retries. However rclone @
can't use retries in the same way without making local copies of the
uploads. Look at the [VFS File Caching](#vfs-file-caching)
for solutions to make @ more reliable.
### Attribute caching
You can use the flag |--attr-timeout| to set the time the kernel caches
the attributes (size, modification time, etc.) for directory entries.
The default is |1s| which caches files just long enough to avoid
too many callbacks to rclone from the kernel.
In theory 0s should be the correct value for filesystems which can
change outside the control of the kernel. However this causes quite a
few problems such as
[rclone using too much memory](https://github.com/rclone/rclone/issues/2157),
[rclone not serving files to samba](https://forum.rclone.org/t/rclone-1-39-vs-1-40-mount-issue/5112)
and [excessive time listing directories](https://github.com/rclone/rclone/issues/2095#issuecomment-371141147).
The kernel can cache the info about a file for the time given by
|--attr-timeout|. You may see corruption if the remote file changes
length during this window. It will show up as either a truncated file
or a file with garbage on the end. With |--attr-timeout 1s| this is
very unlikely but not impossible. The higher you set |--attr-timeout|
the more likely it is. The default setting of "1s" is the lowest
setting which mitigates the problems above.
If you set it higher (|10s| or |1m| say) then the kernel will call
back to rclone less often making it more efficient, however there is
more chance of the corruption issue above.
If files don't change on the remote outside of the control of rclone
then there is no chance of corruption.
This is the same as setting the attr_timeout option in mount.fuse.
### Filters
Note that all the rclone filters can be used to select a subset of the
files to be visible in the mount.
### systemd
When running rclone @ as a systemd service, it is possible
to use Type=notify. In this case the service will enter the started state
after the mountpoint has been successfully set up.
Units having the rclone @ service specified as a requirement
will see all files and folders immediately in this mode.
Note that systemd runs mount units without any environment variables including
|PATH| or |HOME|. This means that tilde (|~|) expansion will not work
and you should provide |--config| and |--cache-dir| explicitly as absolute
paths via rclone arguments. Since mounting requires the |fusermount| program,
rclone will use the fallback PATH of |/bin:/usr/bin| in this scenario.
Please ensure that |fusermount| is present on this PATH.
### Rclone as Unix mount helper
The core Unix program |/bin/mount| normally takes the |-t FSTYPE| argument
then runs the |/sbin/mount.FSTYPE| helper program passing it mount options
as |-o key=val,...| or |--opt=...|. Automount (classic or systemd) follows
suit.
rclone by default expects GNU-style flags |--key val|. To run it as a
mount helper you should symlink the rclone binary to |/sbin/mount.rclone|
and optionally |/usr/bin/rclonefs|, e.g. |ln -s /usr/bin/rclone /sbin/mount.rclone|.
Now you can run classic mounts like this:
|||
mount sftp1:subdir /mnt/data -t rclone -o vfs_cache_mode=writes,sftp_key_file=/path/to/pem
|||
or create systemd mount units:
|||
# /etc/systemd/system/mnt-data.mount
[Unit]
After=network-online.target
[Mount]
Type=rclone
What=sftp1:subdir
Where=/mnt/data
Options=rw,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone.conf,cache-dir=/var/rclone
|||
optionally augmented by systemd automount unit
|||
# /etc/systemd/system/mnt-data.automount
[Unit]
After=network-online.target
Before=remote-fs.target
[Automount]
Where=/mnt/data
TimeoutIdleSec=600
[Install]
WantedBy=multi-user.target
|||
or add in |/etc/fstab| a line like
|||
sftp1:subdir /mnt/data rclone rw,noauto,nofail,_netdev,x-systemd.automount,args2env,vfs_cache_mode=writes,config=/etc/rclone.conf,cache_dir=/var/cache/rclone 0 0
|||
or use classic Automountd.
Remember to provide explicit |config=...,cache-dir=...| as mount units
run without |HOME|.
Rclone in the mount helper mode will split |-o| argument(s) by comma, replace |_|
by |-| and prepend |--| to get the command-line flags. Options containing commas
or spaces can be wrapped in single or double quotes. Any quotes inside outer quotes
should be doubled.
Mount option syntax includes a few extra options treated specially:
- |env.NAME=VALUE| will set an environment variable for the mount process.
This helps with Automountd and Systemd.mount which don't allow to set custom
environment for mount helpers.
Typically you will use |env.HTTPS_PROXY=proxy.host:3128| or |env.HOME=/root|
- |command=cmount| can be used to run any other command rather than default mount
- |args2env| will pass mount options to the background mount helper via environment
variables instead of command line arguments. This allows hiding secrets from
such commands as |ps| or |pgrep|.
- |vv...| will be transformed into appropriate |--verbose=N|
- standard mount options like |x-systemd.automount|, |_netdev|, |nosuid| and alike
are intended only for Automountd so ignored by rclone
` | cmd/mountlib/help.go | 0.765243 | 0.451931 | help.go | starcoder |
package analysis
// TokenLocation represents one occurrence of a term at a particular location in
// a field. Start, End and Position have the same meaning as in analysis.Token.
// Field and ArrayPositions identify the field value in the source document.
// See document.Field for details.
type TokenLocation struct {
	Field          string   // name of the field the term occurred in
	ArrayPositions []uint64 // index path identifying the field value when it is an array
	Start          int      // start offset of the term (as in analysis.Token)
	End            int      // end offset of the term (as in analysis.Token)
	Position       int      // token position within the field (as in analysis.Token)
}
// TokenFreq represents all the occurrences of a term in all fields of a
// document.
type TokenFreq struct {
	Term      []byte           // the term itself
	Locations []*TokenLocation // every location where the term occurred
	frequency int              // occurrence count; maintained by TokenFrequency and MergeAll
}

// Frequency returns the number of occurrences of the term in the document.
func (tf *TokenFreq) Frequency() int {
	return tf.frequency
}

// TokenFrequencies maps document terms to their combined frequencies from all
// fields.
type TokenFrequencies map[string]*TokenFreq
// MergeAll folds the token frequencies from other into tfs, rewriting the
// Field of every incoming location to remoteField. Locations held by other
// are mutated in place before being merged.
func (tfs TokenFrequencies) MergeAll(remoteField string, other TokenFrequencies) {
	for term, incoming := range other {
		// Point all incoming locations at the remote field.
		for _, loc := range incoming.Locations {
			loc.Field = remoteField
		}
		if existing, ok := tfs[term]; ok {
			existing.Locations = append(existing.Locations, incoming.Locations...)
			existing.frequency += incoming.frequency
			continue
		}
		// First sighting of this term: take a copy of the locations slice so
		// later appends to tfs do not alias other's backing array.
		merged := &TokenFreq{
			Term:      incoming.Term,
			frequency: incoming.frequency,
			Locations: make([]*TokenLocation, len(incoming.Locations)),
		}
		copy(merged.Locations, incoming.Locations)
		tfs[term] = merged
	}
}
func TokenFrequency(tokens TokenStream, arrayPositions []uint64, includeTermVectors bool) TokenFrequencies {
rv := make(map[string]*TokenFreq, len(tokens))
if includeTermVectors {
tls := make([]TokenLocation, len(tokens))
tlNext := 0
for _, token := range tokens {
tls[tlNext] = TokenLocation{
ArrayPositions: arrayPositions,
Start: token.Start,
End: token.End,
Position: token.Position,
}
curr, ok := rv[string(token.Term)]
if ok {
curr.Locations = append(curr.Locations, &tls[tlNext])
curr.frequency++
} else {
rv[string(token.Term)] = &TokenFreq{
Term: token.Term,
Locations: []*TokenLocation{&tls[tlNext]},
frequency: 1,
}
}
tlNext++
}
} else {
for _, token := range tokens {
curr, exists := rv[string(token.Term)]
if exists {
curr.frequency++
} else {
rv[string(token.Term)] = &TokenFreq{
Term: token.Term,
frequency: 1,
}
}
}
}
return rv
} | example/github/starred/limo/vendor/github.com/blevesearch/bleve/analysis/freq.go | 0.704567 | 0.429429 | freq.go | starcoder |
package ast
import (
"github.com/goplus/gop/token"
)
// -----------------------------------------------------------------------------
// A SliceLit node represents a slice literal.
type SliceLit struct {
	Lbrack     token.Pos // position of "["
	Elts       []Expr    // list of composite elements; or nil
	Rbrack     token.Pos // position of "]"
	Incomplete bool      // true if (source) expressions are missing in the Elts list
}

// Pos - position of first character belonging to the node
func (p *SliceLit) Pos() token.Pos {
	return p.Lbrack
}

// End - position of first character immediately after the node
func (p *SliceLit) End() token.Pos {
	// +1 steps past the closing "]" itself.
	return p.Rbrack + 1
}

// exprNode marks SliceLit as an expression node.
func (*SliceLit) exprNode() {}
// -----------------------------------------------------------------------------
/*
// TernaryExpr represents `cond ? expr1 : expr2`
type TernaryExpr struct {
Cond Expr
Question token.Pos
X Expr
Colon token.Pos
Y Expr
}
// Pos - position of first character belonging to the node
func (p *TernaryExpr) Pos() token.Pos {
return p.Cond.Pos()
}
// End - position of first character immediately after the node
func (p *TernaryExpr) End() token.Pos {
return p.Y.End()
}
func (*TernaryExpr) exprNode() {}
*/
// -----------------------------------------------------------------------------
// ErrWrapExpr represents `expr!`, `expr?` or `expr?: defaultValue`
type ErrWrapExpr struct {
	X       Expr        // the wrapped expression
	Tok     token.Token // ! or ?
	TokPos  token.Pos   // position of Tok
	Default Expr        // can be nil
}

// Pos - position of first character belonging to the node
func (p *ErrWrapExpr) Pos() token.Pos {
	return p.X.Pos()
}

// End - position of first character immediately after the node
func (p *ErrWrapExpr) End() token.Pos {
	// For `expr?: defaultValue` the node extends to the default value.
	if p.Default != nil {
		return p.Default.End()
	}
	// Otherwise it ends just past the `!` or `?` token.
	return p.TokPos + 1
}

// exprNode marks ErrWrapExpr as an expression node.
func (*ErrWrapExpr) exprNode() {}
// -----------------------------------------------------------------------------
// LambdaExpr represents
//    `(x, y, ...) => exprOrExprTuple`
//    `x => exprOrExprTuple`
//    `=> exprOrExprTuple`
// here exprOrExprTuple represents
//    `expr`
//    `(expr1, expr2, ...)`
type LambdaExpr struct {
	First, Last token.Pos // extent of the whole lambda expression
	Lhs         []*Ident  // parameter names; may be empty
	Rarrow      token.Pos // position of "=>"
	Rhs         []Expr    // result expression(s)
	LhsHasParen bool      // whether the parameter list is parenthesized
	RhsHasParen bool      // whether the result list is parenthesized
}

// Pos - position of first character belonging to the node
func (p *LambdaExpr) Pos() token.Pos {
	return p.First
}

// End - position of first character immediately after the node
func (p *LambdaExpr) End() token.Pos {
	return p.Last
}

// exprNode marks LambdaExpr as an expression node.
func (*LambdaExpr) exprNode() {}
// -----------------------------------------------------------------------------
// ForPhrase represents `for k, v <- container, cond`
type ForPhrase struct {
	For        token.Pos // position of "for" keyword
	Key, Value *Ident    // Key may be nil
	TokPos     token.Pos // position of "<-" operator
	X          Expr      // value to range over
	Init       Stmt      // initialization statement; or nil
	Cond       Expr      // value filter, can be nil
}

// Pos returns position of first character belonging to the node.
func (p *ForPhrase) Pos() token.Pos { return p.For }

// End returns position of first character immediately after the node.
// NOTE(review): this ends at X even when Init/Cond follow it — confirm
// this is intended before relying on End covering the whole phrase.
func (p *ForPhrase) End() token.Pos { return p.X.End() }

// exprNode marks ForPhrase as an expression node.
func (p *ForPhrase) exprNode() {}
// ComprehensionExpr represents
//    `[vexpr for k1, v1 <- container1, cond1 ...]` or
//    `{vexpr for k1, v1 <- container1, cond1 ...}` or
//    `{kexpr: vexpr for k1, v1 <- container1, cond1 ...}` or
//    `{for k1, v1 <- container1, cond1 ...}`
type ComprehensionExpr struct {
	Lpos token.Pos   // position of "[" or "{"
	Tok  token.Token // token.LBRACK '[' or token.LBRACE '{'
	Elt  Expr        // *KeyValueExpr or Expr or nil
	Fors []*ForPhrase
	Rpos token.Pos // position of "]" or "}"
}

// Pos - position of first character belonging to the node
func (p *ComprehensionExpr) Pos() token.Pos {
	return p.Lpos
}

// End - position of first character immediately after the node
func (p *ComprehensionExpr) End() token.Pos {
	// +1 steps past the closing "]" or "}".
	return p.Rpos + 1
}

// exprNode marks ComprehensionExpr as an expression node.
func (*ComprehensionExpr) exprNode() {}
// -----------------------------------------------------------------------------
// A ForPhraseStmt represents a for statement with a for <- clause.
type ForPhraseStmt struct {
	*ForPhrase            // the `for k, v <- container, cond` clause
	Body       *BlockStmt // loop body
}

// Pos - position of first character belonging to the node
func (p *ForPhraseStmt) Pos() token.Pos {
	return p.For
}

// End - position of first character immediately after the node
func (p *ForPhraseStmt) End() token.Pos {
	return p.Body.End()
}

// stmtNode marks ForPhraseStmt as a statement node.
func (*ForPhraseStmt) stmtNode() {}
// ----------------------------------------------------------------------------- | ast/ast_gop.go | 0.742608 | 0.499329 | ast_gop.go | starcoder |
package cbor
import (
"io"
. "github.com/ipsn/go-ipfs/gxlibs/github.com/polydawn/refmt/tok"
)
// Encoder is a token-driven CBOR encoder: tokens are pushed one at a time
// via Step, and nesting is tracked with a stack of phases.
type Encoder struct {
	w quickWriter

	stack   []encoderPhase // When empty, and step returns done, all done.
	current encoderPhase   // Shortcut to end of stack.

	// Note unlike decoder, we need no statekeeping space for definite-len map and array.

	spareBytes []byte // 8-byte scratch buffer
}
// NewEncoder returns a CBOR encoder that writes its output to w,
// initialized to expect a top-level value.
func NewEncoder(w io.Writer) (d *Encoder) {
	d = &Encoder{
		w:          newQuickWriterStream(w),
		stack:      make([]encoderPhase, 0, 10),
		current:    phase_anyExpectValue,
		spareBytes: make([]byte, 8),
	}
	return
}
// Reset returns the encoder to its initial state so it can encode a new
// value; the underlying writer is retained.
func (d *Encoder) Reset() {
	d.stack = d.stack[0:0]
	d.current = phase_anyExpectValue
}
// encoderPhase describes what kind of token the encoder expects next.
type encoderPhase byte

// There's about twice as many phases that the cbor encoder can be in compared to the json encoder
// because the presence of indefinite vs definite length maps and arrays effectively adds a dimension to those.
//
// Note each map *ExpectValue phase sits exactly one above its matching
// *ExpectKeyOrEnd phase; Step flips between them with current ± 1.
const (
	phase_anyExpectValue encoderPhase = iota
	phase_mapDefExpectKeyOrEnd     // must not yield break at end
	phase_mapDefExpectValue        // only necessary to flip back to DefExpectKey
	phase_mapIndefExpectKeyOrEnd   // must yield break at end
	phase_mapIndefExpectValue      // only necessary to flip back to IndefExpectKey
	phase_arrDefExpectValueOrEnd   // must not yield break at end
	phase_arrIndefExpectValueOrEnd // must yield break at end
)
// pushPhase enters a new phase, making it current and recording it on the stack.
func (d *Encoder) pushPhase(p encoderPhase) {
	d.current = p
	d.stack = append(d.stack, d.current)
}
// Pop a phase from the stack; return 'true' if stack now empty.
// When only the root phase remains, it is left in place and true is
// returned, signalling that the top-level value is complete.
func (d *Encoder) popPhase() bool {
	n := len(d.stack) - 1
	if n == 0 {
		return true
	}
	if n < 0 { // the state machines are supposed to have already errored better
		panic("cborEncoder stack overpopped")
	}
	d.current = d.stack[n-1]
	d.stack = d.stack[0:n]
	return false
}
// Step feeds one token into the encoder and emits the corresponding CBOR
// bytes. It returns done=true when the token completed a top-level value,
// and an error if the token is not acceptable in the current phase (e.g. a
// non-key token where a map key is expected).
func (d *Encoder) Step(tokenSlot *Token) (done bool, err error) {
	/*
		Though it reads somewhat backwards from how a human would probably intuit
		cause and effect, switching on the token type we got first,
		*then* switching for whether it is acceptable for our current phase... is by
		far the shorter volume of code to write.
	*/
	phase := d.current
	switch tokenSlot.Type {
	case TMapOpen:
		switch phase {
		case phase_mapDefExpectValue, phase_mapIndefExpectValue:
			// This value completes a key+value pair; parent map expects a key next.
			d.current -= 1
			fallthrough
		case phase_anyExpectValue, phase_arrDefExpectValueOrEnd, phase_arrIndefExpectValueOrEnd:
			if tokenSlot.Tagged {
				d.emitMajorPlusLen(cborMajorTag, uint64(tokenSlot.Tag))
			}
			// Definite-length maps carry their length up front; indefinite
			// maps open with a sigil and are terminated by a break byte.
			if tokenSlot.Length >= 0 {
				d.pushPhase(phase_mapDefExpectKeyOrEnd)
				d.emitMajorPlusLen(cborMajorMap, uint64(tokenSlot.Length))
			} else {
				d.pushPhase(phase_mapIndefExpectKeyOrEnd)
				d.w.writen1(cborSigilIndefiniteMap)
			}
			return false, d.w.checkErr()
		case phase_mapDefExpectKeyOrEnd, phase_mapIndefExpectKeyOrEnd:
			return true, &ErrInvalidTokenStream{Got: *tokenSlot, Acceptable: tokenTypesForKey}
		default:
			panic("unreachable phase")
		}
	case TMapClose:
		switch phase {
		case phase_mapDefExpectKeyOrEnd:
			return d.popPhase(), nil
		case phase_mapIndefExpectKeyOrEnd:
			// Indefinite-length maps are terminated with an explicit break.
			d.w.writen1(cborSigilBreak)
			return d.popPhase(), d.w.checkErr()
		case phase_anyExpectValue, phase_mapDefExpectValue, phase_mapIndefExpectValue, phase_arrDefExpectValueOrEnd, phase_arrIndefExpectValueOrEnd:
			return true, &ErrInvalidTokenStream{Got: *tokenSlot, Acceptable: tokenTypesForValue}
		default:
			panic("unreachable phase")
		}
	case TArrOpen:
		switch phase {
		case phase_mapDefExpectValue, phase_mapIndefExpectValue:
			d.current -= 1
			fallthrough
		case phase_anyExpectValue, phase_arrDefExpectValueOrEnd, phase_arrIndefExpectValueOrEnd:
			if tokenSlot.Tagged {
				d.emitMajorPlusLen(cborMajorTag, uint64(tokenSlot.Tag))
			}
			if tokenSlot.Length >= 0 {
				d.pushPhase(phase_arrDefExpectValueOrEnd)
				d.emitMajorPlusLen(cborMajorArray, uint64(tokenSlot.Length))
			} else {
				d.pushPhase(phase_arrIndefExpectValueOrEnd)
				d.w.writen1(cborSigilIndefiniteArray)
			}
			return false, d.w.checkErr()
		case phase_mapDefExpectKeyOrEnd, phase_mapIndefExpectKeyOrEnd:
			return true, &ErrInvalidTokenStream{Got: *tokenSlot, Acceptable: tokenTypesForKey}
		default:
			panic("unreachable phase")
		}
	case TArrClose:
		switch phase {
		case phase_arrDefExpectValueOrEnd:
			return d.popPhase(), nil
		case phase_arrIndefExpectValueOrEnd:
			d.w.writen1(cborSigilBreak)
			return d.popPhase(), d.w.checkErr()
		case phase_anyExpectValue, phase_mapDefExpectValue, phase_mapIndefExpectValue:
			return true, &ErrInvalidTokenStream{Got: *tokenSlot, Acceptable: tokenTypesForValue}
		case phase_mapDefExpectKeyOrEnd, phase_mapIndefExpectKeyOrEnd:
			return true, &ErrInvalidTokenStream{Got: *tokenSlot, Acceptable: tokenTypesForKey}
		default:
			panic("unreachable phase")
		}
	case TNull: // terminal value; not accepted as map key.
		switch phase {
		case phase_mapDefExpectValue, phase_mapIndefExpectValue:
			d.current -= 1
			fallthrough
		case phase_anyExpectValue, phase_arrDefExpectValueOrEnd, phase_arrIndefExpectValueOrEnd:
			if tokenSlot.Tagged {
				d.emitMajorPlusLen(cborMajorTag, uint64(tokenSlot.Tag))
			}
			d.w.writen1(cborSigilNil)
			return phase == phase_anyExpectValue, d.w.checkErr()
		case phase_mapDefExpectKeyOrEnd, phase_mapIndefExpectKeyOrEnd:
			return true, &ErrInvalidTokenStream{Got: *tokenSlot, Acceptable: tokenTypesForKey}
		default:
			panic("unreachable phase")
		}
	case TString: // terminal value; YES, accepted as map key.
		switch phase {
		case phase_mapDefExpectValue, phase_mapIndefExpectValue:
			d.current -= 1
			fallthrough
		case phase_anyExpectValue, phase_arrDefExpectValueOrEnd, phase_arrIndefExpectValueOrEnd:
			goto emitStr
		case phase_mapDefExpectKeyOrEnd, phase_mapIndefExpectKeyOrEnd:
			// A key was just consumed; the map now expects a value.
			d.current += 1
			goto emitStr
		default:
			panic("unreachable phase")
		}
	emitStr:
		{
			if tokenSlot.Tagged {
				d.emitMajorPlusLen(cborMajorTag, uint64(tokenSlot.Tag))
			}
			d.encodeString(tokenSlot.Str)
			return phase == phase_anyExpectValue, d.w.checkErr()
		}
	case TBytes: // terminal value; not accepted as map key.
		switch phase {
		case phase_mapDefExpectValue, phase_mapIndefExpectValue:
			d.current -= 1
			fallthrough
		case phase_anyExpectValue, phase_arrDefExpectValueOrEnd, phase_arrIndefExpectValueOrEnd:
			if tokenSlot.Tagged {
				d.emitMajorPlusLen(cborMajorTag, uint64(tokenSlot.Tag))
			}
			d.encodeBytes(tokenSlot.Bytes)
			return phase == phase_anyExpectValue, d.w.checkErr()
		case phase_mapDefExpectKeyOrEnd, phase_mapIndefExpectKeyOrEnd:
			return true, &ErrInvalidTokenStream{Got: *tokenSlot, Acceptable: tokenTypesForKey}
		default:
			panic("unreachable phase")
		}
	case TBool: // terminal value; not accepted as map key.
		switch phase {
		case phase_mapDefExpectValue, phase_mapIndefExpectValue:
			d.current -= 1
			fallthrough
		case phase_anyExpectValue, phase_arrDefExpectValueOrEnd, phase_arrIndefExpectValueOrEnd:
			if tokenSlot.Tagged {
				d.emitMajorPlusLen(cborMajorTag, uint64(tokenSlot.Tag))
			}
			d.encodeBool(tokenSlot.Bool)
			return phase == phase_anyExpectValue, d.w.checkErr()
		case phase_mapDefExpectKeyOrEnd, phase_mapIndefExpectKeyOrEnd:
			return true, &ErrInvalidTokenStream{Got: *tokenSlot, Acceptable: tokenTypesForKey}
		default:
			panic("unreachable phase")
		}
	case TInt: // terminal value; YES, accepted as map key.
		switch phase {
		case phase_mapDefExpectValue, phase_mapIndefExpectValue:
			d.current -= 1
			fallthrough
		case phase_anyExpectValue, phase_arrDefExpectValueOrEnd, phase_arrIndefExpectValueOrEnd:
			goto emitInt
		case phase_mapDefExpectKeyOrEnd, phase_mapIndefExpectKeyOrEnd:
			d.current += 1
			goto emitInt
		default:
			panic("unreachable phase")
		}
	emitInt:
		{
			if tokenSlot.Tagged {
				d.emitMajorPlusLen(cborMajorTag, uint64(tokenSlot.Tag))
			}
			d.encodeInt64(tokenSlot.Int)
			return phase == phase_anyExpectValue, d.w.checkErr()
		}
	case TUint: // terminal value; YES, accepted as map key.
		switch phase {
		case phase_mapDefExpectValue, phase_mapIndefExpectValue:
			d.current -= 1
			fallthrough
		case phase_anyExpectValue, phase_arrDefExpectValueOrEnd, phase_arrIndefExpectValueOrEnd:
			goto emitUint
		case phase_mapDefExpectKeyOrEnd, phase_mapIndefExpectKeyOrEnd:
			d.current += 1
			goto emitUint
		default:
			panic("unreachable phase")
		}
	emitUint:
		{
			if tokenSlot.Tagged {
				d.emitMajorPlusLen(cborMajorTag, uint64(tokenSlot.Tag))
			}
			d.encodeUint64(tokenSlot.Uint)
			return phase == phase_anyExpectValue, d.w.checkErr()
		}
	case TFloat64: // terminal value; not accepted as map key.
		switch phase {
		case phase_mapDefExpectValue, phase_mapIndefExpectValue:
			d.current -= 1
			fallthrough
		case phase_anyExpectValue, phase_arrDefExpectValueOrEnd, phase_arrIndefExpectValueOrEnd:
			if tokenSlot.Tagged {
				d.emitMajorPlusLen(cborMajorTag, uint64(tokenSlot.Tag))
			}
			d.encodeFloat64(tokenSlot.Float64)
			return phase == phase_anyExpectValue, d.w.checkErr()
		case phase_mapDefExpectKeyOrEnd, phase_mapIndefExpectKeyOrEnd:
			return true, &ErrInvalidTokenStream{Got: *tokenSlot, Acceptable: tokenTypesForKey}
		default:
			panic("unreachable phase")
		}
	default:
		panic("unhandled token type")
	}
}
package bytesutil
import (
"bytes"
"fmt"
"sort"
)
// Sort sorts a slice of byte slices in ascending bytes.Compare order, in place.
func Sort(a [][]byte) {
	sort.Sort(byteSlices(a))
}
// IsSorted reports whether a is sorted in ascending bytes.Compare order.
func IsSorted(a [][]byte) bool {
	return sort.IsSorted(byteSlices(a))
}
// SearchBytes binary-searches the sorted slice a and returns the smallest
// index i at which a[i] >= x (len(a) if no such element exists).
func SearchBytes(a [][]byte, x []byte) int {
	return sort.Search(len(a), func(i int) bool { return bytes.Compare(a[i], x) >= 0 })
}
// SearchBytesFixed searches a, interpreted as a sequence of fixed-size entries
// of sz bytes each, using a binary search driven by fn (which must report
// whether an entry is >= the sought value). The length of a must be a multiple
// of sz or else the function panics. The returned value is the byte index
// within a where the sought entry should exist; the caller should ensure that
// it actually does exist at that index.
func SearchBytesFixed(a []byte, sz int, fn func(x []byte) bool) int {
	if len(a)%sz != 0 {
		// The previous message read "x is not a multiple of a", inverting
		// the invariant actually being checked (len(a) divisible by sz).
		panic(fmt.Sprintf("len(a) is not a multiple of sz: %d %d", len(a), sz))
	}

	i, j := 0, len(a)-sz
	for i < j {
		// Midpoint, rounded down to an entry boundary.
		h := int(uint(i+j) >> 1)
		h -= h % sz
		if !fn(a[h : h+sz]) {
			i = h + sz
		} else {
			j = h
		}
	}
	return i
}
// Union returns the sorted union of the two sorted slices a and b.
// Elements present in both inputs appear once in the result.
func Union(a, b [][]byte) [][]byte {
	capacity := len(a)
	if len(b) > capacity {
		capacity = len(b)
	}
	out := make([][]byte, 0, capacity)

	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch bytes.Compare(a[i], b[j]) {
		case 0:
			// Equal: emit once, advance both.
			out = append(out, a[i])
			i++
			j++
		case -1:
			out = append(out, a[i])
			i++
		default:
			out = append(out, b[j])
			j++
		}
	}
	// At most one of these tails is non-empty.
	out = append(out, a[i:]...)
	out = append(out, b[j:]...)
	return out
}
// Intersect returns the sorted intersection of the two sorted slices a and b.
func Intersect(a, b [][]byte) [][]byte {
	capacity := len(a)
	if len(b) > capacity {
		capacity = len(b)
	}
	out := make([][]byte, 0, capacity)

	for i, j := 0, 0; i < len(a) && j < len(b); {
		switch bytes.Compare(a[i], b[j]) {
		case 0:
			out = append(out, a[i])
			i++
			j++
		case -1:
			i++
		default:
			j++
		}
	}
	return out
}
// Clone returns a copy of b. A nil input yields nil; a non-nil empty input
// yields a non-nil empty slice.
func Clone(b []byte) []byte {
	if b == nil {
		return nil
	}
	return append(make([]byte, 0, len(b)), b...)
}
// CloneSlice returns a deep copy of a: the outer slice and every inner byte
// slice are freshly allocated (nil elements stay nil, per Clone).
func CloneSlice(a [][]byte) [][]byte {
	cloned := make([][]byte, len(a))
	for i, b := range a {
		cloned[i] = Clone(b)
	}
	return cloned
}
// Pack converts a sparse array to a dense one. It removes sections of a
// containing runs of val of length width. The returned value is a subslice
// of a; packing is done in place.
//
// a is treated as a sequence of width-sized records; a record is considered
// part of a gap when its first byte equals val.
//
// Cleanups vs. the previous version: the `fill` buffer was built but never
// read, and `i = iStart + len(a[jStart:j])` was immediately overwritten by
// `i = j` (leaving `iStart` unused) — all dead code, now removed.
func Pack(a []byte, width int, val byte) []byte {
	var i, j, jStart, end int

	// Records before the first gap keep their position; skip past them.
	for ; i < len(a) && a[i] != val; i += width {
	}
	end = i

	for i < len(a) {
		// Skip the gap (run of val records) starting at i.
		for i < len(a) && a[i] == val {
			i += width
		}

		// Find the extent of the next run of non-gap records.
		jStart = i
		for j = i; j < len(a) && a[j] != val; j += width {
		}

		if jStart == len(a) {
			// Trailing gap: nothing left to move.
			break
		}

		// Slide the non-gap run left over the removed gap.
		copy(a[end:], a[jStart:j])
		end += j - jStart
		i = j
	}

	return a[:end]
}
// byteSlices implements sort.Interface for a slice of byte slices,
// ordering elements with bytes.Compare.
type byteSlices [][]byte

func (a byteSlices) Len() int           { return len(a) }
func (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 }
func (a byteSlices) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
package operations
import (
"hash/crc32"
"github.com/rameshvarun/ups/common"
)
// Diff takes in a base buffer, a modified buffer, and returns a PatchData object
// that can be used to write to a UPS file.
func Diff(base []byte, modified []byte) *common.PatchData {
// Cumulative list of blocks that we construct as we scan through the buffers.
var blocks []common.PatchBlock
// The end position of the last patch block that we saw.
lastBlock := uint64(0)
// The current block that we are constructing. `nil` if there is no current block.
var currentBlock *struct {
Data []byte
Start uint64
}
for pointer := 0; pointer < len(modified); pointer++ {
// Determine if the byte has been 'modified'
var different bool
if pointer >= len(base) {
// If the output file is larger than the input, and the byte in this extended
// region is non-zero, then it is 'modified'
different = modified[pointer] != 0
} else {
// Otherwise, simply check if the byte is different.
different = modified[pointer] != base[pointer]
}
if different {
// If the current byte is modified, but we are not constructing a block,
// we need to create an in-progress block, then add in the data.
if currentBlock == nil {
currentBlock = &struct {
Data []byte
Start uint64
}{
Data: []byte{},
Start: uint64(pointer),
}
}
// If we are constructing an in-progress block, just add in the data.
if currentBlock != nil {
if pointer >= len(base) {
currentBlock.Data = append(currentBlock.Data, modified[pointer])
} else {
currentBlock.Data = append(currentBlock.Data, base[pointer]^modified[pointer])
}
}
} else {
if currentBlock != nil {
// This block has ended.
blocks = append(blocks, common.PatchBlock{
Data: currentBlock.Data,
RelativeOffset: currentBlock.Start - lastBlock,
})
currentBlock = nil
// lastBlock needs to point to the byte after the unmodified byte that ended
// the block.
lastBlock = uint64(pointer) + 1
}
}
}
// If we ended the loop on a block, then we need to end that block.
if currentBlock != nil {
blocks = append(blocks, common.PatchBlock{
Data: currentBlock.Data,
RelativeOffset: currentBlock.Start - lastBlock,
})
}
// Return the full patch data structure.
return &common.PatchData{
InputFileSize: uint64(len(base)),
OutputFileSize: uint64(len(modified)),
PatchBlocks: blocks,
InputChecksum: crc32.ChecksumIEEE(base),
OutputChecksum: crc32.ChecksumIEEE(modified),
}
} | operations/diff.go | 0.665845 | 0.434221 | diff.go | starcoder |
package gldriver
import (
"encoding/binary"
"image"
"image/color"
"image/draw"
"github.com/oakmound/shiny/screen"
"golang.org/x/mobile/gl"
)
// textureImpl is an OpenGL texture owned by a window's GL context, with a
// framebuffer that is lazily created the first time the texture is used as
// a fill target.
type textureImpl struct {
	w    *windowImpl    // window whose GL context owns the texture
	id   gl.Texture     // the GL texture object
	fb   gl.Framebuffer // framebuffer wrapping id; zero value until first Fill
	size image.Point    // texture dimensions in pixels
}
// Size returns the texture dimensions in pixels.
func (t *textureImpl) Size() image.Point { return t.size }

// Bounds returns the texture's bounds as a rectangle anchored at the origin.
func (t *textureImpl) Bounds() image.Rectangle { return image.Rectangle{Max: t.size} }
// Release frees the GL objects backing the texture. It takes the window's
// GL-context mutex to serialize with other users of the context.
func (t *textureImpl) Release() {
	t.w.glctxMu.Lock()
	defer t.w.glctxMu.Unlock()

	// Delete the framebuffer first, if Fill ever created one.
	if t.fb.Value != 0 {
		t.w.glctx.DeleteFramebuffer(t.fb)
		t.fb = gl.Framebuffer{}
	}
	t.w.glctx.DeleteTexture(t.id)
	t.id = gl.Texture{}
}
// Upload copies the sr rectangle of src into the texture at dp, clipping the
// rectangle to both the source buffer and the texture bounds.
func (t *textureImpl) Upload(dp image.Point, src screen.Image, sr image.Rectangle) {
	buf := src.(*bufferImpl)
	buf.preUpload()

	// src2dst is added to convert from the src coordinate space to the dst
	// coordinate space. It is subtracted to convert the other way.
	src2dst := dp.Sub(sr.Min)

	// Clip to the source.
	sr = sr.Intersect(buf.Bounds())

	// Clip to the destination.
	dr := sr.Add(src2dst)
	dr = dr.Intersect(t.Bounds())
	if dr.Empty() {
		return
	}

	// Bring dr.Min in dst-space back to src-space to get the pixel buffer offset.
	pix := buf.rgba.Pix[buf.rgba.PixOffset(dr.Min.X-src2dst.X, dr.Min.Y-src2dst.Y):]

	t.w.glctxMu.Lock()
	defer t.w.glctxMu.Unlock()

	t.w.glctx.BindTexture(gl.TEXTURE_2D, t.id)

	// Fast path: rows are contiguous in the pixel buffer, so the whole
	// rectangle can be uploaded in one call.
	width := dr.Dx()
	if width*4 == buf.rgba.Stride {
		t.w.glctx.TexSubImage2D(gl.TEXTURE_2D, 0, dr.Min.X, dr.Min.Y, width, dr.Dy(), gl.RGBA, gl.UNSIGNED_BYTE, pix)
		return
	}
	// TODO: can we use GL_UNPACK_ROW_LENGTH with glPixelStorei for stride in
	// ES 3.0, instead of uploading the pixels row-by-row?
	for y, p := dr.Min.Y, 0; y < dr.Max.Y; y++ {
		t.w.glctx.TexSubImage2D(gl.TEXTURE_2D, 0, dr.Min.X, y, width, 1, gl.RGBA, gl.UNSIGNED_BYTE, pix[p:])
		p += buf.rgba.Stride
	}
}
// Fill draws a uniform color over the dr rectangle of the texture, rendering
// into the texture's (lazily created) framebuffer.
func (t *textureImpl) Fill(dr image.Rectangle, src color.Color, op draw.Op) {
	minX := float64(dr.Min.X)
	minY := float64(dr.Min.Y)
	maxX := float64(dr.Max.X)
	maxY := float64(dr.Max.Y)
	mvp := calcMVP(
		t.size.X, t.size.Y,
		minX, minY,
		maxX, minY,
		minX, maxY,
	)

	glctx := t.w.glctx

	t.w.glctxMu.Lock()
	defer t.w.glctxMu.Unlock()

	// Create the framebuffer on first use and attach the texture as its
	// color attachment.
	create := t.fb.Value == 0
	if create {
		t.fb = glctx.CreateFramebuffer()
	}
	glctx.BindFramebuffer(gl.FRAMEBUFFER, t.fb)
	if create {
		glctx.FramebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, t.id, 0)
	}
	glctx.Viewport(0, 0, t.size.X, t.size.Y)
	doFill(t.w.s, t.w.glctx, mvp, src, op)

	// We can't restore the GL state (i.e. bind the back buffer, also known as
	// gl.Framebuffer{Value: 0}) right away, since we don't necessarily know
	// the right viewport size yet. It is valid to call textureImpl.Fill before
	// we've gotten our first size.Event. We bind it lazily instead.
	t.w.backBufferBound = false
}
// quadCoords holds the unit-quad corner coordinates, serialized as
// little-endian float32 bytes for use as vertex attribute data.
var quadCoords = f32Bytes(binary.LittleEndian,
	0, 0, // top left
	1, 0, // top right
	0, 1, // bottom left
	1, 1, // bottom right
)

// textureVertexSrc transforms quad positions by mvp and texture coordinates
// by uvp, passing the resulting UV to the fragment shader.
const textureVertexSrc = `#version 100
uniform mat3 mvp;
uniform mat3 uvp;
attribute vec3 pos;
attribute vec2 inUV;
varying vec2 uv;
void main() {
vec3 p = pos;
p.z = 1.0;
gl_Position = vec4(mvp * p, 1);
uv = (uvp * vec3(inUV, 1)).xy;
}
`

// textureFragmentSrc samples the bound texture at the interpolated UV.
const textureFragmentSrc = `#version 100
precision mediump float;
varying vec2 uv;
uniform sampler2D sample;
void main() {
gl_FragColor = texture2D(sample, uv);
}
`

// fillVertexSrc transforms quad positions by mvp for solid-color fills.
const fillVertexSrc = `#version 100
uniform mat3 mvp;
attribute vec3 pos;
void main() {
vec3 p = pos;
p.z = 1.0;
gl_Position = vec4(mvp * p, 1);
}
`

// fillFragmentSrc writes the uniform fill color to every fragment.
const fillFragmentSrc = `#version 100
precision mediump float;
uniform vec4 color;
void main() {
gl_FragColor = color;
}
`
package arrowtools
import (
"fmt"
"github.com/apache/arrow/go/arrow"
"github.com/apache/arrow/go/arrow/array"
"github.com/stretchr/testify/assert"
)
// ColumnsEqual returns a boolean indicating whether the data in the
// two given columns are equal. If the data are not equal, a brief
// message describing the difference is returned.
//
// The comparison is chunk-by-chunk: both columns must have the same
// data type, the same number of chunks, and each corresponding chunk
// must hold equal values and equal null bitmaps. An unsupported column
// type yields (false, message) rather than a panic, consistent with
// the function's error-reporting contract.
func ColumnsEqual(col1, col2 *array.Column) (bool, string) {
	if col1.DataType().ID() != col2.DataType().ID() {
		return false, "Inconsistent types"
	}
	chunks1 := col1.Data().Chunks()
	chunks2 := col2.Data().Chunks()
	if len(chunks1) != len(chunks2) {
		return false, fmt.Sprintf("Unequal chunk counts, %d != %d", len(chunks1), len(chunks2))
	}
	for k := range chunks1 {
		chunk1 := chunks1[k]
		chunk2 := chunks2[k]
		// The arrow package exposes each primitive type as a singleton
		// value, so comparing the DataType interface directly is valid.
		switch col1.DataType() {
		case arrow.PrimitiveTypes.Uint8:
			y1 := array.NewUint8Data(chunk1.Data())
			y2 := array.NewUint8Data(chunk2.Data())
			if !assert.ObjectsAreEqualValues(y1.Uint8Values(), y2.Uint8Values()) {
				return false, fmt.Sprintf("Unequal uint8 values in chunk %d.\n", k)
			}
			if !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {
				return false, fmt.Sprintf("Unequal valid mask in chunk %d.\n", k)
			}
		case arrow.PrimitiveTypes.Uint16:
			y1 := array.NewUint16Data(chunk1.Data())
			y2 := array.NewUint16Data(chunk2.Data())
			if !assert.ObjectsAreEqualValues(y1.Uint16Values(), y2.Uint16Values()) {
				return false, fmt.Sprintf("Unequal uint16 values in chunk %d.\n", k)
			}
			if !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {
				return false, fmt.Sprintf("Unequal valid mask in chunk %d.\n", k)
			}
		case arrow.PrimitiveTypes.Uint32:
			y1 := array.NewUint32Data(chunk1.Data())
			y2 := array.NewUint32Data(chunk2.Data())
			if !assert.ObjectsAreEqualValues(y1.Uint32Values(), y2.Uint32Values()) {
				return false, fmt.Sprintf("Unequal uint32 values in chunk %d.\n", k)
			}
			if !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {
				return false, fmt.Sprintf("Unequal valid mask in chunk %d.\n", k)
			}
		case arrow.PrimitiveTypes.Uint64:
			y1 := array.NewUint64Data(chunk1.Data())
			y2 := array.NewUint64Data(chunk2.Data())
			if !assert.ObjectsAreEqualValues(y1.Uint64Values(), y2.Uint64Values()) {
				return false, fmt.Sprintf("Unequal uint64 values in chunk %d.\n", k)
			}
			if !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {
				return false, fmt.Sprintf("Unequal valid mask in chunk %d.\n", k)
			}
		case arrow.PrimitiveTypes.Int8:
			y1 := array.NewInt8Data(chunk1.Data())
			y2 := array.NewInt8Data(chunk2.Data())
			if !assert.ObjectsAreEqualValues(y1.Int8Values(), y2.Int8Values()) {
				return false, fmt.Sprintf("Unequal int8 values in chunk %d.\n", k)
			}
			if !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {
				return false, fmt.Sprintf("Unequal valid mask in chunk %d.\n", k)
			}
		case arrow.PrimitiveTypes.Int16:
			y1 := array.NewInt16Data(chunk1.Data())
			y2 := array.NewInt16Data(chunk2.Data())
			if !assert.ObjectsAreEqualValues(y1.Int16Values(), y2.Int16Values()) {
				return false, fmt.Sprintf("Unequal int16 values in chunk %d.\n", k)
			}
			if !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {
				return false, fmt.Sprintf("Unequal valid mask in chunk %d.\n", k)
			}
		case arrow.PrimitiveTypes.Int32:
			y1 := array.NewInt32Data(chunk1.Data())
			y2 := array.NewInt32Data(chunk2.Data())
			if !assert.ObjectsAreEqualValues(y1.Int32Values(), y2.Int32Values()) {
				return false, fmt.Sprintf("Unequal int32 values in chunk %d.\n", k)
			}
			if !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {
				return false, fmt.Sprintf("Unequal valid mask in chunk %d.\n", k)
			}
		case arrow.PrimitiveTypes.Int64:
			y1 := array.NewInt64Data(chunk1.Data())
			y2 := array.NewInt64Data(chunk2.Data())
			if !assert.ObjectsAreEqualValues(y1.Int64Values(), y2.Int64Values()) {
				return false, fmt.Sprintf("Unequal int64 values in chunk %d.\n", k)
			}
			if !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {
				return false, fmt.Sprintf("Unequal valid mask in chunk %d.\n", k)
			}
		case arrow.PrimitiveTypes.Float32:
			y1 := array.NewFloat32Data(chunk1.Data())
			y2 := array.NewFloat32Data(chunk2.Data())
			if !assert.ObjectsAreEqualValues(y1.Float32Values(), y2.Float32Values()) {
				return false, fmt.Sprintf("Unequal float32 values in chunk %d.\n", k)
			}
			if !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {
				return false, fmt.Sprintf("Unequal valid mask in chunk %d.\n", k)
			}
		case arrow.PrimitiveTypes.Float64:
			y1 := array.NewFloat64Data(chunk1.Data())
			y2 := array.NewFloat64Data(chunk2.Data())
			if !assert.ObjectsAreEqualValues(y1.Float64Values(), y2.Float64Values()) {
				return false, fmt.Sprintf("Unequal float64 values in chunk %d.\n", k)
			}
			if !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {
				return false, fmt.Sprintf("Unequal valid mask in chunk %d.\n", k)
			}
		case arrow.BinaryTypes.String:
			y1 := array.NewStringData(chunk1.Data())
			y2 := array.NewStringData(chunk2.Data())
			if y1.Len() != y2.Len() {
				return false, fmt.Sprintf("Unequal lengths of string values in chunk %d\n", k)
			}
			if !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {
				return false, fmt.Sprintf("Unequal valid mask in chunk %d.\n", k)
			}
			// Strings must be compared element-by-element; there is no
			// bulk values accessor for variable-length data.
			for i := 0; i < y1.Len(); i++ {
				if y1.Value(i) != y2.Value(i) {
					return false, fmt.Sprintf("Unequal string values in chunk %d\n", k)
				}
			}
		default:
			// Report unsupported types through the normal (bool, message)
			// contract instead of panicking; panicking in library code on
			// valid-but-unhandled input crashes the caller needlessly.
			return false, fmt.Sprintf("Unsupported data type %v in chunk %d", col1.DataType(), k)
		}
	}
	return true, ""
}
// TablesEqual returns a boolean indicating whether the two
// given tables contain equal data. A message describing any
// differences is also returned.
func TablesEqual(tbl1, tbl2 array.Table) (bool, string) {
m1 := tbl1.NumCols()
m2 := tbl2.NumCols()
if m1 != m2 {
return false, fmt.Sprintf("Inconsistent number of columns, %d != %d", m1, m2)
}
for i := 0; i < int(m1); i++ {
col1 := tbl1.Column(i)
col2 := tbl2.Column(i)
b, msg := ColumnsEqual(col1, col2)
if !b {
return false, msg
}
}
return true, ""
} | gen_equality.go | 0.72594 | 0.690898 | gen_equality.go | starcoder |
package cephes
import (
"math"
"gonum/mathext/internal/gonum"
)
const (
	// maxGam is the largest argument for which the Gamma function can be
	// evaluated without overflow; beyond it the code switches to logarithms.
	maxGam = 171.624376956302725
	// big and biginv rescale continued-fraction numerators/denominators
	// to keep the iteration within floating-point range.
	big = 4.503599627370496e15
	biginv = 2.22044604925031308085e-16
)
// Incbet computes the regularized incomplete beta function I_x(a, b).
//
// It panics if aa <= 0, bb <= 0, or xx lies outside [0, 1]; exact 0 and 1
// return 0 and 1 respectively. Depending on the arguments it selects a
// power series (pseries), one of two continued-fraction expansions
// (incbcf/incbd), or falls back to logarithms when direct evaluation
// would overflow. This is a port of the Cephes incbet routine; the
// statement order matches the original and is numerically significant.
func Incbet(aa, bb, xx float64) float64 {
	if aa <= 0 || bb <= 0 {
		panic(badParamOutOfBounds)
	}
	if xx <= 0 || xx >= 1 {
		if xx == 0 {
			return 0
		}
		if xx == 1 {
			return 1
		}
		panic(badParamOutOfBounds)
	}
	// flag records whether a/b were swapped; transformT undoes the swap
	// via the symmetry I_x(a,b) = 1 - I_{1-x}(b,a).
	var flag int
	if bb*xx <= 1 && xx <= 0.95 {
		t := pseries(aa, bb, xx)
		return transformT(t, flag)
	}
	w := 1 - xx
	// Reverse a and b if x is greater than the mean.
	var a, b, xc, x float64
	if xx > aa/(aa+bb) {
		flag = 1
		a = bb
		b = aa
		xc = xx
		x = w
	} else {
		a = aa
		b = bb
		xc = w
		x = xx
	}
	if flag == 1 && (b*x) <= 1.0 && x <= 0.95 {
		t := pseries(a, b, x)
		return transformT(t, flag)
	}
	// Choose expansion for better convergence.
	y := x*(a+b-2.0) - (a - 1.0)
	if y < 0.0 {
		w = incbcf(a, b, x)
	} else {
		w = incbd(a, b, x) / xc
	}
	// Multiply w by the factor
	// x^a * (1-x)^b * Γ(a+b) / (a*Γ(a)*Γ(b))
	var t float64
	y = a * math.Log(x)
	t = b * math.Log(xc)
	if (a+b) < maxGam && math.Abs(y) < maxLog && math.Abs(t) < maxLog {
		t = math.Pow(xc, b)
		t *= math.Pow(x, a)
		t /= a
		t *= w
		t *= 1.0 / gonum.Beta(a, b)
		return transformT(t, flag)
	}
	// Resort to logarithms.
	y += t - gonum.Lbeta(a, b)
	y += math.Log(w / a)
	if y < minLog {
		t = 0.0
	} else {
		t = math.Exp(y)
	}
	return transformT(t, flag)
}
// transformT undoes the a/b swap performed by Incbet using the symmetry
// I_x(a,b) = 1 - I_{1-x}(b,a). When flag is set the value is reflected
// about 1, clamped away from exactly 1 by the machine epsilon.
func transformT(t float64, flag int) float64 {
	if flag != 1 {
		return t
	}
	if t <= machEp {
		return 1.0 - machEp
	}
	return 1.0 - t
}
// incbcf returns the incomplete beta integral evaluated by a continued fraction
// expansion.
//
// The loop alternates two recurrence steps per iteration and stops when
// successive convergents agree to within 3*machEp, or after 300 iterations.
// The big/biginv rescaling keeps the recurrence terms in range. Ported from
// Cephes; the exact update order is numerically significant.
func incbcf(a, b, x float64) float64 {
	var xk, pk, pkm1, pkm2, qk, qkm1, qkm2 float64
	var k1, k2, k3, k4, k5, k6, k7, k8 float64
	var r, t, ans, thresh float64
	var n int
	k1 = a
	k2 = a + b
	k3 = a
	k4 = a + 1.0
	k5 = 1.0
	k6 = b - 1.0
	k7 = k4
	k8 = a + 2.0
	pkm2 = 0.0
	qkm2 = 1.0
	pkm1 = 1.0
	qkm1 = 1.0
	ans = 1.0
	r = 1.0
	thresh = 3.0 * machEp
	for n = 0; n <= 300; n++ {
		// Odd continued-fraction step.
		xk = -(x * k1 * k2) / (k3 * k4)
		pk = pkm1 + pkm2*xk
		qk = qkm1 + qkm2*xk
		pkm2 = pkm1
		pkm1 = pk
		qkm2 = qkm1
		qkm1 = qk
		// Even continued-fraction step.
		xk = (x * k5 * k6) / (k7 * k8)
		pk = pkm1 + pkm2*xk
		qk = qkm1 + qkm2*xk
		pkm2 = pkm1
		pkm1 = pk
		qkm2 = qkm1
		qkm1 = qk
		if qk != 0 {
			r = pk / qk
		}
		if r != 0 {
			t = math.Abs((ans - r) / r)
			ans = r
		} else {
			t = 1.0
		}
		if t < thresh {
			return ans
		}
		k1 += 1.0
		k2 += 1.0
		k3 += 2.0
		k4 += 2.0
		k5 += 1.0
		k6 -= 1.0
		k7 += 2.0
		k8 += 2.0
		// Rescale to avoid overflow/underflow of the recurrence terms.
		if (math.Abs(qk) + math.Abs(pk)) > big {
			pkm2 *= biginv
			pkm1 *= biginv
			qkm2 *= biginv
			qkm1 *= biginv
		}
		if (math.Abs(qk) < biginv) || (math.Abs(pk) < biginv) {
			pkm2 *= big
			pkm1 *= big
			qkm2 *= big
			qkm1 *= big
		}
	}
	return ans
}
// incbd returns the incomplete beta integral evaluated by a continued fraction
// expansion.
//
// Unlike incbcf, this variant works in the transformed variable
// z = x/(1-x), which converges better for the complementary parameter
// region chosen by Incbet. Same convergence/rescaling scheme as incbcf.
func incbd(a, b, x float64) float64 {
	var xk, pk, pkm1, pkm2, qk, qkm1, qkm2 float64
	var k1, k2, k3, k4, k5, k6, k7, k8 float64
	var r, t, ans, z, thresh float64
	var n int
	k1 = a
	k2 = b - 1.0
	k3 = a
	k4 = a + 1.0
	k5 = 1.0
	k6 = a + b
	k7 = a + 1.0
	k8 = a + 2.0
	pkm2 = 0.0
	qkm2 = 1.0
	pkm1 = 1.0
	qkm1 = 1.0
	z = x / (1.0 - x)
	ans = 1.0
	r = 1.0
	thresh = 3.0 * machEp
	for n = 0; n <= 300; n++ {
		// Odd continued-fraction step.
		xk = -(z * k1 * k2) / (k3 * k4)
		pk = pkm1 + pkm2*xk
		qk = qkm1 + qkm2*xk
		pkm2 = pkm1
		pkm1 = pk
		qkm2 = qkm1
		qkm1 = qk
		// Even continued-fraction step.
		xk = (z * k5 * k6) / (k7 * k8)
		pk = pkm1 + pkm2*xk
		qk = qkm1 + qkm2*xk
		pkm2 = pkm1
		pkm1 = pk
		qkm2 = qkm1
		qkm1 = qk
		if qk != 0 {
			r = pk / qk
		}
		if r != 0 {
			t = math.Abs((ans - r) / r)
			ans = r
		} else {
			t = 1.0
		}
		if t < thresh {
			return ans
		}
		k1 += 1.0
		k2 -= 1.0
		k3 += 2.0
		k4 += 2.0
		k5 += 1.0
		k6 += 1.0
		k7 += 2.0
		k8 += 2.0
		// Rescale to avoid overflow/underflow of the recurrence terms.
		if (math.Abs(qk) + math.Abs(pk)) > big {
			pkm2 *= biginv
			pkm1 *= biginv
			qkm2 *= biginv
			qkm1 *= biginv
		}
		if (math.Abs(qk) < biginv) || (math.Abs(pk) < biginv) {
			pkm2 *= big
			pkm1 *= big
			qkm2 *= big
			qkm1 *= big
		}
	}
	return ans
}
// pseries returns the incomplete beta integral evaluated by a power series. Use
// when b*x is small and x not too close to 1.
//
// The series is summed until the next term is below machEp/a, then the
// result is scaled by x^a / (a * B(a,b)), switching to logarithms when the
// direct product would overflow.
func pseries(a, b, x float64) float64 {
	var s, t, u, v, n, t1, z, ai float64
	ai = 1.0 / a
	u = (1.0 - b) * x
	v = u / (a + 1.0)
	t1 = v
	t = u
	n = 2.0
	s = 0.0
	z = machEp * ai
	// Accumulate series terms until they fall below the tolerance z.
	for math.Abs(v) > z {
		u = (n - b) * x / n
		t *= u
		v = t / (a + n)
		s += v
		n += 1.0
	}
	s += t1
	s += ai
	u = a * math.Log(x)
	if (a+b) < maxGam && math.Abs(u) < maxLog {
		// Safe to evaluate the prefactor directly.
		t = 1.0 / gonum.Beta(a, b)
		s = s * t * math.Pow(x, a)
	} else {
		// Resort to logarithms to avoid overflow.
		t = -gonum.Lbeta(a, b) + u + math.Log(s)
		if t < minLog {
			s = 0.0
		} else {
			s = math.Exp(t)
		}
	}
	return (s)
}
package evaluator
import (
"fmt"
"github.com/optimizely/go-sdk/pkg/entities"
)
// customAttributeType is the condition type understood by the evaluator.
const customAttributeType = "custom_attribute"

const (
	// "and" operator returns true if all conditions evaluate to true
	andOperator = "and"
	// "not" operator negates the result of the given condition
	notOperator = "not"
	// "or" operator returns true if any of the conditions evaluate to true
	// orOperator = "or"
)
// TreeEvaluator evaluates a tree of conditions against user attributes,
// returning both the result and whether the evaluation was valid.
type TreeEvaluator interface {
	Evaluate(*entities.TreeNode, *entities.TreeParameters) (evalResult, isValid bool)
}

// MixedTreeEvaluator evaluates a tree of mixed node types (condition node or audience nodes)
type MixedTreeEvaluator struct {
}

// NewMixedTreeEvaluator creates a condition tree evaluator with the out-of-the-box condition evaluators
func NewMixedTreeEvaluator() *MixedTreeEvaluator {
	return &MixedTreeEvaluator{}
}
// Evaluate returns whether the userAttributes satisfy the given condition tree
// and whether the evaluation of the condition is valid (to handle null bubbling).
//
// Operator nodes dispatch to the and/not/or helpers; any unrecognized
// operator is treated as "or". Leaf nodes dispatch on the concrete type
// of Item: an entities.Condition uses the custom-attribute evaluator,
// a string uses the audience evaluator.
func (c MixedTreeEvaluator) Evaluate(node *entities.TreeNode, condTreeParams *entities.TreeParameters) (evalResult, isValid bool) {
	operator := node.Operator
	if operator != "" {
		switch operator {
		case andOperator:
			return c.evaluateAnd(node.Nodes, condTreeParams)
		case notOperator:
			return c.evaluateNot(node.Nodes, condTreeParams)
		default: // orOperator
			return c.evaluateOr(node.Nodes, condTreeParams)
		}
	}
	var result bool
	var err error
	// The type switch already binds the correctly-typed value, so the
	// previous redundant node.Item.(T) re-assertions are unnecessary.
	switch item := node.Item.(type) {
	case entities.Condition:
		evaluator := CustomAttributeConditionEvaluator{}
		result, err = evaluator.Evaluate(item, condTreeParams)
	case string:
		evaluator := AudienceConditionEvaluator{}
		result, err = evaluator.Evaluate(item, condTreeParams)
	default:
		fmt.Printf("I don't know about type %T!\n", item)
		return false, false
	}
	if err != nil {
		// Result is invalid
		return false, false
	}
	return result, true
}
// evaluateAnd implements the "and" operator with null bubbling: a definite
// false decides the conjunction as (false, valid); otherwise, if any
// sub-result was invalid, the whole conjunction is invalid.
//
// Previously the loop returned immediately on the first invalid result,
// which left sawInvalid dead and prevented a later definite false from
// deciding the result — inconsistent with evaluateOr's bubbling pattern.
func (c MixedTreeEvaluator) evaluateAnd(nodes []*entities.TreeNode, condTreeParams *entities.TreeParameters) (evalResult, isValid bool) {
	sawInvalid := false
	for _, node := range nodes {
		result, isValid := c.Evaluate(node, condTreeParams)
		if !isValid {
			sawInvalid = true
		} else if !result {
			return false, true
		}
	}
	if sawInvalid {
		// bubble up the invalid result
		return false, false
	}
	return true, true
}
// evaluateNot implements the "not" operator: it negates the first child's
// result. An empty node list or an invalid child evaluation is invalid.
func (c MixedTreeEvaluator) evaluateNot(nodes []*entities.TreeNode, condTreeParams *entities.TreeParameters) (evalResult, isValid bool) {
	if len(nodes) == 0 {
		return false, false
	}
	result, ok := c.Evaluate(nodes[0], condTreeParams)
	if !ok {
		return false, false
	}
	return !result, true
}
func (c MixedTreeEvaluator) evaluateOr(nodes []*entities.TreeNode, condTreeParams *entities.TreeParameters) (evalResult, isValid bool) {
sawInvalid := false
for _, node := range nodes {
result, isValid := c.Evaluate(node, condTreeParams)
if !isValid {
sawInvalid = true
} else if result {
return result, isValid
}
}
if sawInvalid {
// bubble up the invalid result
return false, false
}
return false, true
} | pkg/decision/evaluator/condition_tree.go | 0.760117 | 0.422564 | condition_tree.go | starcoder |
package graphics
import (
"github.com/go-gl/mathgl/mgl32"
"math"
)
// cameraSpeed is the camera translation speed in world units per second.
const cameraSpeed = float64(320) * 2

// sensitivity scales raw rotation input into radians per unit of input.
const sensitivity = float32(0.03)

// minVerticalRotation and maxVerticalRotation clamp the camera's pitch so
// it cannot flip over the vertical axis.
var minVerticalRotation = mgl32.DegToRad(90)
var maxVerticalRotation = mgl32.DegToRad(270)
// Camera is a first-person camera holding a transform plus the derived
// orthonormal basis (direction/right/up) used to build view matrices.
type Camera struct {
	transform Transform // position and orientation
	fov float32 // vertical field of view, in radians
	aspectRatio float32 // viewport width / height
	up mgl32.Vec3 // derived up vector (right × direction)
	right mgl32.Vec3 // derived right vector
	direction mgl32.Vec3 // derived forward (view) vector
	worldUp mgl32.Vec3 // world-space up reference
}
// Fov returns the camera's vertical field of view in radians.
func (camera *Camera) Fov() float32 {
	return camera.fov
}

// AspectRatio returns the camera's viewport aspect ratio.
func (camera *Camera) AspectRatio() float32 {
	return camera.aspectRatio
}

// Transform Returns this entity's transform component
func (camera *Camera) Transform() *Transform {
	return &camera.transform
}
// Forwards moves the camera along its view direction by cameraSpeed*dt.
func (camera *Camera) Forwards(dt float64) {
	step := float32(cameraSpeed * dt)
	t := camera.Transform()
	t.Translation = t.Translation.Add(camera.direction.Mul(step))
}

// Backwards moves the camera against its view direction by cameraSpeed*dt.
func (camera *Camera) Backwards(dt float64) {
	step := float32(cameraSpeed * dt)
	t := camera.Transform()
	t.Translation = t.Translation.Sub(camera.direction.Mul(step))
}

// Left strafes the camera against its right vector by cameraSpeed*dt.
func (camera *Camera) Left(dt float64) {
	step := float32(cameraSpeed * dt)
	t := camera.Transform()
	t.Translation = t.Translation.Sub(camera.right.Mul(step))
}

// Right strafes the camera along its right vector by cameraSpeed*dt.
func (camera *Camera) Right(dt float64) {
	step := float32(cameraSpeed * dt)
	t := camera.Transform()
	t.Translation = t.Translation.Add(camera.right.Mul(step))
}
// Rotate applies sensitivity-scaled rotation deltas to the camera
// orientation, clamping the vertical component to the allowed range.
func (camera *Camera) Rotate(x, y, z float32) {
	o := &camera.Transform().Orientation
	o.V[0] += x * sensitivity
	o.V[1] += y * sensitivity
	o.V[2] += z * sensitivity

	// Lock vertical rotation; only one bound can be exceeded at a time.
	if o.V[2] > maxVerticalRotation {
		o.V[2] = maxVerticalRotation
	} else if o.V[2] < minVerticalRotation {
		o.V[2] = minVerticalRotation
	}
}
// Update updates the camera position
// by recomputing the directional basis vectors; dt is currently unused.
func (camera *Camera) Update(dt float64) {
	camera.updateVectors()
}
// updateVectors recomputes direction, right and up from the orientation's
// yaw (V[0]) and pitch (V[2]) components.
func (camera *Camera) updateVectors() {
	yaw := float64(camera.Transform().Orientation.V[0])
	pitch := float64(camera.Transform().Orientation.V[2])

	// Spherical-to-Cartesian conversion for the forward vector.
	camera.direction = mgl32.Vec3{
		float32(math.Cos(pitch) * math.Sin(yaw)),
		float32(math.Cos(pitch) * math.Cos(yaw)),
		float32(math.Sin(pitch)),
	}

	// The right vector lies in the horizontal plane, 90° from yaw.
	camera.right = mgl32.Vec3{
		float32(math.Sin(yaw - math.Pi/2)),
		float32(math.Cos(yaw - math.Pi/2)),
		0,
	}

	camera.up = camera.right.Cross(camera.direction)
}
// ModelMatrix returns identity matrix (camera model is our position!)
// since the camera itself is never rendered as geometry.
func (camera *Camera) ModelMatrix() mgl32.Mat4 {
	return mgl32.Ident4()
}
// ViewMatrix calculates the camera's view matrix, looking from the camera's
// position along its current direction with its derived up vector.
func (camera *Camera) ViewMatrix() mgl32.Mat4 {
	eye := camera.Transform().Translation
	center := eye.Add(camera.direction)
	return mgl32.LookAtV(eye, center, camera.up)
}
// ProjectionMatrix calculates projection matrix.
// This is unlikely to change throughout program lifetime, but could do
// Near plane is 0.2 and far plane is 16384 world units.
func (camera *Camera) ProjectionMatrix() mgl32.Mat4 {
	return mgl32.Perspective(camera.fov, camera.aspectRatio, 0.2, 16384)
}
// NewCamera returns a new camera
// fov should be provided in radians
func NewCamera(fov float32, aspectRatio float32) *Camera {
return &Camera{
fov: fov,
aspectRatio: aspectRatio,
up: mgl32.Vec3{0, 1, 0},
worldUp: mgl32.Vec3{0, 1, 0},
direction: mgl32.Vec3{0, 0, -1},
}
} | framework/graphics/camera.go | 0.885675 | 0.679128 | camera.go | starcoder |
package diff
import (
"github.com/pusinc/golang-support/helper/contain"
"reflect"
)
func Interface(x interface{}, y interface{}) (reflect.Value, reflect.Value) {
xValue, yValue := reflect.Value{}, reflect.Value{}
if xValueNode, ok := x.(reflect.Value); ok {
xValue = xValueNode
} else {
xValue = reflect.ValueOf(x)
}
if yValueNode, ok := y.(reflect.Value); ok {
yValue = yValueNode
} else {
yValue = reflect.ValueOf(y)
}
if xValue.Type().Kind() == reflect.Ptr {
xValue = xValue.Elem()
}
if yValue.Type().Kind() == reflect.Ptr {
yValue = yValue.Elem()
}
leftSlice := reflect.MakeSlice(xValue.Type(), 0, 0)
rightSlice := reflect.MakeSlice(xValue.Type(), 0, 0)
for i := 0; i < xValue.Len(); i++ {
v := xValue.Index(i)
if contain.Interface(y, v) == false {
leftSlice = reflect.Append(leftSlice, v)
}
}
for i := 0; i < yValue.Len(); i++ {
v := yValue.Index(i)
if contain.Interface(x, v) == false {
rightSlice = reflect.Append(rightSlice, v)
}
}
return leftSlice, rightSlice
}
func String(x []string, y []string) ([]string, []string) {
xResult, yResult := Interface(x, y)
return xResult.Interface().([]string), yResult.Interface().([]string)
}
func Int(x []int, y []int) ([]int, []int) {
xResult, yResult := Interface(x, y)
return xResult.Interface().([]int), yResult.Interface().([]int)
}
func Int8(x []int8, y []int8) ([]int8, []int8) {
xResult, yResult := Interface(x, y)
return xResult.Interface().([]int8), yResult.Interface().([]int8)
}
func Int16(x []int16, y []int16) ([]int16, []int16) {
xResult, yResult := Interface(x, y)
return xResult.Interface().([]int16), yResult.Interface().([]int16)
}
func Int32(x []int32, y []int32) ([]int32, []int32) {
xResult, yResult := Interface(x, y)
return xResult.Interface().([]int32), yResult.Interface().([]int32)
}
func Int64(x []int64, y []int64) ([]int64, []int64) {
xResult, yResult := Interface(x, y)
return xResult.Interface().([]int64), yResult.Interface().([]int64)
}
func Uint(x []uint, y []uint) ([]uint, []uint) {
xResult, yResult := Interface(x, y)
return xResult.Interface().([]uint), yResult.Interface().([]uint)
}
func Uint8(x []uint8, y []uint8) ([]uint8, []uint8) {
xResult, yResult := Interface(x, y)
return xResult.Interface().([]uint8), yResult.Interface().([]uint8)
}
func Uint16(x []uint16, y []uint16) ([]uint16, []uint16) {
xResult, yResult := Interface(x, y)
return xResult.Interface().([]uint16), yResult.Interface().([]uint16)
}
func Uint32(x []uint32, y []uint32) ([]uint32, []uint32) {
xResult, yResult := Interface(x, y)
return xResult.Interface().([]uint32), yResult.Interface().([]uint32)
}
func Uint64(x []uint64, y []uint64) ([]uint64, []uint64) {
xResult, yResult := Interface(x, y)
return xResult.Interface().([]uint64), yResult.Interface().([]uint64)
}
func Float32(x []float32, y []float32) ([]float32, []float32) {
xResult, yResult := Interface(x, y)
return xResult.Interface().([]float32), yResult.Interface().([]float32)
}
func Float64(x []float64, y []float64) ([]float64, []float64) {
xResult, yResult := Interface(x, y)
return xResult.Interface().([]float64), yResult.Interface().([]float64)
} | helper/diff/diff.go | 0.579995 | 0.449091 | diff.go | starcoder |
package sweetiebot
import (
"fmt"
"strconv"
"strings"
"github.com/bwmarrin/discordgo"
)
// AddCommand adds an arbitrary string to one or more collections and
// invokes an optional per-collection handler afterwards.
type AddCommand struct {
	funcmap map[string]func(string) string // optional post-add handlers keyed by collection name
}

func (c *AddCommand) Name() string {
	return "Add"
}

// Process validates that every "+"-separated collection exists, inserts the
// joined argument string into each, runs any registered handlers, and saves
// the guild config.
func (c *AddCommand) Process(args []string, msg *discordgo.Message, info *GuildInfo) (string, bool) {
	if len(args) < 1 {
		return "```No collection given```", false
	}
	if len(args) < 2 {
		return "```Can't add empty string!```", false
	}
	collections := strings.Split(args[0], "+")
	// Validate all targets before mutating any of them.
	for _, v := range collections {
		_, ok := info.config.Collections[v]
		if !ok {
			return fmt.Sprintf("```The %s collection does not exist!```", v), false
		}
	}
	add := ""
	length := make([]string, len(collections), len(collections))
	arg := strings.Join(args[1:], " ")
	for k, v := range collections {
		info.config.Collections[v][arg] = true
		fn, ok := c.funcmap[v]
		length[k] = fmt.Sprintf("Length of %s: %v", v, strconv.Itoa(len(info.config.Collections[v])))
		if ok {
			// Append the handler's extra output to the reply.
			add += " " + fn(arg)
		}
	}
	info.SaveConfig()
	return ExtraSanitize(fmt.Sprintf("```Added %s to %s%s. \n%s```", arg, strings.Join(collections, ", "), add, strings.Join(length, "\n"))), false
}

func (c *AddCommand) Usage(info *GuildInfo) string {
	return info.FormatUsage(c, "[collection(s)] [arbitrary string]", "Adds [arbitrary string] to [collection] (which can be multiple collections by specifying \"collection+collection\"), then calls a handler function for that specific collection.")
}

func (c *AddCommand) UsageShort() string { return "Adds a line to a collection." }
// RemoveCommand removes an arbitrary string from a single collection and
// invokes an optional per-collection handler afterwards.
type RemoveCommand struct {
	funcmap map[string]func(string) string // optional post-remove handlers keyed by collection name
}

func (c *RemoveCommand) Name() string {
	return "Remove"
}

// Process removes the joined argument string from the named collection if
// present, runs any registered handler (whose return value replaces the
// default reply entirely), and saves the guild config.
func (c *RemoveCommand) Process(args []string, msg *discordgo.Message, info *GuildInfo) (string, bool) {
	if len(args) < 1 {
		return "```No collection given```", false
	}
	if len(args) < 2 {
		return "```Can't remove an empty string!```", false
	}
	collection := args[0]
	cmap, ok := info.config.Collections[collection]
	if !ok {
		return "```That collection does not exist!```", false
	}
	arg := strings.Join(args[1:], " ")
	_, ok = cmap[arg]
	if !ok {
		return "```Could not find " + arg + "!```", false
	}
	delete(info.config.Collections[collection], arg)
	fn, ok := c.funcmap[collection]
	retval := "```Removed " + arg + " from " + collection + ". Length of " + collection + ": " + strconv.Itoa(len(info.config.Collections[collection])) + "```"
	if ok {
		// NOTE(review): unlike AddCommand, the handler output replaces the
		// default message instead of being appended — confirm intentional.
		retval = fn(arg)
	}
	info.SaveConfig()
	return ExtraSanitize(retval), false
}

func (c *RemoveCommand) Usage(info *GuildInfo) string {
	return info.FormatUsage(c, "[collection] [arbitrary string]", "Removes [arbitrary string] from [collection] (no quotes are required) and calls a handler function for that collection.")
}

func (c *RemoveCommand) UsageShort() string { return "Removes a line from a collection." }
// CollectionsCommand lists all collections, or dumps the contents of one.
type CollectionsCommand struct {
}

func (c *CollectionsCommand) Name() string {
	return "Collections"
}

// Process with no arguments lists every collection and its size; with a
// collection name it dumps that collection's entries.
func (c *CollectionsCommand) Process(args []string, msg *discordgo.Message, info *GuildInfo) (string, bool) {
	if len(args) < 1 {
		s := make([]string, 0, len(info.config.Collections))
		for k, v := range info.config.Collections {
			s = append(s, fmt.Sprintf("%s (%v items)", k, len(v)))
		}
		return "```No collection specified. All collections:\n" + ExtraSanitize(strings.Join(s, "\n")) + "```", false
	}
	arg := args[0]
	cmap, ok := info.config.Collections[arg]
	if !ok {
		return "```That collection doesn't exist! Use this command with no arguments to see a list of all collections.```", false
	}
	return "```" + ExtraSanitize(arg+" contains:\n"+strings.Join(MapToSlice(cmap), "\n")) + "```", false
}

func (c *CollectionsCommand) Usage(info *GuildInfo) string {
	return info.FormatUsage(c, "", "Lists all the collections that sweetiebot is using.")
}

func (c *CollectionsCommand) UsageShort() string { return "Lists all collections." }
// PickCommand returns a random entry from a collection.
type PickCommand struct {
}

func (c *PickCommand) Name() string {
	return "Pick"
}

// Process picks a random item from the named collection. With no arguments
// it lists all collections. The "spoiler" and "emote" collections are
// deliberately excluded from picking.
func (c *PickCommand) Process(args []string, msg *discordgo.Message, info *GuildInfo) (string, bool) {
	if len(args) < 1 {
		s := make([]string, 0, len(info.config.Collections))
		for k, v := range info.config.Collections {
			s = append(s, fmt.Sprintf("%s (%v items)", k, len(v)))
		}
		return "```No collection specified. All collections:\n" + ExtraSanitize(strings.Join(s, "\n")) + "```", false
	}
	arg := strings.ToLower(args[0])
	if arg == "spoiler" || arg == "emote" {
		return "```You cannot pick an item from that collection.```", false
	}
	cmap, ok := info.config.Collections[arg]
	if !ok {
		return "```That collection doesn't exist! Use this command with no arguments to see a list of all collections.```", false
	}
	if len(cmap) > 0 {
		return ReplaceAllMentions(MapGetRandomItem(cmap)), false
	}
	return "```That collection is empty.```", false
}

func (c *PickCommand) Usage(info *GuildInfo) string {
	return info.FormatUsage(c, "[collection]", "Picks a random item from the given collection and returns it.")
}

func (c *PickCommand) UsageShort() string { return "Picks a random item." }
// NewCommand creates a new, empty collection.
type NewCommand struct {
}

func (c *NewCommand) Name() string {
	return "New"
}

// Process creates a new lowercase-named collection if it does not already
// exist. Names containing '+' are rejected because '+' is the collection
// separator used by AddCommand.
func (c *NewCommand) Process(args []string, msg *discordgo.Message, info *GuildInfo) (string, bool) {
	if len(args) < 1 {
		return "```You have to provide a new collection name.```", false
	}
	collection := strings.ToLower(args[0])
	if strings.ContainsAny(collection, "+") {
		return "```Don't make collection names with + in them, dumbass!```", false
	}
	_, ok := info.config.Collections[collection]
	if ok {
		return "```That collection already exists!```", false
	}
	info.config.Collections[collection] = make(map[string]bool)
	info.SaveConfig()
	return "```Created the " + collection + " collection.```", false
}

func (c *NewCommand) Usage(info *GuildInfo) string {
	return info.FormatUsage(c, "[collection]", "Creates a new collection with the given name, provided the collection does not already exist.")
}

func (c *NewCommand) UsageShort() string { return "Creates a new collection." }
// DeleteCommand deletes an existing collection, except for the built-in ones.
type DeleteCommand struct {
}

func (c *DeleteCommand) Name() string {
	return "Delete"
}

// Process deletes the named collection unless it is one of the protected
// built-in collections (emote, bored, status, spoiler, bucket).
func (c *DeleteCommand) Process(args []string, msg *discordgo.Message, info *GuildInfo) (string, bool) {
	if len(args) < 1 {
		return "```You have to provide a collection name.```", false
	}
	collection := strings.ToLower(args[0])
	_, ok := info.config.Collections[collection]
	if !ok {
		return "```That collection doesn't exist!```", false
	}
	// Membership test against the set of protected built-in collections.
	_, ok = map[string]bool{"emote": true, "bored": true, "status": true, "spoiler": true, "bucket": true}[collection]
	if ok {
		return "```You can't delete that collection!```", false
	}
	delete(info.config.Collections, collection)
	info.SaveConfig()
	return "```Deleted the " + collection + " collection.```", false
}

func (c *DeleteCommand) Usage(info *GuildInfo) string {
	return info.FormatUsage(c, "[collection]", "Deletes the collection with the given name.")
}

func (c *DeleteCommand) UsageShort() string { return "Deletes a collection." }
// SearchCollectionCommand finds collection entries containing a substring.
type SearchCollectionCommand struct {
}

func (c *SearchCollectionCommand) Name() string {
	return "SearchCollection"
}

// Process searches the named collection for entries containing the query
// string (case-sensitive substring match). The "spoiler" collection is
// deliberately opaque and cannot be searched.
func (c *SearchCollectionCommand) Process(args []string, msg *discordgo.Message, info *GuildInfo) (string, bool) {
	if len(args) < 1 {
		return "```You have to provide a new collection name.```", false
	}
	if len(args) < 2 {
		return "```You have to provide something to search for (use !collections to dump the contents of a collection).```", false
	}
	collection := strings.ToLower(args[0])
	if collection == "spoiler" {
		return "```You can't search in that collection.```", false
	}
	cmap, ok := info.config.Collections[collection]
	if !ok {
		return "```That collection doesn't exist! Use !collections without any arguments to list them.```", false
	}
	results := []string{}
	arg := strings.Join(args[1:], " ")
	// Idiomatic single-variable range (was the non-idiomatic `for k, _ := range`).
	for k := range cmap {
		if strings.Contains(k, arg) {
			results = append(results, k)
		}
	}
	if len(results) > 0 {
		return "```The following collection entries match your query:\n" + ExtraSanitize(strings.Join(results, "\n")) + "```", len(results) > 6
	}
	return "```No results found in the " + collection + " collection.```", false
}

func (c *SearchCollectionCommand) Usage(info *GuildInfo) string {
	return info.FormatUsage(c, "[collection] [arbitrary string]", "Returns all members of the given collection that match the search query.")
}

func (c *SearchCollectionCommand) UsageShort() string { return "Searches a collection." }
// ImportCommand merges a collection from another server into one of this
// server's collections, provided the source server opted in via the
// "importable" config flag.
type ImportCommand struct {
}

func (c *ImportCommand) Name() string {
	return "Import"
}

// Process resolves the source server by (optionally exact, via a trailing
// '@') case-insensitive name match, validates that it is importable and
// that both source and target collections exist, then copies every entry
// from the source collection into the target and saves the config.
func (c *ImportCommand) Process(args []string, msg *discordgo.Message, info *GuildInfo) (string, bool) {
	if len(args) < 1 {
		return "```No source server provided.```", false
	}
	other := []*GuildInfo{}
	str := args[0]
	// A trailing '@' requests an exact (rather than substring) name match.
	exact := false
	if str[len(str)-1] == '@' {
		str = str[:len(str)-1]
		exact = true
	}
	for _, v := range sb.guilds {
		if exact {
			if strings.Compare(strings.ToLower(v.Guild.Name), strings.ToLower(str)) == 0 {
				other = append(other, v)
			}
		} else {
			if strings.Contains(strings.ToLower(v.Guild.Name), strings.ToLower(str)) {
				other = append(other, v)
			}
		}
	}
	// The match must be unambiguous: exactly one server.
	if len(other) > 1 {
		names := make([]string, len(other), len(other))
		for i := 0; i < len(other); i++ {
			names[i] = other[i].Guild.Name
		}
		return fmt.Sprintf("```Could be any of the following servers: \n%s```", ExtraSanitize(strings.Join(names, "\n"))), len(names) > 8
	}
	if len(other) < 1 {
		return fmt.Sprintf("```Could not find any server matching %s!```", args[0]), false
	}
	if !other[0].config.Importable {
		return "```That server has not made their collections importable by other servers. If this is a public server, you can ask a moderator on that server to run \"!setconfig importable true\" if they wish to make their collections public.```", false
	}
	if len(args) < 2 {
		return "```No source collection provided.```", false
	}
	source := args[1]
	// The target defaults to the same name as the source collection.
	target := source
	if len(args) > 2 {
		target = args[2]
	}
	sourceCollection, ok := other[0].config.Collections[source]
	if !ok {
		return fmt.Sprintf("```The source collection (%s) does not exist on the source server (%s)!```", source, other[0].Guild.Name), false
	}
	targetCollection, tok := info.config.Collections[target]
	if !tok {
		return fmt.Sprintf("```The target collection (%s) does not exist on this server! Please manually create this collection using !new if you actually intended this.```", target), false
	}
	// Merge: existing target entries are preserved, duplicates overwritten.
	for k, v := range sourceCollection {
		targetCollection[k] = v
	}
	info.SaveConfig()
	return fmt.Sprintf("```Successfully merged \"%s\" from %s into \"%s\" on this server. New size: %v```", source, other[0].Guild.Name, target, len(targetCollection)), false
}

func (c *ImportCommand) Usage(info *GuildInfo) string {
	return info.FormatUsage(c, "[source server] [source collection] [target collection]", "Adds all elements from the source collection on the source server to the target collection on this server. If no target is specified, attempts to copy all items into a collection of the same name as the source. Example: \"!import Manechat cool notcool\"")
}

func (c *ImportCommand) UsageShort() string { return "Imports a collection from another server." }
package de
import (
"github.com/nlpodyssey/spago/pkg/mat"
"github.com/nlpodyssey/spago/pkg/mat/rand"
"github.com/nlpodyssey/spago/pkg/utils"
"math"
)
// Mutator generates a donor vector for every member of a population.
type Mutator interface {
	Mutate(p *Population)
}

var _ Mutator = &RandomMutation{}

// RandomMutation implements the classic DE/rand/1 mutation strategy.
type RandomMutation struct {
	// Bound clips each component of the donor vector to [-Bound, +Bound].
	Bound float64
}

// NewRandomMutation returns a RandomMutation with the given clipping bound.
func NewRandomMutation(bound float64) *RandomMutation {
	return &RandomMutation{
		Bound: bound,
	}
}
// Mutate executes the mutation generating a "donor vector" for every element of the population.
// For each vector xi in the current generation, called target vector, a vector yi, called donor vector, is obtained
// as linear combination of some vectors in the population selected according to DE/rand/1 strategy, where
// yi = clip(xa + MutationFactor * (xb − xc))
func (m *RandomMutation) Mutate(p *Population) {
	for i, member := range p.Members {
		// Pick three distinct members, all different from the target i.
		extracted := rand.GetUniqueRandomInt(3, len(p.Members), func(r int) bool { return r != i })
		xc := p.Members[extracted[2]].TargetVector
		xb := p.Members[extracted[1]].TargetVector
		xa := p.Members[extracted[0]].TargetVector
		donor := xa.Add(xb.Sub(xc).ProdScalarInPlace(member.MutationFactor))
		// Keep every component within the feasible region.
		donor.ClipInPlace(-m.Bound, +m.Bound)
		member.DonorVector = donor.(*mat.Dense)
	}
}
var _ Mutator = &DeglMutation{}
// Differential Evolution with Global and Local Neighborhoods mutation strategy
// Reference:
// "Design of Two-Channel Quadrature Mirror Filter Banks Using Differential Evolution with Global and Local Neighborhoods"
// Authors: <NAME>, <NAME>, <NAME>, <NAME> (2011)
// (https://www.springerprofessional.de/en/design-of-two-channel-quadrature-mirror-filter-banks-using-diffe/3805398)
// DeglMutation implements the "Differential Evolution with Global and Local
// neighborhoods" (DEGL) mutation strategy, which blends a global donor (based
// on the population best) with a local donor (based on the best member of a
// ring neighborhood around each target vector).
type DeglMutation struct {
	// NeighborhoodRadius is the fraction of the population forming each
	// member's local neighborhood window.
	NeighborhoodRadius float64
	// Bound is the clipping bound applied to each donor vector component.
	Bound float64
}

// NewDeglMutation returns a DeglMutation with the given neighborhood radius
// and clipping bound.
// (The parameter was renamed from exported-style "NeighborhoodRadius" to the
// conventional lowerCamelCase for Go function parameters; callers are
// unaffected since parameter names are not part of the call interface.)
func NewDeglMutation(neighborhoodRadius, bound float64) *DeglMutation {
	return &DeglMutation{
		NeighborhoodRadius: neighborhoodRadius,
		Bound:              bound,
	}
}
// Mutate calculate the mutated vector (donor vector) as:
//   G = xi + MutationFactor (best − xi) + MutationFactor (xa − xb)
//   L = xi + MutationFactor (bestNeighbor − xi) + MutationFactor (xc − xd)
//   yi = clip(w * G + (1-w) * L)
// Note: the code weights the global component G by w and the local component L
// by (1-w), matching the DEGL paper; an earlier version of this comment had
// the weights swapped.
func (m *DeglMutation) Mutate(p *Population) {
	// Neighborhood window size as a fraction of the population.
	windowSize := int(float64(len(p.Members)) * m.NeighborhoodRadius)
	bestIndex, _ := p.FindBest(0, len(p.Members)-1, math.Inf(+1), 0)
	for i, member := range p.Members {
		except := func(r int) bool { return r != i }
		// xa, xb come from the whole population; xc, xd from the local neighborhood.
		extracted := rand.GetUniqueRandomInt(2, len(p.Members), except)
		neighbors := utils.GetNeighborsIndices(len(p.Members), i, windowSize)
		extractedNeighbors := rand.GetUniqueRandomIndices(2, neighbors, except)
		bestNeighborIndex, _ := p.FindBestNeighbor(i, windowSize)
		bestNeighbor := p.Members[bestNeighborIndex].TargetVector
		best := p.Members[bestIndex].TargetVector
		xi := member.TargetVector
		xb := p.Members[extracted[1]].TargetVector
		xa := p.Members[extracted[0]].TargetVector
		xd := p.Members[extractedNeighbors[1]].TargetVector
		xc := p.Members[extractedNeighbors[0]].TargetVector
		f := member.MutationFactor
		w := member.WeightFactor
		diff1 := xa.Sub(xb).ProdScalarInPlace(f)
		diff2 := xc.Sub(xd).ProdScalarInPlace(f)
		diff3 := best.Sub(xi).ProdScalarInPlace(f)
		diff4 := bestNeighbor.Sub(xi).ProdScalarInPlace(f)
		// l = (1-w) * L and g = w * G, combined into the donor below.
		l := xi.Add(diff4).AddInPlace(diff2).ProdScalarInPlace(1.0 - w)
		g := xi.Add(diff3).AddInPlace(diff1).ProdScalarInPlace(w)
		donor := g.Add(l)
		donor.ClipInPlace(-m.Bound, +m.Bound)
		member.DonorVector = donor.(*mat.Dense)
	}
}
package rpn
import "math/big"
// Evaluation context. This type is exported to allow eventual user-supplied
// operations.
type Evaluator struct {
	// Stack holds intermediate values; elements are *big.Int or *big.Rat
	// (a nil constant is pushed as nil).
	Stack []interface{}
	// Vars maps variable names to their values (*big.Int or *big.Rat).
	Vars map[string]interface{}
	// Names lists the variable names consumed by LOAD ops, in order.
	Names []string
	// Consts lists the constants consumed by CONST ops, in order.
	Consts []*big.Rat
	// N and C are the cursors into Names and Consts respectively.
	N, C int
}
// eval runs the compiled operator sequence against this context, dispatching
// each operator through opFuncs and stopping at the first one that reports an
// error.
func (e *Evaluator) eval(ops []operator) (err error) {
	for _, op := range ops {
		if err = opFuncs[op](e); err != nil {
			return err
		}
	}
	return nil
}
// Helper to get the top element on the stack.
// The stack must be non-empty; an empty stack panics with an index error.
func (e *Evaluator) Top() interface{} {
	return e.Stack[len(e.Stack)-1]
}
// Pop removes the element on top of the stack and returns it.
// The stack must be non-empty.
func (e *Evaluator) Pop() interface{} {
	last := len(e.Stack) - 1
	v := e.Stack[last]
	e.Stack = e.Stack[:last]
	return v
}
// Helper to set the top element on the stack.
// Like Top, it assumes the stack is non-empty.
func (e *Evaluator) SetTop(v interface{}) {
	e.Stack[len(e.Stack)-1] = v
}
type opFunc func(*Evaluator) error
// opFuncs maps each operator to its implementation.
// Stack convention: the right-hand operand of a binary operator is popped
// first and the result overwrites the remaining top element. Rational results
// that turn out to be integral are demoted back to *big.Int wherever
// practical.
var opFuncs = [...]opFunc{
	oNOP: func(*Evaluator) error { return nil },
	oLOAD: func(e *Evaluator) error {
		// Push a copy of the variable named by the next entry in Names.
		v := e.Vars[e.Names[e.N]]
		switch i := v.(type) {
		case *big.Int:
			e.Stack = append(e.Stack, new(big.Int).Set(i))
		case *big.Rat:
			e.Stack = append(e.Stack, new(big.Rat).Set(i))
		default:
			return MissingVar{e.Names[e.N]}
		}
		e.N++
		return nil
	},
	oCONST: func(e *Evaluator) error {
		// Push a copy of the next constant; integral constants become *big.Int,
		// and a nil constant is pushed as nil (used as the EXP modulus placeholder).
		v := e.Consts[e.C]
		if v == nil {
			e.Stack = append(e.Stack, nil)
		} else if v.IsInt() {
			e.Stack = append(e.Stack, new(big.Int).Set(v.Num()))
		} else {
			e.Stack = append(e.Stack, new(big.Rat).Set(v))
		}
		e.C++
		return nil
	},
	oABS: numericUnary("ABS", (*big.Int).Abs, (*big.Rat).Abs),
	oADD: numericBinary("ADD", (*big.Int).Add, (*big.Rat).Add),
	oMUL: numericBinary("MUL", (*big.Int).Mul, (*big.Rat).Mul),
	oNEG: numericUnary("NEG", (*big.Int).Neg, (*big.Rat).Neg),
	oQUO: func(e *Evaluator) error {
		// Exact division; int/int produces a rational result when the quotient
		// is not integral.
		x := e.Pop()
		y := e.Top()
		switch a := x.(type) {
		case *big.Int:
			if a.Sign() == 0 {
				return DivByZero{}
			}
			switch b := y.(type) {
			case *big.Int:
				r := new(big.Rat).SetFrac(b, a)
				if r.IsInt() {
					b.Set(r.Num())
				} else {
					e.SetTop(r)
				}
			case *big.Rat:
				b.Quo(b, new(big.Rat).SetFrac(a, big.NewInt(1)))
				if b.IsInt() {
					e.SetTop(b.Num())
				}
			default:
				panic("QUO: wrong type on stack! (int/?)")
			}
		case *big.Rat:
			if a.Sign() == 0 {
				return DivByZero{}
			}
			switch b := y.(type) {
			case *big.Int:
				r := new(big.Rat).SetFrac(b, big.NewInt(1))
				r.Quo(r, a)
				if r.IsInt() {
					e.SetTop(r.Num())
				} else {
					e.SetTop(r)
				}
			case *big.Rat:
				b.Quo(b, a)
				if b.IsInt() {
					e.SetTop(b.Num())
				}
			default:
				panic("QUO: wrong type on stack! (rat/?)")
			}
		default:
			panic("QUO: wrong type on stack! (?/?)")
		}
		return nil
	},
	oSUB: numericBinary("SUB", (*big.Int).Sub, (*big.Rat).Sub),
	oAND: integerBinary("AND", (*big.Int).And),
	oANDNOT: integerBinary("ANDNOT", (*big.Int).AndNot),
	oBINOMIAL: integerOverflow("BINOMIAL", (*big.Int).Binomial),
	oDIV: integerDivision("DIV", (*big.Int).Div),
	oEXP: func(e *Evaluator) error {
		// Computes b^a (mod m); m may be a nil placeholder for "no modulus".
		// A negative exponent yields the rational 1/b^|a| and drops the modulus.
		m := e.Pop()
		x := e.Pop()
		y := e.Top()
		a, aok := x.(*big.Int)
		b, bok := y.(*big.Int)
		c, cok := m.(*big.Int) // heh
		if !aok {
			_ = x.(*big.Rat)
			return TypeError{"int"}
		}
		if !bok {
			_ = y.(*big.Rat)
			return TypeError{"int"}
		}
		if c != nil && !cok { // heh
			_ = m.(*big.Rat)
			return TypeError{"int"}
		}
		invert := a.Sign() < 0
		if invert {
			c = nil
		}
		b.Exp(b, a.Abs(a), c)
		if invert {
			e.SetTop(new(big.Rat).SetFrac(big.NewInt(1), b))
		}
		return nil
	},
	oGCD: integerBinary("GCD", func(r, x, y *big.Int) *big.Int { return r.GCD(nil, nil, x, y) }),
	oLSH: integerShift("LSH", (*big.Int).Lsh),
	oMOD: integerDivision("MOD", (*big.Int).Mod),
	oMODINVERSE: integerBinary("MODINV", (*big.Int).ModInverse),
	oMULRANGE: integerOverflow("MULRANGE", (*big.Int).MulRange),
	oNOT: func(e *Evaluator) error {
		// Bitwise complement; defined only for integers.
		x := e.Top()
		if a, ok := x.(*big.Int); ok {
			a.Not(a)
		} else {
			_ = x.(*big.Rat)
			return TypeError{"int"}
		}
		return nil
	},
	oOR: integerBinary("OR", (*big.Int).Or),
	oREM: integerDivision("REM", (*big.Int).Rem),
	oRSH: integerShift("RSH", (*big.Int).Rsh),
	oXOR: integerBinary("XOR", (*big.Int).Xor),
	oDENOM: func(e *Evaluator) error {
		// Denominator of the top value; an integer's denominator is 1.
		switch a := e.Top().(type) {
		case *big.Rat:
			e.SetTop(a.Denom())
		case *big.Int:
			a.SetUint64(1)
		default:
			panic("DENOM: wrong type on stack!")
		}
		return nil
	},
	oINV: func(e *Evaluator) error {
		// Multiplicative inverse 1/x; integral results are demoted to *big.Int.
		switch i := e.Top().(type) {
		case *big.Int:
			if i.Sign() == 0 {
				return DivByZero{}
			}
			e.SetTop(new(big.Rat).SetFrac(big.NewInt(1), i))
		case *big.Rat:
			if i.Sign() == 0 {
				return DivByZero{}
			}
			i.Inv(i)
			if i.IsInt() {
				e.SetTop(i.Num())
			}
		default:
			panic("INV: wrong type on stack!")
		}
		return nil
	},
	oNUM: func(e *Evaluator) error {
		// Numerator of the top value; an integer is already its own numerator.
		switch a := e.Top().(type) {
		case *big.Rat:
			e.SetTop(a.Num())
		case *big.Int:
			// do nothing
		default:
			// Fixed: this panic message used to say "num:" in lowercase,
			// inconsistent with every other operator.
			panic("NUM: wrong type on stack!")
		}
		return nil
	},
	oTRUNC: numericRound("TRUNC", func(e *Evaluator, a *big.Rat) { e.SetTop(a.Num().Quo(a.Num(), a.Denom())) }),
	// Fixed: oFLOOR and oCEIL previously passed "TRUNC" as their operator
	// name, so a corrupted stack would have produced a misleading panic.
	oFLOOR: numericRound("FLOOR", func(e *Evaluator, a *big.Rat) { e.SetTop(a.Num().Div(a.Num(), a.Denom())) }),
	oCEIL: numericRound("CEIL", func(e *Evaluator, a *big.Rat) {
		// Truncated quotient plus one when a positive remainder exists;
		// for negative rationals truncation already rounds toward +inf.
		q, r := a.Num().QuoRem(a.Num(), a.Denom(), new(big.Int))
		if r.Sign() > 0 {
			e.SetTop(q.Add(q, big.NewInt(1)))
		} else {
			e.SetTop(q)
		}
	}),
}
// numericUnary builds an opFunc that applies the matching in-place unary
// operation (ints for *big.Int, rats for *big.Rat) to the value on top of the
// stack. name appears only in the panic message for a corrupted stack.
func numericUnary(name string, ints func(_, _ *big.Int) *big.Int, rats func(_, _ *big.Rat) *big.Rat) opFunc {
	return func(e *Evaluator) error {
		switch i := e.Top().(type) {
		case *big.Int:
			ints(i, i)
		case *big.Rat:
			rats(i, i)
		default:
			panic(name + ": wrong type on stack!")
		}
		return nil
	}
}
// numericBinary builds an opFunc for a binary operation defined on both ints
// and rats. The right operand is popped and the result overwrites the stack
// top, so the result is (top op popped). Mixed int/rat operands are promoted
// to rats, and any rat result that is integral is demoted back to *big.Int.
// name appears only in the panic messages for a corrupted stack.
func numericBinary(name string, ints func(_, _, _ *big.Int) *big.Int, rats func(_, _, _ *big.Rat) *big.Rat) opFunc {
	return func(e *Evaluator) error {
		x := e.Pop()
		y := e.Top()
		switch a := x.(type) {
		case *big.Int:
			switch b := y.(type) {
			case *big.Int:
				// int op int: store directly into the left operand.
				ints(b, b, a)
			case *big.Rat:
				// rat op int: promote the int operand.
				rats(b, b, new(big.Rat).SetFrac(a, big.NewInt(1)))
				if b.IsInt() {
					e.SetTop(b.Num())
				}
			default:
				panic(name + ": wrong type on stack! (int*?)")
			}
		case *big.Rat:
			switch b := y.(type) {
			case *big.Int:
				// int op rat: promote the left operand to a rat.
				r := new(big.Rat).SetFrac(b, big.NewInt(1))
				rats(r, r, a)
				if r.IsInt() {
					e.SetTop(r.Num())
				} else {
					e.SetTop(r)
				}
			case *big.Rat:
				rats(b, b, a)
				if b.IsInt() {
					e.SetTop(b.Num())
				}
			default:
				panic(name + ": wrong type on stack! (rat*?)")
			}
		default:
			panic(name + ": wrong type on stack! (?*?)")
		}
		return nil
	}
}
// numericRound builds an opFunc that applies the rounding function f to the
// top of the stack when it is a *big.Rat; a *big.Int is already integral and
// passes through unchanged. name appears only in the corrupted-stack panic.
func numericRound(name string, f func(*Evaluator, *big.Rat)) opFunc {
	return func(e *Evaluator) error {
		switch a := e.Top().(type) {
		case *big.Int: // do nothing
		case *big.Rat:
			f(e, a)
		default:
			panic(name + ": unknown type on stack!")
		}
		return nil
	}
}
// integerBinary builds an opFunc for operations defined only on integers.
// The right operand is popped; f stores (top op popped) into the stack top.
// A *big.Rat operand yields a TypeError; the discarded type assertions
// deliberately panic when a stack value is neither *big.Int nor *big.Rat,
// which would indicate a corrupted stack.
func integerBinary(_ string, f func(_, _, _ *big.Int) *big.Int) opFunc {
	return func(e *Evaluator) error {
		x := e.Pop()
		y := e.Top()
		a, aok := x.(*big.Int)
		b, bok := y.(*big.Int)
		if !aok {
			_ = x.(*big.Rat) // TODO: make this error more informative
			return TypeError{"int"}
		}
		if !bok {
			_ = y.(*big.Rat)
			return TypeError{"int"}
		}
		f(b, b, a)
		return nil
	}
}
// integerDivision is integerBinary plus a zero check: it returns DivByZero
// when the popped (divisor) operand is zero, before invoking f.
func integerDivision(_ string, f func(_, _, _ *big.Int) *big.Int) opFunc {
	return func(e *Evaluator) error {
		x := e.Pop()
		y := e.Top()
		a, aok := x.(*big.Int)
		b, bok := y.(*big.Int)
		if !aok {
			_ = x.(*big.Rat) // TODO: make this error more informative
			return TypeError{"int"}
		}
		if !bok {
			_ = y.(*big.Rat)
			return TypeError{"int"}
		}
		if a.Sign() == 0 {
			return DivByZero{}
		}
		f(b, b, a)
		return nil
	}
}
// integerOverflow builds an opFunc for operations whose arguments must fit in
// int64 (Binomial, MulRange). Operands rejected by toobig64 produce an
// OverflowError instead of being silently truncated by Int64().
func integerOverflow(_ string, f func(_ *big.Int, _, _ int64) *big.Int) opFunc {
	return func(e *Evaluator) error {
		x := e.Pop()
		y := e.Top()
		a, aok := x.(*big.Int)
		b, bok := y.(*big.Int)
		if !aok {
			_ = x.(*big.Rat) // TODO: make this error more informative
			return TypeError{"int"}
		}
		if !bok {
			_ = y.(*big.Rat)
			return TypeError{"int"}
		}
		if toobig64(a) || toobig64(b) {
			return OverflowError{}
		}
		f(b, b.Int64(), a.Int64())
		return nil
	}
}
// integerShift builds an opFunc for the shift operations (Lsh, Rsh). The
// popped operand is the shift count, converted to uint; operands rejected by
// toobiguint produce an OverflowError rather than a garbage shift amount.
func integerShift(_ string, f func(_, _ *big.Int, _ uint) *big.Int) opFunc {
	return func(e *Evaluator) error {
		x := e.Pop()
		y := e.Top()
		a, aok := x.(*big.Int)
		b, bok := y.(*big.Int)
		if !aok {
			_ = x.(*big.Rat) // TODO: make this error more informative
			return TypeError{"int"}
		}
		if !bok {
			_ = y.(*big.Rat)
			return TypeError{"int"}
		}
		if toobiguint(a) || toobiguint(b) {
			return OverflowError{}
		}
		f(b, b, uint(a.Uint64()))
		return nil
	}
}
func toobig64(x *big.Int) bool {
return x.Cmp(two63) >= 0
}
func toobiguint(x *big.Int) bool {
return x.Cmp(uintmax) >= 0
}
var two63 = new(big.Int).SetUint64(1 << 63)
var uintmax = new(big.Int).Add(new(big.Int).SetUint64(uint64(^uint(0))), big.NewInt(1)) | eval.go | 0.571408 | 0.464476 | eval.go | starcoder |
package circuit
import (
"encoding/json"
"fmt"
"math"
"github.com/heustis/tsp-solver-go/model"
"github.com/heustis/tsp-solver-go/stats"
)
// disparityClonableCircuit is one candidate solution in the disparity-based
// clonable solver: the partially-built circuit plus the distance-gap
// statistics for every vertex not yet attached to it.
type disparityClonableCircuit struct {
	// edges is the ordered list of edges forming the current (partial) circuit.
	edges []model.CircuitEdge
	// distances maps each still-unattached vertex to its distance-gap
	// statistics relative to the circuit's edges; entries are removed as
	// vertices are attached.
	distances map[model.CircuitVertex]*stats.DistanceGaps
	// length is the total length of all edges currently in the circuit.
	length float64
}
// attachVertex inserts distance.Vertex into the circuit by splitting
// distance.Edge into two edges, then adjusts the circuit length and updates
// the gap statistics of every still-unattached vertex.
// Panics when distance.Edge is no longer part of the circuit, since that
// means the candidate data is out of sync with the circuit state.
func (c *disparityClonableCircuit) attachVertex(distance *model.DistanceToEdge) {
	var edgeIndex int
	c.edges, edgeIndex = model.SplitEdgeCopy(c.edges, distance.Edge, distance.Vertex)
	if edgeIndex < 0 {
		expectedEdgeJson, _ := json.Marshal(distance.Edge)
		actualCircuitJson, _ := json.Marshal(c.edges)
		panic(fmt.Errorf("edge not found in circuit=%p, expected=%s, \ncircuit=%s", c, string(expectedEdgeJson), string(actualCircuitJson)))
	}
	// The split replaced one edge with two; account for the length difference.
	edgeA, edgeB := c.edges[edgeIndex], c.edges[edgeIndex+1]
	c.length += edgeA.GetLength() + edgeB.GetLength() - distance.Edge.GetLength()
	for _, stats := range c.distances {
		stats.UpdateStats(distance.Edge, edgeA, edgeB)
	}
}
// clone returns a deep copy of this circuit: the edge slice is copied and
// every DistanceGaps entry is cloned, so the copy can be mutated without
// affecting the original.
func (c *disparityClonableCircuit) clone() *disparityClonableCircuit {
	clone := &disparityClonableCircuit{
		edges:     make([]model.CircuitEdge, len(c.edges)),
		distances: make(map[model.CircuitVertex]*stats.DistanceGaps),
		length:    c.length,
	}
	copy(clone.edges, c.edges)
	for k, v := range c.distances {
		clone.distances[k] = v.Clone()
	}
	return clone
}
// getLengthPerVertex returns the circuit's current total length averaged over
// the number of edges it contains.
func (c *disparityClonableCircuit) getLengthPerVertex() float64 {
	numEdges := float64(len(c.edges))
	return c.length / numEdges
}
// findNext selects which vertex to attach next and which edge(s) to try.
// When a vertex has a significant gap (z-score >= significance) early in its
// sorted edge distances, it returns that vertex's closest edges up to and
// including the gap index (one candidate per returned entry). With no
// significant gap anywhere, it returns all closest edges of the vertex
// nearest the circuit; with exactly one vertex remaining, only its single
// closest edge is returned.
func (c *disparityClonableCircuit) findNext(significance float64) []*model.DistanceToEdge {
	// If there is only one vertex left to attach, attach it to its closest edge.
	if len(c.distances) == 1 {
		for _, stats := range c.distances {
			return stats.ClosestEdges[0:1]
		}
	}
	var vertexToUpdate model.CircuitVertex
	var closestVertex *model.DistanceToEdge
	// Find the most significant early gap to determine which vertex to attach to which edge (or edges).
	// Prioritize earlier significant gaps over later, but more significant, gaps (e.g. a gap with a Z-score of 3.5 at index 1 should be prioritized over a gap with a Z-score of 5 at index 2).
	gapIndex := math.MaxInt64
	gapSignificance := 0.0
	for v, stats := range c.distances {
		// Track the vertex closest to its nearest edge, in the event there are no significant gaps.
		if closestVertex == nil || stats.ClosestEdges[0].Distance < closestVertex.Distance {
			closestVertex = stats.ClosestEdges[0]
		}
		// Determine if the current vertex has a significant gap in its edge distances that is:
		// earlier than the current best, or more significant at the same index.
		for i, currentGap := range stats.Gaps {
			if i > gapIndex {
				break
			} else if currentSignificance := (currentGap - stats.GapAverage) / stats.GapStandardDeviation; currentSignificance < significance {
				// Note: do not use the absolute value for this computation, as we only want significantly large gaps, not significantly small gaps.
				continue
			} else if currentSignificance > gapSignificance || i < gapIndex {
				vertexToUpdate = v
				gapIndex = i
				gapSignificance = currentSignificance
			}
		}
	}
	// If all vertices lack significant gaps, select the vertex with the closest edge and clone the circuit once for each edge.
	if vertexToUpdate == nil {
		return c.distances[closestVertex.Vertex].ClosestEdges
	}
	return c.distances[vertexToUpdate].ClosestEdges[0 : gapIndex+1]
}
// update attaches the next vertex chosen by findNext. When findNext returns
// multiple candidate edges, the circuit is cloned once per additional edge so
// that each alternative attachment is explored; this circuit itself attaches
// the first candidate. Returns the clones, or nil if only one candidate.
func (c *disparityClonableCircuit) update(significance float64) (clones []*disparityClonableCircuit) {
	next := c.findNext(significance)
	// The vertex is attached somewhere in every branch, so it is no longer
	// tracked as unattached.
	delete(c.distances, next[0].Vertex)
	if numClones := len(next) - 1; numClones > 0 {
		clones = make([]*disparityClonableCircuit, numClones)
		for i, cloneDistance := range next {
			if cloneIndex := i - 1; cloneIndex >= 0 {
				clones[cloneIndex] = c.clone()
				clones[cloneIndex].attachVertex(cloneDistance)
			}
		}
	} else {
		clones = nil
	}
	// Regardless of whether clones are created, update this circuit with the first entry.
	// This must happen after cloning, to avoid impacting the circuits in the clones.
	c.attachVertex(next[0])
	return clones
}
package iso20022
// Specifies periods related to a corporate action option.
type CorporateActionPeriod7 struct {
// Period during which the price of a security is determined.
PriceCalculationPeriod *Period3Choice `xml:"PricClctnPrd,omitempty"`
// Period during which both old and new equity may be traded simultaneously, for example, consolidation of equity or splitting of equity.
ParallelTradingPeriod *Period3Choice `xml:"ParllTradgPrd,omitempty"`
// Period during which the specified option, or all options of the event, remains valid, for example, offer period.
ActionPeriod *Period3Choice `xml:"ActnPrd,omitempty"`
// Period during which the shareholder can revoke, change or withdraw its instruction.
RevocabilityPeriod *Period3Choice `xml:"RvcbltyPrd,omitempty"`
// Period during which the privilege is not available, for example, this can happen whenever a meeting takes place or whenever a coupon payment is due.
PrivilegeSuspensionPeriod *Period3Choice `xml:"PrvlgSspnsnPrd,omitempty"`
// Period during which the participant of the account servicer can revoke change or withdraw its instructions.
AccountServicerRevocabilityPeriod *Period3Choice `xml:"AcctSvcrRvcbltyPrd,omitempty"`
// Period defining the last date on which withdrawal in street name requests on the outturn security will be accepted and the date on which the suspension will be released and withdrawal by transfer processing on the outturn security will resume.
DepositorySuspensionPeriodForWithdrawal *Period3Choice `xml:"DpstrySspnsnPrdForWdrwl,omitempty"`
}
// AddPriceCalculationPeriod initializes PriceCalculationPeriod with a new
// Period3Choice and returns it for the caller to populate.
func (c *CorporateActionPeriod7) AddPriceCalculationPeriod() *Period3Choice {
	c.PriceCalculationPeriod = new(Period3Choice)
	return c.PriceCalculationPeriod
}
// AddParallelTradingPeriod initializes ParallelTradingPeriod with a new
// Period3Choice and returns it for the caller to populate.
func (c *CorporateActionPeriod7) AddParallelTradingPeriod() *Period3Choice {
	c.ParallelTradingPeriod = new(Period3Choice)
	return c.ParallelTradingPeriod
}
// AddActionPeriod initializes ActionPeriod with a new Period3Choice and
// returns it for the caller to populate.
func (c *CorporateActionPeriod7) AddActionPeriod() *Period3Choice {
	c.ActionPeriod = new(Period3Choice)
	return c.ActionPeriod
}
// AddRevocabilityPeriod initializes RevocabilityPeriod with a new
// Period3Choice and returns it for the caller to populate.
func (c *CorporateActionPeriod7) AddRevocabilityPeriod() *Period3Choice {
	c.RevocabilityPeriod = new(Period3Choice)
	return c.RevocabilityPeriod
}
// AddPrivilegeSuspensionPeriod initializes PrivilegeSuspensionPeriod with a
// new Period3Choice and returns it for the caller to populate.
func (c *CorporateActionPeriod7) AddPrivilegeSuspensionPeriod() *Period3Choice {
	c.PrivilegeSuspensionPeriod = new(Period3Choice)
	return c.PrivilegeSuspensionPeriod
}
// AddAccountServicerRevocabilityPeriod initializes
// AccountServicerRevocabilityPeriod with a new Period3Choice and returns it
// for the caller to populate.
func (c *CorporateActionPeriod7) AddAccountServicerRevocabilityPeriod() *Period3Choice {
	c.AccountServicerRevocabilityPeriod = new(Period3Choice)
	return c.AccountServicerRevocabilityPeriod
}
// AddDepositorySuspensionPeriodForWithdrawal initializes
// DepositorySuspensionPeriodForWithdrawal with a new Period3Choice and
// returns it for the caller to populate.
func (c *CorporateActionPeriod7) AddDepositorySuspensionPeriodForWithdrawal() *Period3Choice {
	c.DepositorySuspensionPeriodForWithdrawal = new(Period3Choice)
	return c.DepositorySuspensionPeriodForWithdrawal
}
package Challenge1_Next_Interval
import "container/heap"
/*
Given an array of intervals, find the next interval of each interval.
In a list of intervals, for an interval ‘i’ its next interval ‘j’ will have the smallest ‘start’ greater than or equal to the ‘end’ of ‘i’.
Write a function to return an array containing indices of the next interval of each input interval.
If there is no next interval of a given interval, return -1. It is given that none of the intervals have the same start point.
Input: Intervals [[2,3], [3,4], [5,6]]
Output: [1, 2, -1]
Explanation: The next interval of [2,3] is [3,4] having index ‘1’. Similarly, the next interval of [3,4] is [5,6] having index ‘2’. There is no next interval for [5,6] hence we have ‘-1’.
Input: Intervals [[3,4], [1,5], [4,6]]
Output: [2, -1, -1]
Explanation: The next interval of [3,4] is [4,6] which has index ‘2’. There is no next interval for [1,5] and [4,6].
*/
// findRightInterval returns, for every interval, the index of its "next"
// interval -- the one with the smallest start >= this interval's end -- or -1
// when no such interval exists.
func findRightInterval(intervals [][]int) []int {
	// Two-heap approach (translated from the original Chinese comments):
	// - minRInterval is a min-heap on the right endpoint (end); it drives the
	//   sweep over all intervals in increasing order of end.
	// - minLInterval is a min-heap on the left endpoint (start); its top
	//   answers the next-interval query for the current minRInterval top.
	// Key observation: an interval that is not a valid next interval for the
	// current (smallest) end can never become one for any later, larger end,
	// so it can be popped from minLInterval permanently. This mirrors the
	// original course solution.
	if len(intervals) == 0 {
		return []int{}
	}
	var (
		minLInterval = MinLeftHeap{}
		minRInterval = MinRightHeap{}
		nextInterval = make([]int, len(intervals))
	)
	heap.Init(&minLInterval)
	heap.Init(&minRInterval)
	for i := 0; i < len(intervals); i++ {
		heap.Push(&minRInterval, Point{x: intervals[i][0], y: intervals[i][1], ind: i})
		heap.Push(&minLInterval, Point{x: intervals[i][0], y: intervals[i][1], ind: i})
	}
	for minRInterval.Len() != 0 {
		curPoint := heap.Pop(&minRInterval).(Point)
		// Drop intervals whose start is before curPoint's end: they can never
		// be the next interval for curPoint or for any later point.
		for minLInterval.Len() != 0 && minLInterval[0].x < curPoint.y {
			heap.Pop(&minLInterval)
		}
		if minLInterval.Len() == 0 {
			nextInterval[curPoint.ind] = -1
		} else {
			nextInterval[curPoint.ind] = minLInterval[0].ind
		}
	}
	return nextInterval
}
// Point carries one interval's start (x), end (y), and original index (ind).
type Point struct {
	x, y int
	ind int
}

// MinRightHeap orders Points by ascending right endpoint (y).
type MinRightHeap []Point

// MinLeftHeap orders Points by ascending left endpoint (x).
type MinLeftHeap []Point
// Len implements heap.Interface.
func (m MinRightHeap) Len() int {
	return len(m)
}

// Less orders by interval end (y), making this a min-heap on ends.
func (m MinRightHeap) Less(i, j int) bool {
	return m[i].y < m[j].y
}

// Swap exchanges two elements.
func (m MinRightHeap) Swap(i, j int) {
	m[i], m[j] = m[j], m[i]
}

// Push appends x to the heap's backing slice; call only via heap.Push.
func (m *MinRightHeap) Push(x interface{}) {
	*m = append(*m, x.(Point))
}

// Pop removes and returns the last element; call only via heap.Pop.
func (m *MinRightHeap) Pop() interface{} {
	old := *m
	x := old[len(*m)-1]
	*m = old[:len(*m)-1]
	return x
}
// Len implements heap.Interface.
func (m MinLeftHeap) Len() int {
	return len(m)
}

// Less orders by interval start (x), making this a min-heap on starts.
func (m MinLeftHeap) Less(i, j int) bool {
	return m[i].x < m[j].x
}

// Swap exchanges two elements.
func (m MinLeftHeap) Swap(i, j int) {
	m[i], m[j] = m[j], m[i]
}

// Push appends x to the heap's backing slice; call only via heap.Push.
func (m *MinLeftHeap) Push(x interface{}) {
	*m = append(*m, x.(Point))
}

// Pop removes and returns the last element; call only via heap.Pop.
func (m *MinLeftHeap) Pop() interface{} {
	old := *m
	x := old[len(*m)-1]
	*m = old[:len(*m)-1]
	return x
}
package leabra
import (
"fmt"
"unsafe"
"github.com/goki/ki/bitflag"
"github.com/goki/ki/kit"
)
// NeuronVarStart is the byte offset of fields in the Neuron structure
// where the float32 named variables start.
// Note: all non-float32 infrastructure variables must be at the start!
const NeuronVarStart = 8
// leabra.Neuron holds all of the neuron (unit) level variables -- this is the most basic version with
// rate-code only and no optional features at all.
// All variables accessible via Unit interface must be float32 and start at the top, in contiguous order
type Neuron struct {
Flags NeurFlags `desc:"bit flags for binary state variables"`
SubPool int32 `desc:"index of the sub-level inhibitory pool that this neuron is in (only for 4D shapes, the unit-group / hypercolumn structure level) -- indicies start at 1 -- 0 is layer-level pool."`
Act float32 `desc:"rate-coded activation value reflecting final output of neuron communicated to other neurons, typically in range 0-1. This value includes adaptation and synaptic depression / facilitation effects which produce temporal contrast (see ActLrn for version without this). For rate-code activation, this is noisy-x-over-x-plus-one (NXX1) function; for discrete spiking it is computed from the inverse of the inter-spike interval (ISI), and Spike reflects the discrete spikes."`
ActLrn float32 `desc:"learning activation value, reflecting *dendritic* activity that is not affected by synaptic depression or adapdation channels which are located near the axon hillock. This is the what drives the Avg* values that drive learning. Computationally, neurons strongly discount the signals sent to other neurons to provide temporal contrast, but need to learn based on a more stable reflection of their overall inputs in the dendrites."`
Ge float32 `desc:"total excitatory synaptic conductance -- the net excitatory input to the neuron -- does *not* include Gbar.E"`
Gi float32 `desc:"total inhibitory synaptic conductance -- the net inhibitory input to the neuron -- does *not* include Gbar.I"`
Gk float32 `desc:"total potassium conductance, typically reflecting sodium-gated potassium currents involved in adaptation effects -- does *not* include Gbar.K"`
Inet float32 `desc:"net current produced by all channels -- drives update of Vm"`
Vm float32 `desc:"membrane potential -- integrates Inet current over time"`
Targ float32 `desc:"target value: drives learning to produce this activation value"`
Ext float32 `desc:"external input: drives activation of unit from outside influences (e.g., sensory input)"`
AvgSS float32 `desc:"super-short time-scale average of ActLrn activation -- provides the lowest-level time integration -- for spiking this integrates over spikes before subsequent averaging, and it is also useful for rate-code to provide a longer time integral overall"`
AvgS float32 `desc:"short time-scale average of ActLrn activation -- tracks the most recent activation states (integrates over AvgSS values), and represents the plus phase for learning in XCAL algorithms"`
AvgM float32 `desc:"medium time-scale average of ActLrn activation -- integrates over AvgS values, and represents the minus phase for learning in XCAL algorithms"`
AvgL float32 `desc:"long time-scale average of medium-time scale (trial level) activation, used for the BCM-style floating threshold in XCAL"`
AvgLLrn float32 `desc:"how much to learn based on the long-term floating threshold (AvgL) for BCM-style Hebbian learning -- is modulated by level of AvgL itself (stronger Hebbian as average activation goes higher) and optionally the average amount of error experienced in the layer (to retain a common proportionality with the level of error-driven learning across layers)"`
AvgSLrn float32 `desc:"short time-scale activation average that is actually used for learning -- typically includes a small contribution from AvgM in addition to mostly AvgS, as determined by LrnActAvgParams.LrnM -- important to ensure that when unit turns off in plus phase (short time scale), enough medium-phase trace remains so that learning signal doesn't just go all the way to 0, at which point no learning would take place"`
ActQ0 float32 `desc:"the activation state at start of current alpha cycle (same as the state at end of previous cycle)"`
ActQ1 float32 `desc:"the activation state at end of first quarter of current alpha cycle"`
ActQ2 float32 `desc:"the activation state at end of second quarter of current alpha cycle"`
ActM float32 `desc:"the activation state at end of third quarter, which is the traditional posterior-cortical minus phase activation"`
ActP float32 `desc:"the activation state at end of fourth quarter, which is the traditional posterior-cortical plus_phase activation"`
ActDif float32 `desc:"ActP - ActM -- difference between plus and minus phase acts -- reflects the individual error gradient for this neuron in standard error-driven learning terms"`
ActDel float32 `desc:"delta activation: change in Act from one cycle to next -- can be useful to track where changes are taking place"`
ActAvg float32 `desc:"average activation (of final plus phase activation state) over long time intervals (time constant = DtPars.AvgTau -- typically 200) -- useful for finding hog units and seeing overall distribution of activation"`
Noise float32 `desc:"noise value added to unit (ActNoiseParams determines distribution, and when / where it is added)"`
GiSyn float32 `desc:"aggregated synaptic inhibition (from Inhib projections) -- time integral of GiRaw -- this is added with computed FFFB inhibition to get the full inhibition in Gi"`
GiSelf float32 `desc:"total amount of self-inhibition -- time-integrated to avoid oscillations"`
ActSent float32 `desc:"last activation value sent (only send when diff is over threshold)"`
GeRaw float32 `desc:"raw excitatory conductance (net input) received from sending units (send delta's are added to this value)"`
GeInc float32 `desc:"delta increment in GeRaw sent using SendGeDelta"`
GiRaw float32 `desc:"raw inhibitory conductance (net input) received from sending units (send delta's are added to this value)"`
GiInc float32 `desc:"delta increment in GiRaw sent using SendGeDelta"`
GknaFast float32 `desc:"conductance of sodium-gated potassium channel (KNa) fast dynamics (M-type) -- produces accommodation / adaptation of firing"`
GknaMed float32 `desc:"conductance of sodium-gated potassium channel (KNa) medium dynamics (Slick) -- produces accommodation / adaptation of firing"`
GknaSlow float32 `desc:"conductance of sodium-gated potassium channel (KNa) slow dynamics (Slack) -- produces accommodation / adaptation of firing"`
Spike float32 `desc:"whether neuron has spiked or not (0 or 1), for discrete spiking neurons."`
ISI float32 `desc:"current inter-spike-interval -- counts up since last spike. Starts at -1 when initialized."`
ISIAvg float32 `desc:"average inter-spike-interval -- average time interval between spikes. Starts at -1 when initialized, and goes to -2 after first spike, and is only valid after the second spike post-initialization."`
}
var NeuronVars = []string{"Act", "ActLrn", "Ge", "Gi", "Gk", "Inet", "Vm", "Targ", "Ext", "AvgSS", "AvgS", "AvgM", "AvgL", "AvgLLrn", "AvgSLrn", "ActQ0", "ActQ1", "ActQ2", "ActM", "ActP", "ActDif", "ActDel", "ActAvg", "Noise", "GiSyn", "GiSelf", "ActSent", "GeRaw", "GeInc", "GiRaw", "GiInc", "GknaFast", "GknaMed", "GknaSlow", "Spike", "ISI", "ISIAvg"}
var NeuronVarsMap map[string]int
var NeuronVarProps = map[string]string{
"Vm": `min:"0" max:"1"`,
"ActDel": `auto-scale:"+"`,
"ActDif": `auto-scale:"+"`,
}
// init builds NeuronVarsMap, the reverse lookup from variable name to its
// index in NeuronVars (and hence to its float32 slot in the Neuron struct).
func init() {
	NeuronVarsMap = make(map[string]int, len(NeuronVars))
	for i, v := range NeuronVars {
		NeuronVarsMap[v] = i
	}
}
// VarNames returns the list of neuron variable names, in the same order as
// the float32 fields of the Neuron struct.
func (nrn *Neuron) VarNames() []string {
	return NeuronVars
}
// NeuronVarByName returns the index of the named variable within the Neuron,
// or an error when the name is not a known neuron variable.
func NeuronVarByName(varNm string) (int, error) {
	if i, ok := NeuronVarsMap[varNm]; ok {
		return i, nil
	}
	return 0, fmt.Errorf("Neuron VarByName: variable name: %v not valid", varNm)
}
// VarByIndex returns variable using index (0 = first variable in NeuronVars list)
// It reads the value directly through pointer arithmetic: the Neuron struct
// begins with NeuronVarStart (8) bytes of non-float fields (Flags, SubPool),
// followed by the float32 variables laid out contiguously in NeuronVars
// order. No bounds checking is performed; idx must be a valid NeuronVars
// index or the read is undefined.
func (nrn *Neuron) VarByIndex(idx int) float32 {
	fv := (*float32)(unsafe.Pointer(uintptr(unsafe.Pointer(nrn)) + uintptr(NeuronVarStart+4*idx)))
	return *fv
}
// VarByName returns variable by name, or error
// It resolves the name to an index via NeuronVarByName and then reads the
// value with VarByIndex.
func (nrn *Neuron) VarByName(varNm string) (float32, error) {
	i, err := NeuronVarByName(varNm)
	if err != nil {
		return 0, err
	}
	return nrn.VarByIndex(i), nil
}
// HasFlag returns whether the given flag bit is set on this neuron.
func (nrn *Neuron) HasFlag(flag NeurFlags) bool {
	return bitflag.Has32(int32(nrn.Flags), int(flag))
}

// SetFlag sets the given flag bit on this neuron.
func (nrn *Neuron) SetFlag(flag NeurFlags) {
	bitflag.Set32((*int32)(&nrn.Flags), int(flag))
}

// ClearFlag clears the given flag bit on this neuron.
func (nrn *Neuron) ClearFlag(flag NeurFlags) {
	bitflag.Clear32((*int32)(&nrn.Flags), int(flag))
}

// SetMask sets all of the flag bits present in mask.
func (nrn *Neuron) SetMask(mask int32) {
	bitflag.SetMask32((*int32)(&nrn.Flags), mask)
}

// ClearMask clears all of the flag bits present in mask.
func (nrn *Neuron) ClearMask(mask int32) {
	bitflag.ClearMask32((*int32)(&nrn.Flags), mask)
}

// IsOff returns true if the neuron has been turned off (lesioned)
func (nrn *Neuron) IsOff() bool {
	return nrn.HasFlag(NeurOff)
}
// NeurFlags are bit-flags encoding relevant binary state for neurons
type NeurFlags int32
//go:generate stringer -type=NeurFlags
var KiT_NeurFlags = kit.Enums.AddEnum(NeurFlagsN, true, nil)
func (ev NeurFlags) MarshalJSON() ([]byte, error) { return kit.EnumMarshalJSON(ev) }
func (ev *NeurFlags) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }
// The neuron flags
const (
// NeurOff flag indicates that this neuron has been turned off (i.e., lesioned)
NeurOff NeurFlags = iota
// NeurHasExt means the neuron has external input in its Ext field
NeurHasExt
// NeurHasTarg means the neuron has external target input in its Targ field
NeurHasTarg
// NeurHasCmpr means the neuron has external comparison input in its Targ field -- used for computing
// comparison statistics but does not drive neural activity ever
NeurHasCmpr
NeurFlagsN
)
/*
more specialized flags in C++ emergent -- only add in specialized cases where needed, although
there could be conflicts potentially, so may want to just go ahead and add here..
enum LeabraUnitFlags { // #BITS extra flags on top of ext flags for leabra
SUPER = 0x00000100, // superficial layer neocortical cell -- has deep.on role = SUPER
DEEP = 0x00000200, // deep layer neocortical cell -- has deep.on role = DEEP
TRC = 0x00000400, // thalamic relay cell (Pulvinar) cell -- has deep.on role = TRC
D1R = 0x00001000, // has predominantly D1 receptors
D2R = 0x00002000, // has predominantly D2 receptors
ACQUISITION = 0x00004000, // involved in Acquisition
EXTINCTION = 0x00008000, // involved in Extinction
APPETITIVE = 0x00010000, // appetitive (positive valence) coding
AVERSIVE = 0x00020000, // aversive (negative valence) coding
PATCH = 0x00040000, // patch-like structure (striosomes)
MATRIX = 0x00080000, // matrix-like structure
DORSAL = 0x00100000, // dorsal
VENTRAL = 0x00200000, // ventral
};
*/ | leabra/neuron.go | 0.698021 | 0.663511 | neuron.go | starcoder |
package main
// TrieNode is a single node of a Trie: a child map keyed by rune plus a flag
// marking whether the path from the root to this node spells a complete word.
type TrieNode struct {
	children map[rune]*TrieNode // outgoing edges, one per distinct next rune
	isWord   bool               // true when this node terminates an inserted word
}

// NewTrieNode creates a new pre-defined trie node.
// This method should only be called by trie library methods;
// users should not call it directly.
func NewTrieNode() *TrieNode {
	node := &TrieNode{}
	node.children = map[rune]*TrieNode{}
	return node
}

// DFSCount returns the number of complete words stored in the subtree rooted
// at this node, via a depth-first traversal.
// This method should only be called by trie library methods;
// users should not call it directly.
func (node *TrieNode) DFSCount() int {
	total := 0
	if node.isWord {
		total++
	}
	for _, sub := range node.children {
		total += sub.DFSCount()
	}
	return total
}

// DFSList returns every complete word stored in the subtree rooted at this
// node; path carries the runes accumulated from the root so far. Order is
// unspecified because map iteration order is random.
// This method should only be called by trie library methods;
// users should not call it directly.
func (node *TrieNode) DFSList(path []rune) []string {
	var collected []string
	if node.isWord {
		collected = append(collected, string(path))
	}
	for r, sub := range node.children {
		collected = append(collected, sub.DFSList(append(path, r))...)
	}
	return collected
}
// Trie is a simple struct that will only hold a root TrieNode
type Trie struct {
root *TrieNode
}
// NewTrie creates a pre-defined empty Trie struct
func NewTrie() *Trie {
return &Trie{
root: NewTrieNode(),
}
}
// Insert will accept a string and insert into a Trie struct
func (trie *Trie) Insert(word string) {
currentNode := trie.root
for _, char := range word {
if currentNode.children[char] == nil {
currentNode.children[char] = NewTrieNode()
}
currentNode = currentNode.children[char]
}
currentNode.isWord = true
}
// Contains parses a Trie and returns true if the trie contains the selected word
func (trie *Trie) Contains(word string) bool {
currentNode := trie.root
for _, char := range word {
if _, ok := currentNode.children[char]; ok {
currentNode = currentNode.children[char]
} else {
return false
}
}
return currentNode.isWord
}
// Count parses a Trie and returns the number of words in the Trie
func (trie *Trie) Count() int {
currentNode := trie.root
return currentNode.DFSCount()
}
// List parses a Trie and returns a list of strings contained in the Trie
func (trie *Trie) List() []string {
currentNode := trie.root
return currentNode.DFSList(make([]rune, 0))
}
func (trie *Trie) Search(partial string) {
} | trie.go | 0.780077 | 0.472805 | trie.go | starcoder |
package cryptypes
import (
	"database/sql/driver"
	"fmt"
)
// EncryptedFloat32 supports encrypting Float32 data.
// The plaintext lives in Raw; the DB sees only the ciphertext.
type EncryptedFloat32 struct {
	Field
	Raw float32
}

// Scan converts the value from the DB into a usable EncryptedFloat32 value.
// It returns an error (instead of panicking, as the original did) when the
// driver supplies anything other than the expected []byte ciphertext.
func (s *EncryptedFloat32) Scan(value interface{}) error {
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into EncryptedFloat32, want []byte", value)
	}
	return decrypt(b, &s.Raw)
}

// Value converts an initialized EncryptedFloat32 value into a value that can safely be stored in the DB
func (s EncryptedFloat32) Value() (driver.Value, error) {
	return encrypt(s.Raw)
}
// NullEncryptedFloat32 supports encrypting nullable Float32 data.
// Empty records whether the DB column was NULL.
type NullEncryptedFloat32 struct {
	Field
	Raw   float32
	Empty bool
}

// Scan converts the value from the DB into a usable NullEncryptedFloat32 value.
// A nil driver value (SQL NULL) zeroes Raw and sets Empty. Any non-[]byte
// value yields an error rather than a panic.
func (s *NullEncryptedFloat32) Scan(value interface{}) error {
	if value == nil {
		s.Raw = 0
		s.Empty = true
		return nil
	}
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into NullEncryptedFloat32, want []byte", value)
	}
	return decrypt(b, &s.Raw)
}

// Value converts an initialized NullEncryptedFloat32 value into a value that
// can safely be stored in the DB; an Empty value is stored as NULL.
func (s NullEncryptedFloat32) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return encrypt(s.Raw)
}
// SignedFloat32 supports signing Float32 data.
// Valid records whether the stored signature verified on Scan.
type SignedFloat32 struct {
	Field
	Raw   float32
	Valid bool
}

// Scan converts the value from the DB into a usable SignedFloat32 value.
// It returns an error (instead of panicking, as the original did) when the
// driver supplies anything other than the expected []byte payload.
func (s *SignedFloat32) Scan(value interface{}) (err error) {
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into SignedFloat32, want []byte", value)
	}
	s.Valid, err = verify(b, &s.Raw)
	return
}

// Value converts an initialized SignedFloat32 value into a value that can safely be stored in the DB
func (s SignedFloat32) Value() (driver.Value, error) {
	return sign(s.Raw)
}
// NullSignedFloat32 supports signing nullable Float32 data.
// Empty records a DB NULL; Valid records whether the signature verified
// (a NULL is considered valid).
type NullSignedFloat32 struct {
	Field
	Raw   float32
	Empty bool
	Valid bool
}

// Scan converts the value from the DB into a usable NullSignedFloat32 value.
// A nil driver value (SQL NULL) zeroes Raw and marks the value Empty and
// Valid. Any non-[]byte value yields an error rather than a panic.
func (s *NullSignedFloat32) Scan(value interface{}) (err error) {
	if value == nil {
		s.Raw = 0
		s.Empty = true
		s.Valid = true
		return nil
	}
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into NullSignedFloat32, want []byte", value)
	}
	s.Valid, err = verify(b, &s.Raw)
	return
}

// Value converts an initialized NullSignedFloat32 value into a value that
// can safely be stored in the DB; an Empty value is stored as NULL.
func (s NullSignedFloat32) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return sign(s.Raw)
}
// SignedEncryptedFloat32 supports signing and encrypting Float32 data.
// Valid records whether the stored signature verified on Scan.
type SignedEncryptedFloat32 struct {
	Field
	Raw   float32
	Valid bool
}

// Scan converts the value from the DB into a usable SignedEncryptedFloat32
// value. It returns an error (instead of panicking, as the original did)
// when the driver supplies anything other than the expected []byte payload.
func (s *SignedEncryptedFloat32) Scan(value interface{}) (err error) {
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into SignedEncryptedFloat32, want []byte", value)
	}
	s.Valid, err = decryptVerify(b, &s.Raw)
	return
}

// Value converts an initialized SignedEncryptedFloat32 value into a value that can safely be stored in the DB
func (s SignedEncryptedFloat32) Value() (driver.Value, error) {
	return encryptSign(s.Raw)
}
// NullSignedEncryptedFloat32 supports signing and encrypting nullable Float32 data
type NullSignedEncryptedFloat32 struct {
Field
Raw float32
Empty bool
Valid bool
}
// Scan converts the value from the DB into a usable NullSignedEncryptedFloat32 value
func (s *NullSignedEncryptedFloat32) Scan(value interface{}) (err error) {
if value == nil {
s.Raw = 0
s.Empty = true
s.Valid = true
return nil
}
s.Valid, err = decryptVerify(value.([]byte), &s.Raw)
return
}
// Value converts an initialized NullSignedEncryptedFloat32 value into a value that can safely be stored in the DB
func (s NullSignedEncryptedFloat32) Value() (driver.Value, error) {
if s.Empty {
return nil, nil
}
return encryptSign(s.Raw)
} | cryptypes/type_float32.go | 0.827724 | 0.595022 | type_float32.go | starcoder |
package specs
import (
"errors"
"fmt"
"github.com/jexia/semaphore/v2/pkg/specs/metadata"
)
// Enum represents a enum configuration
type Enum struct {
	*metadata.Meta
	Name string `json:"name,omitempty" yaml:"name,omitempty"`
	Description string `json:"description,omitempty" yaml:"description,omitempty"`
	// Keys indexes the enum values by their symbolic name.
	Keys map[string]*EnumValue `json:"keys,omitempty" yaml:"keys,omitempty"`
	// Positions indexes the same enum values by their numeric position.
	Positions map[int32]*EnumValue `json:"positions,omitempty" yaml:"positions,omitempty"`
}

// Clone enum schema.
// Note: this is a shallow copy — the receiver is copied by value, so the
// Keys and Positions maps (and the embedded Meta pointer) are shared with
// the original.
func (enum Enum) Clone() *Enum { return &enum }
// Compare the given enum against the expected one and return the first
// difference found as an error, or nil when they are equivalent.
func (enum *Enum) Compare(expected *Enum) error {
	if expected == nil && enum == nil {
		return nil
	}
	if expected == nil && enum != nil {
		return errors.New("expected to be nil")
	}
	if expected != nil && enum == nil {
		return fmt.Errorf("expected to be %v, got %v", expected.Name, nil)
	}
	if expected.Name != enum.Name {
		return fmt.Errorf("expected to be %v, got %v", expected.Name, enum.Name)
	}
	// Compare sizes up front so missing/extra entries are reported early.
	// Fix: the original printed the map itself in the "got" position where
	// the length was clearly intended.
	if len(expected.Keys) != len(enum.Keys) {
		return fmt.Errorf("expected to have %v keys, got %v", len(expected.Keys), len(enum.Keys))
	}
	if len(expected.Positions) != len(enum.Positions) {
		return fmt.Errorf("expected to have %v positions, got %v", len(expected.Positions), len(enum.Positions))
	}
	for expectedKey, expectedValue := range expected.Keys {
		// given enum does not include the current key
		enumValue, ok := enum.Keys[expectedKey]
		if !ok {
			return fmt.Errorf("expected to have %v key", expectedKey)
		}
		if err := enumValue.Compare(expectedValue); err != nil {
			return fmt.Errorf("value mismatch: %w", err)
		}
	}
	for expectedPos, expectedValue := range expected.Positions {
		// given enum does not include the current position
		enumValue, ok := enum.Positions[expectedPos]
		if !ok {
			return fmt.Errorf("expected to have %v position", expectedPos)
		}
		if err := enumValue.Compare(expectedValue); err != nil {
			return fmt.Errorf("value mismatch: %w", err)
		}
	}
	return nil
}
// EnumValue represents a enum configuration
type EnumValue struct {
	*metadata.Meta
	// Key is the symbolic name of this enum entry.
	Key string `json:"key,omitempty"`
	// Position is the numeric value of this enum entry.
	Position int32 `json:"position,omitempty"`
	Description string `json:"description,omitempty"`
}
// Compare the given enum value against the expected one and returns the first
// met difference as error.
func (value *EnumValue) Compare(expected *EnumValue) error {
if expected == nil && value == nil {
return nil
}
if expected == nil && value != nil {
return errors.New("expected to be nil")
}
if expected != nil && value == nil {
return fmt.Errorf("expected to be %v:%v, got %v", expected.Key, expected.Position, nil)
}
if expected.Key != value.Key || expected.Position != value.Position {
return fmt.Errorf("expected to be %v:%v, got %v:%v", expected.Key, expected.Position, value.Key, value.Position)
}
return nil
} | pkg/specs/enum.go | 0.791338 | 0.417717 | enum.go | starcoder |
package genericlist
import (
"fmt"
"sort"
)
// FloatList is a Python-style list of float64 values backed by a slice.
type FloatList struct {
	Values []float64
}

// Append adds x to the end of the list.
func (floatlist *FloatList) Append(x float64) {
	floatlist.Values = append(floatlist.Values, x)
}

// Extend appends every element of x to the end of the list.
func (floatlist *FloatList) Extend(x []float64) {
	floatlist.Values = append(floatlist.Values, x...)
}

// Insert places x at index i, shifting later elements one position right.
// (The original comments here described hard-coded indices 2 and 3; the
// code has always shifted at the given index i.)
func (floatlist *FloatList) Insert(i int, x float64) {
	// Grow by one (the value is overwritten below), shift the tail right,
	// then drop x into the freed slot.
	floatlist.Values = append(floatlist.Values, 0.0)
	copy(floatlist.Values[i+1:], floatlist.Values[i:])
	floatlist.Values[i] = x
}

// Remove deletes the first occurrence of x, if any.
func (floatlist *FloatList) Remove(x float64) {
	for i, value := range floatlist.Values {
		if value == x {
			floatlist.Values = append(floatlist.Values[:i], floatlist.Values[i+1:]...)
			break
		}
	}
}

// Pop removes and returns the element at index x[0], or the last element
// when called with no argument; extra arguments are ignored. It panics on
// an empty list or out-of-range index, matching slice semantics.
func (floatlist *FloatList) Pop(x ...int) float64 {
	i := len(floatlist.Values) - 1 // default: last element
	if len(x) > 0 {
		i = x[0]
	}
	res := floatlist.Values[i]
	floatlist.Values = append(floatlist.Values[:i], floatlist.Values[i+1:]...)
	return res
}

// Popleft removes the first element; the removed value is discarded.
func (floatlist *FloatList) Popleft() {
	floatlist.Pop(0)
}

// Clear removes all elements.
func (floatlist *FloatList) Clear() {
	floatlist.Values = nil
}

// Index returns the indices of every occurrence of x (empty when absent).
func (floatlist *FloatList) Index(x float64) []int {
	res := []int{}
	for i, value := range floatlist.Values {
		if value == x {
			res = append(res, i)
		}
	}
	return res
}

// Count returns the number of occurrences of x.
func (floatlist *FloatList) Count(x float64) int {
	res := 0
	for _, value := range floatlist.Values {
		if value == x {
			res++
		}
	}
	return res
}

// Sort sorts the list in ascending order.
func (floatlist *FloatList) Sort() {
	// sort.Float64s is the idiomatic shorthand for
	// sort.Sort(sort.Float64Slice(...)).
	sort.Float64s(floatlist.Values)
}

// Reverse sorts the list in descending order.
// (Note: like the original, this sorts — it does not reverse in place.)
func (floatlist *FloatList) Reverse() {
	sort.Sort(sort.Reverse(sort.Float64Slice(floatlist.Values)))
}

// Copy returns a deep copy of the list.
func (floatlist *FloatList) Copy() FloatList {
	res := FloatList{Values: make([]float64, len(floatlist.Values))}
	copy(res.Values, floatlist.Values)
	return res
}

// Len returns the number of elements.
func (floatlist FloatList) Len() int {
	return len(floatlist.Values)
}

// Contain reports whether x is present in the list.
func (floatlist FloatList) Contain(x float64) bool {
	for _, value := range floatlist.Values {
		if value == x {
			return true
		}
	}
	return false
}

// iterable is anything that can report its own length.
type iterable interface {
	Len() int
}

// Len returns the length of any iterable.
func Len(list iterable) int {
	return list.Len()
}

// TestFloat is a smoke-test/demo of FloatList driven by prints; it is not a
// `go test` test function.
func TestFloat() {
	x := FloatList{[]float64{1.2, 2.454, 3.12}}
	for _, value := range []float64{5.4, 6.3, 7.02, 8.0, 9.9} {
		x.Append(value)
	}
	fmt.Println(x)
	x.Extend([]float64{10.10, 11.12})
	fmt.Println(x)
	x.Insert(3, 9)
	fmt.Println(x)
	x.Remove(11) // no-op: the list holds 11.12, not 11
	fmt.Println(x)
	c := x.Copy()
	y := x.Pop()
	fmt.Println(y)
	z := x.Pop(2)
	fmt.Println(z)
	fmt.Println(x.Index(7.02))
	fmt.Println(x.Count(9))
	x.Sort()
	fmt.Println(x)
	x.Reverse()
	fmt.Println(x)
	// The copy is independent of x.
	c.Append(-1)
	fmt.Println(c)
	fmt.Println(Len(c))
	x.Clear()
	fmt.Println(x)
}
package trueskill
import (
"github.com/ChrisHines/GoSkills/skills"
"github.com/ChrisHines/GoSkills/skills/numerics"
"math"
"sort"
)
// Calculates new ratings for only two teams where each team has 1 or more players.
// When you only have two teams, the math is still simple: no factor graphs are used yet.
type TwoTeamCalc struct{}
// Calculates new ratings based on the prior ratings and team ranks use 1 for first place, repeat the number for a tie (e.g. 1, 2, 2).
func (calc *TwoTeamCalc) CalcNewRatings(gi *skills.GameInfo, teams []skills.Team, ranks ...int) skills.PlayerRatings {
newSkills := make(map[interface{}]skills.Rating)
// Basic argument checking
validateTeamCount(teams, twoTeamTeamRange)
validatePlayersPerTeam(teams, twoTeamPlayerRange)
// Copy slices so we don't confuse the client code
steams := append([]skills.Team{}, teams...)
sranks := append([]int{}, ranks...)
// Make sure things are in order
sort.Sort(skills.NewRankedTeams(steams, sranks))
winningTeam := steams[0]
losingTeam := steams[1]
wasDraw := sranks[0] == sranks[1]
twoTeamUpdateRatings(gi, newSkills, winningTeam, losingTeam, cond(wasDraw, skills.Draw, skills.Win))
twoTeamUpdateRatings(gi, newSkills, losingTeam, winningTeam, cond(wasDraw, skills.Draw, skills.Lose))
return newSkills
}
func twoTeamUpdateRatings(gi *skills.GameInfo, newSkills skills.PlayerRatings, selfTeam, otherTeam skills.Team, comparison int) {
drawMargin := drawMarginFromDrawProbability(gi.DrawProbability, gi.Beta)
betaSqr := numerics.Sqr(gi.Beta)
tauSqr := numerics.Sqr(gi.DynamicsFactor)
totalPlayers := selfTeam.PlayerCount() + otherTeam.PlayerCount()
selfMeanSum := selfTeam.Accum(skills.MeanSum)
otherMeanSum := otherTeam.Accum(skills.MeanSum)
c := math.Sqrt(selfTeam.Accum(skills.VarianceSum) + otherTeam.Accum(skills.VarianceSum) + float64(totalPlayers)*betaSqr)
winningMean := selfMeanSum
losingMean := otherMeanSum
if comparison == skills.Lose {
winningMean, losingMean = losingMean, winningMean
}
meanDelta := winningMean - losingMean
var v, w, rankMultiplier float64
if comparison != skills.Draw {
v = vExceedsMarginC(meanDelta, drawMargin, c)
w = wExceedsMarginC(meanDelta, drawMargin, c)
rankMultiplier = float64(comparison)
} else {
v = vWithinMarginC(meanDelta, drawMargin, c)
w = wWithinMarginC(meanDelta, drawMargin, c)
rankMultiplier = 1
}
for p, r := range selfTeam.PlayerRatings {
prevPlayerRating := r
meanMultiplier := (prevPlayerRating.Variance() + tauSqr) / c
stdDevMultiplier := (prevPlayerRating.Variance() + tauSqr) / numerics.Sqr(c)
playerMeanDelta := rankMultiplier * meanMultiplier * v
newMean := prevPlayerRating.Mean() + playerMeanDelta
newStdDev := math.Sqrt((prevPlayerRating.Variance() + tauSqr) * (1 - w*stdDevMultiplier))
newSkills[p] = skills.NewRating(newMean, newStdDev)
}
}
// Calculates the match quality as the likelihood of all teams drawing (0% = bad, 100% = well matched).
func (calc *TwoTeamCalc) CalcMatchQual(gi *skills.GameInfo, teams []skills.Team) float64 {
// Basic argument checking
validateTeamCount(teams, twoTeamTeamRange)
validatePlayersPerTeam(teams, twoTeamPlayerRange)
// We've verified that there's just two teams
team1 := teams[0]
team1Count := team1.PlayerCount()
team2 := teams[1]
team2Count := team2.PlayerCount()
totalPlayers := team1Count + team2Count
betaSqr := numerics.Sqr(gi.Beta)
team1MeanSum := team1.Accum(skills.MeanSum)
team1VarSum := team1.Accum(skills.VarianceSum)
team2MeanSum := team2.Accum(skills.MeanSum)
team2VarSum := team2.Accum(skills.VarianceSum)
// This comes from equation 4.1 in the TrueSkill paper on page 8
// The equation was broken up into the part under the square root sign and
// the exponential part to make the code easier to read.
betaSqrPlayers := betaSqr * float64(totalPlayers)
sqrtPart := math.Sqrt(betaSqrPlayers / (betaSqrPlayers + team1VarSum + team2VarSum))
expPart := math.Exp(-.5 * numerics.Sqr(team1MeanSum-team2MeanSum) / (betaSqrPlayers + team1VarSum + team2VarSum))
return expPart * sqrtPart
}
var (
twoTeamTeamRange = numerics.Exactly(2)
twoTeamPlayerRange = numerics.AtLeast(1)
) | vendor/github.com/ChrisHines/GoSkills/skills/trueskill/TwoTeamCalc.go | 0.747892 | 0.464902 | TwoTeamCalc.go | starcoder |
// Package dep analyzes dependencies between values.
package dep
import (
"errors"
"cuelang.org/go/internal/core/adt"
)
// A Dependency is a reference and the node that reference resolves to.
type Dependency struct {
	// Node is the referenced node.
	Node *adt.Vertex
	// Reference is the expression that referenced the node.
	Reference adt.Resolver
	// top is set by the visitor: true when the reference occurs directly in
	// the conjuncts of the root vertex passed to Visit*, false when it
	// occurs inside a subexpression (e.g. a list or struct literal).
	top bool
}
// Import returns the import reference or nil if the reference was within
// the same package as the visited Vertex.
func (d *Dependency) Import() *adt.ImportReference {
	x, _ := d.Reference.(adt.Expr)
	return importRef(x)
}
// IsRoot reports whether the dependency is referenced by the root of the
// original Vertex passed to any of the Visit* functions, and not one of its
// descendent arcs. This always returns true for Visit().
func (d *Dependency) IsRoot() bool {
	return d.top
}
// Path currently always returns nil.
// TODO(review): presumably intended to report the path of the dependency
// within the referenced vertex — confirm before relying on it.
func (d *Dependency) Path() []adt.Feature {
	return nil
}
// importRef walks selector/index chains down to the underlying import
// reference, returning nil when the expression does not bottom out in one.
func importRef(r adt.Expr) *adt.ImportReference {
	for {
		switch x := r.(type) {
		case *adt.ImportReference:
			return x
		case *adt.SelectorExpr:
			r = x.X
		case *adt.IndexExpr:
			r = x.X
		default:
			return nil
		}
	}
}
// VisitFunc is used for reporting dependencies. Returning a non-nil error
// aborts the walk; the error is propagated to the Visit* caller.
type VisitFunc func(Dependency) error
// Visit calls f for all vertices referenced by the conjuncts of n without
// descending into the elements of list or fields of structs. Only references
// that do not refer to the conjuncts of n itself are reported.
func Visit(c *adt.OpContext, n *adt.Vertex, f VisitFunc) error {
	return visit(c, n, f, false, true)
}
// VisitAll calls f for all vertices referenced by the conjuncts of n including
// those of descendant fields and elements. Only references that do not refer to
// the conjuncts of n itself are reported.
func VisitAll(c *adt.OpContext, n *adt.Vertex, f VisitFunc) error {
	return visit(c, n, f, true, true)
}
// VisitFields calls f for n and all its descendent arcs that have a conjunct
// that originates from a conjunct in n. Only the conjuncts of n that ended up
// as a conjunct in an actual field are visited and they are visited for each
// field in which the occurs.
func VisitFields(c *adt.OpContext, n *adt.Vertex, f VisitFunc) error {
	m := marked{}
	m.markExpr(n)
	// dynamic (defined elsewhere in this package) walks the actual fields
	// using the marks gathered above.
	dynamic(c, n, f, m, true)
	return nil
}
// empty is a sentinel finalized vertex used as the Vertex of the synthetic
// Environments created while walking struct and list literals. References
// that resolve to it are skipped by markResolver (see the `ref != empty`
// check there).
var empty *adt.Vertex
func init() {
	// TODO: Consider setting a non-nil BaseValue.
	empty = &adt.Vertex{}
	empty.UpdateStatus(adt.Finalized)
}
// visit is the shared implementation behind Visit and VisitAll. It walks the
// conjuncts of n with a visitor. Early termination is implemented by
// panicking with the sentinel `aborted` (see markResolver); the deferred
// recover below translates that back into the error stored in v.err.
func visit(c *adt.OpContext, n *adt.Vertex, f VisitFunc, all, top bool) (err error) {
	if c == nil {
		panic("nil context")
	}
	v := visitor{
		ctxt: c,
		visit: f,
		node: n,
		all: all,
		top: top,
	}
	defer func() {
		switch x := recover(); x {
		case nil:
			// normal completion
		case aborted:
			// walk was cut short by the VisitFunc returning an error
			err = v.err
		default:
			// unrelated panic: re-raise
			panic(x)
		}
	}()
	for _, x := range n.Conjuncts {
		v.markExpr(x.Env, x.Elem())
	}
	return nil
}
// aborted is the sentinel panic value used to unwind the walk once the
// user-supplied VisitFunc returns an error (see visit and markResolver).
var aborted = errors.New("aborted")
// visitor carries the state of a single dependency walk.
type visitor struct {
	ctxt *adt.OpContext
	visit VisitFunc // user callback invoked for each dependency found
	node *adt.Vertex // root vertex whose dependencies are being reported
	err error // error returned by visit, delivered via panic(aborted)
	all bool // also descend into list/struct literals (VisitAll mode)
	top bool // currently at the root, not inside a subexpression
}
// TODO: factor out the below logic as either a low-level dependency analyzer or
// some walk functionality.
// markExpr visits all nodes in an expression to mark dependencies.
func (c *visitor) markExpr(env *adt.Environment, expr adt.Elem) {
	switch x := expr.(type) {
	case nil:
	case adt.Resolver:
		// A reference: resolve it and report the dependency.
		c.markResolver(env, x)
	case *adt.BinaryExpr:
		c.markExpr(env, x.X)
		c.markExpr(env, x.Y)
	case *adt.UnaryExpr:
		c.markExpr(env, x.X)
	case *adt.Interpolation:
		// Only the odd-indexed Parts are walked as expressions (the
		// even-indexed ones are presumably the literal string segments).
		for i := 1; i < len(x.Parts); i += 2 {
			c.markExpr(env, x.Parts[i])
		}
	case *adt.BoundExpr:
		c.markExpr(env, x.Expr)
	case *adt.CallExpr:
		c.markExpr(env, x.Fun)
		// Arguments are always walked in full, even outside "all" mode.
		saved := c.all
		c.all = true
		for _, a := range x.Args {
			c.markExpr(env, a)
		}
		c.all = saved
	case *adt.DisjunctionExpr:
		for _, d := range x.Values {
			c.markExpr(env, d.Val)
		}
	case *adt.SliceExpr:
		c.markExpr(env, x.X)
		c.markExpr(env, x.Lo)
		c.markExpr(env, x.Hi)
		c.markExpr(env, x.Stride)
	case *adt.ListLit:
		// Elements live in a child scope rooted at the sentinel vertex.
		env := &adt.Environment{Up: env, Vertex: empty}
		for _, e := range x.Elems {
			switch x := e.(type) {
			case *adt.Comprehension:
				c.markComprehension(env, x)
			case adt.Expr:
				c.markSubExpr(env, x)
			case *adt.Ellipsis:
				if x.Value != nil {
					c.markSubExpr(env, x.Value)
				}
			}
		}
	case *adt.StructLit:
		// Declarations live in a child scope rooted at the sentinel vertex.
		env := &adt.Environment{Up: env, Vertex: empty}
		for _, e := range x.Decls {
			c.markDecl(env, e)
		}
	}
}
// markResolver resolves a reference and reports it as a dependency when it
// resolves to a vertex other than the visited node (or the `empty` sentinel).
func (c *visitor) markResolver(env *adt.Environment, r adt.Resolver) {
	switch x := r.(type) {
	case nil:
	case *adt.LetReference:
		// Follow the let binding's expression in the environment where the
		// let was declared (x.UpCount levels up).
		saved := c.ctxt.PushState(env, nil)
		env := c.ctxt.Env(x.UpCount)
		c.markExpr(env, x.X)
		c.ctxt.PopState(saved)
		return
	}
	if ref, _ := c.ctxt.Resolve(env, r); ref != nil {
		if ref != c.node && ref != empty {
			d := Dependency{
				Node: ref,
				Reference: r,
				top: c.top,
			}
			if err := c.visit(d); err != nil {
				// Abort the entire walk; recovered in visit().
				c.err = err
				panic(aborted)
			}
		}
		return
	}
	// It is possible that a reference cannot be resolved because it is
	// incomplete. In this case, we should check whether subexpressions of the
	// reference can be resolved to mark those dependencies. For instance,
	// prefix paths of selectors and the value or index of an index expression
	// may independently resolve to a valid dependency.
	switch x := r.(type) {
	case *adt.NodeLink:
		panic("unreachable")
	case *adt.IndexExpr:
		c.markExpr(env, x.X)
		c.markExpr(env, x.Index)
	case *adt.SelectorExpr:
		c.markExpr(env, x.X)
	}
}
// markSubExpr walks x only in "all" mode (VisitAll). Subexpressions reached
// this way are no longer at the top of the root vertex, so top is cleared
// for the duration of the walk and restored afterwards.
func (c *visitor) markSubExpr(env *adt.Environment, x adt.Expr) {
	if c.all {
		saved := c.top
		c.top = false
		c.markExpr(env, x)
		c.top = saved
	}
}
// markDecl marks the dependencies of a single declaration inside a struct
// literal.
func (c *visitor) markDecl(env *adt.Environment, d adt.Decl) {
	switch x := d.(type) {
	case *adt.Field:
		c.markSubExpr(env, x.Value)
	case *adt.OptionalField:
		// when dynamic, only continue if there is evidence of
		// the field in the parallel actual evaluation.
		c.markSubExpr(env, x.Value)
	case *adt.BulkOptionalField:
		c.markExpr(env, x.Filter)
		// when dynamic, only continue if there is evidence of
		// the field in the parallel actual evaluation.
		c.markSubExpr(env, x.Value)
	case *adt.DynamicField:
		c.markExpr(env, x.Key)
		// when dynamic, only continue if there is evidence of
		// a matching field in the parallel actual evaluation.
		c.markSubExpr(env, x.Value)
	case *adt.Comprehension:
		c.markComprehension(env, x)
	case adt.Expr:
		c.markExpr(env, x)
	case *adt.Ellipsis:
		if x.Value != nil {
			c.markSubExpr(env, x.Value)
		}
	}
}
// markComprehension marks the clauses of a comprehension, then its value in
// the environment those clauses produce.
func (c *visitor) markComprehension(env *adt.Environment, y *adt.Comprehension) {
	env = c.markYielder(env, y.Clauses)
	c.markExpr(env, y.Value)
}
// markYielder marks the dependencies of a chain of comprehension clauses and
// returns the environment in which the comprehension's value is evaluated.
// For/let clauses introduce a child scope rooted at the sentinel vertex.
func (c *visitor) markYielder(env *adt.Environment, y adt.Yielder) *adt.Environment {
	switch x := y.(type) {
	case *adt.ForClause:
		c.markExpr(env, x.Src)
		env = &adt.Environment{Up: env, Vertex: empty}
		env = c.markYielder(env, x.Dst)
		// In dynamic mode, iterate over all actual value and
		// evaluate.
	case *adt.LetClause:
		c.markExpr(env, x.Expr)
		env = &adt.Environment{Up: env, Vertex: empty}
		env = c.markYielder(env, x.Dst)
	case *adt.IfClause:
		c.markExpr(env, x.Condition)
		// In dynamic mode, only continue if condition is true.
		env = c.markYielder(env, x.Dst)
	}
	return env
}
package service
import (
"image"
"math"
)
// rotate holds the precomputed parameters for rotating src by a fixed angle:
// the sin/cos of the angle, the destination image dimensions (neww, newh),
// and the translation offsets (dx, dy) used by pt() to map destination
// pixels back into source coordinates.
type rotate struct {
	dx float64
	dy float64
	sin float64
	cos float64
	neww float64
	newh float64
	src *image.RGBA
}
// rotate precomputes all parameters needed to rotate src by angle degrees
// (sin/cos, destination bounding-box size, and the dest-to-source
// translation offsets) and returns the receiver for chaining.
func (r *rotate) rotate(angle float64, src *image.RGBA) *rotate {
	r.src = src
	srsize := src.Bounds().Size()
	width, height := srsize.X, srsize.Y
	// Coordinates of the source image's four corners, with the image center
	// as the origin of the coordinate system:
	// bottom-left, bottom-right, top-left, top-right.
	srcwp, srchp := float64(width)*0.5, float64(height)*0.5
	srcx1, srcy1 := -srcwp, srchp
	srcx2, srcy2 := srcwp, srchp
	srcx3, srcy3 := -srcwp, -srchp
	srcx4, srcy4 := srcwp, -srchp
	r.sin, r.cos = math.Sincos(radian(angle))
	// Corner coordinates after rotation.
	desx1, desy1 := r.cos*srcx1+r.sin*srcy1, -r.sin*srcx1+r.cos*srcy1
	desx2, desy2 := r.cos*srcx2+r.sin*srcy2, -r.sin*srcx2+r.cos*srcy2
	desx3, desy3 := r.cos*srcx3+r.sin*srcy3, -r.sin*srcx3+r.cos*srcy3
	desx4, desy4 := r.cos*srcx4+r.sin*srcy4, -r.sin*srcx4+r.cos*srcy4
	// New width and height: the bounding box of the rotated corners
	// (+0.5 compensates for the truncation to int in transformRGBA).
	r.neww = math.Max(math.Abs(desx4-desx1), math.Abs(desx3-desx2)) + 0.5
	r.newh = math.Max(math.Abs(desy4-desy1), math.Abs(desy3-desy2)) + 0.5
	// Translation offsets consumed by pt() when mapping destination pixels
	// back into the source frame.
	r.dx = -0.5*r.neww*r.cos - 0.5*r.newh*r.sin + srcwp
	r.dy = 0.5*r.neww*r.sin - 0.5*r.newh*r.cos + srchp
	return r
}
// radian converts an angle from degrees to radians.
func radian(deg float64) float64 {
	rad := deg * math.Pi / 180.0
	return rad
}
// transformRGBA renders the rotated image: for each destination pixel it
// maps back into the source via pt() and, when the mapped point lies within
// the source bounds, samples the source there. Unmapped pixels keep the
// zero value (transparent black) of the freshly allocated RGBA image.
func (r *rotate) transformRGBA() image.Image {
	srcb := r.src.Bounds()
	b := image.Rect(0, 0, int(r.neww), int(r.newh))
	dst := image.NewRGBA(b)
	for y := b.Min.Y; y < b.Max.Y; y++ {
		for x := b.Min.X; x < b.Max.X; x++ {
			sx, sy := r.pt(x, y)
			if inBounds(srcb, sx, sy) {
				// Anti-aliased sampling. `bili` is defined elsewhere in
				// this package — presumably a bilinear interpolator that
				// samples src at the fractional position (confirm there).
				c := bili.RGBA(r.src, sx, sy)
				off := (y-dst.Rect.Min.Y)*dst.Stride + (x-dst.Rect.Min.X)*4
				dst.Pix[off+0] = c.R
				dst.Pix[off+1] = c.G
				dst.Pix[off+2] = c.B
				dst.Pix[off+3] = c.A
			}
		}
	}
	return dst
}
// pt maps a destination pixel (x, y) back to a source coordinate via the
// inverse rotation plus a translation.
// NOTE(review): the first return value adds r.dy and the second adds r.dx,
// which looks swapped relative to the field names — confirm against the
// dx/dy definitions in rotate() and the (sx, sy) usage in transformRGBA.
func (r *rotate) pt(x, y int) (float64, float64) {
	return float64(-y)*r.sin + float64(x)*r.cos + r.dy,
		float64(y)*r.cos + float64(x)*r.sin + r.dx
}
func inBounds(b image.Rectangle, x, y float64) bool {
if x < float64(b.Min.X) || x >= float64(b.Max.X) {
return false
}
if y < float64(b.Min.Y) || y >= float64(b.Max.Y) {
return false
}
return true
}
func offRGBA(src *image.RGBA, x, y int) int {
return (y-src.Rect.Min.Y)*src.Stride + (x-src.Rect.Min.X)*4
} | app/interface/main/captcha/service/rotate.go | 0.521715 | 0.425904 | rotate.go | starcoder |
package skyhook
import (
"fmt"
"runtime"
)
// ExecOp is a prepared instance of an operation (created via
// ExecOpProvider.Prepare) that can be applied to tasks and finally closed.
type ExecOp interface {
	// Parallelism reports how many tasks may be applied concurrently.
	Parallelism() int
	// Apply runs the operation on a single task.
	Apply(task ExecTask) error
	// Close releases any resources held by this op instance.
	Close()
}
// SimpleExecOp adapts a stateless apply function (plus a desired degree of
// parallelism) into an ExecOp, for simple ops that need no persistent state.
type SimpleExecOp struct {
	ApplyFunc func(ExecTask) error
	P int
}

// Parallelism returns the configured parallelism; when P is unset (zero) it
// defaults to the number of CPUs.
func (e SimpleExecOp) Parallelism() int {
	if e.P != 0 {
		return e.P
	}
	return runtime.NumCPU()
}

// Apply delegates to the wrapped function.
func (e SimpleExecOp) Apply(task ExecTask) error {
	return e.ApplyFunc(task)
}

// Close is a no-op: SimpleExecOp holds no resources.
func (e SimpleExecOp) Close() {}
// ExecTask is a single unit of work handed to ExecOp.Apply.
type ExecTask struct {
	// For incremental operations, this must be the output key that will be created by this task.
	// TODO: operation may need to produce multiple output keys at some task
	// For other operations, I think this can be arbitrary, but usually it's still related to the output key
	Key string
	// Generally maps from input name to list of items in each dataset at that input
	Items map[string][][]Item
	// Metadata is an op-specific string.
	// TODO(review): its exact contents depend on the op — confirm per-op.
	Metadata string
}
// Config of the ExecOp for front-end.
type ExecOpConfig struct {
	ID string // unique identifier; used as the key in ExecOpProviders
	Name string // human-readable name shown in the front-end
	Description string
}
// ExecOpProvider describes an operation type: its front-end configuration,
// resource requirements, task decomposition, and how to instantiate it for
// execution. Providers are registered globally via AddExecOpImpl.
type ExecOpProvider interface {
	// Returns config for front-end.
	Config() ExecOpConfig
	// Returns resource requirements.
	Requirements(node Runnable) map[string]int
	// Returns list of tasks.
	// items: is a map: input name -> input dataset index -> items in that dataset
	GetTasks(node Runnable, items map[string][][]Item) ([]ExecTask, error)
	// Prepare the ExecOp for a node.
	Prepare(url string, node Runnable) (ExecOp, error)
	// Determines the input specification of a node.
	GetInputs(params string) []ExecInput
	// Determines the output specification of a node.
	GetOutputs(params string, inputTypes map[string][]DataType) []ExecOutput
	// Incremental ops support partial computation of their outputs.
	// This is only possible for concrete nodes (Resolve must return nil).
	IsIncremental() bool
	// Must be implemented if Incremental.
	// GetOutputKeys returns all output keys that would be produced given a set of input keys.
	// GetNeededInputs returns the input keys that are needed to compute a given subset of output keys.
	GetOutputKeys(node ExecNode, inputs map[string][][]string) []string
	GetNeededInputs(node ExecNode, outputs []string) map[string][][]string
	// Docker image name
	GetImageName(node Runnable) (string, error)
	// Optional system to provide customized state to store in ExecNode jobs.
	// For example, when training a model, we may want to store the loss history.
	// Can return nil to use defaults.
	// Second return value is the view of the JobOp, empty string to use default view.
	GetJobOp(node Runnable) (JobOp, string)
	// Virtualize is called when constructing an initial ExecutionGraph.
	// For example, if(A) { input B } else { input C } can be implemented by:
	// - Virtualize should return VirtualNode requiring only A
	// - Resolve can load A, and output a new graph that includes B or C depending on A
	Virtualize(node ExecNode) *VirtualNode
	// Optional system for pre-processing steps, dynamic execution graphs, etc.
	// Given a VirtualNode, returns a subgraph of new VirtualNodes that implement it.
	// Or nil if the VirtualNode is already OK.
	// Resolve is called just before executing the node.
	Resolve(node *VirtualNode, inputDatasets map[string][]Dataset, items map[string][][]Item) ExecutionGraph
}
// A helper to implement an ExecOpProvider as a struct.
// This way optional methods can be omitted and defaults used instead.
// It can be compiled to an ExecOpProvider by wrapping in an ExecOpImplProvider.
// For each static/dynamic pair below (ImageName/GetImageName,
// Inputs+Outputs/GetInputs+GetOutputs), set exactly one side; the static
// value takes precedence when set (see ExecOpImplProvider).
type ExecOpImpl struct {
	Config ExecOpConfig
	Requirements func(node Runnable) map[string]int
	GetTasks func(node Runnable, items map[string][][]Item) ([]ExecTask, error)
	Prepare func(url string, node Runnable) (ExecOp, error)
	// only one should be set (static/dynamic)
	ImageName string
	GetImageName func(node Runnable) (string, error)
	// static specification of inputs/outputs
	// one of dynamic/static should be set
	Inputs []ExecInput
	Outputs []ExecOutput
	// dynamic specification of inputs/outputs
	// one of dynamic/static should be set
	GetInputs func(params string) []ExecInput
	GetOutputs func(params string, inputTypes map[string][]DataType) []ExecOutput
	// optional; if set, op is considered "incremental"
	Incremental bool
	GetOutputKeys func(node ExecNode, inputs map[string][][]string) []string
	GetNeededInputs func(node ExecNode, outputs []string) map[string][][]string
	// more various optional functions
	GetJobOp func(node Runnable) (JobOp, string)
	Virtualize func(node ExecNode) *VirtualNode
	Resolve func(node *VirtualNode, inputDatasets map[string][]Dataset, items map[string][][]Item) ExecutionGraph
}
// ExecOpImplProvider compiles an ExecOpImpl into an ExecOpProvider: static
// fields (Inputs, Outputs, ImageName) take precedence when set, otherwise
// the corresponding dynamic Get* function is called; optional hooks
// (GetJobOp, Resolve) fall back to nil defaults when unset.
type ExecOpImplProvider struct {
	Impl ExecOpImpl
}
func (p ExecOpImplProvider) Config() ExecOpConfig {
	return p.Impl.Config
}
func (p ExecOpImplProvider) Requirements(node Runnable) map[string]int {
	return p.Impl.Requirements(node)
}
func (p ExecOpImplProvider) GetTasks(node Runnable, items map[string][][]Item) ([]ExecTask, error) {
	return p.Impl.GetTasks(node, items)
}
func (p ExecOpImplProvider) Prepare(url string, node Runnable) (ExecOp, error) {
	return p.Impl.Prepare(url, node)
}
// GetInputs prefers the static Inputs list when set.
func (p ExecOpImplProvider) GetInputs(params string) []ExecInput {
	if p.Impl.Inputs != nil {
		return p.Impl.Inputs
	} else {
		return p.Impl.GetInputs(params)
	}
}
// GetOutputs prefers the static Outputs list when set.
func (p ExecOpImplProvider) GetOutputs(params string, inputTypes map[string][]DataType) []ExecOutput {
	if p.Impl.Outputs != nil {
		return p.Impl.Outputs
	} else {
		return p.Impl.GetOutputs(params, inputTypes)
	}
}
func (p ExecOpImplProvider) IsIncremental() bool {
	return p.Impl.Incremental
}
func (p ExecOpImplProvider) GetOutputKeys(node ExecNode, inputs map[string][][]string) []string {
	return p.Impl.GetOutputKeys(node, inputs)
}
func (p ExecOpImplProvider) GetNeededInputs(node ExecNode, outputs []string) map[string][][]string {
	return p.Impl.GetNeededInputs(node, outputs)
}
// GetImageName prefers the static ImageName when non-empty.
func (p ExecOpImplProvider) GetImageName(node Runnable) (string, error) {
	if p.Impl.ImageName != "" {
		return p.Impl.ImageName, nil
	} else {
		return p.Impl.GetImageName(node)
	}
}
// GetJobOp returns the defaults (nil, "") when the optional hook is unset.
func (p ExecOpImplProvider) GetJobOp(node Runnable) (JobOp, string) {
	if p.Impl.GetJobOp == nil {
		return nil, ""
	}
	return p.Impl.GetJobOp(node)
}
// Resolve returns nil (node is already concrete) when the hook is unset.
func (p ExecOpImplProvider) Resolve(node *VirtualNode, inputDatasets map[string][]Dataset, items map[string][][]Item) ExecutionGraph {
	if p.Impl.Resolve == nil {
		return nil
	}
	return p.Impl.Resolve(node, inputDatasets, items)
}
// Virtualize delegates to the custom hook when provided; otherwise it builds
// the default VirtualNode by copying the node's fields and translating each
// ExecParent into a VirtualParent (parent type "n" maps to graph type
// "exec", "d" maps to "dataset").
func (p ExecOpImplProvider) Virtualize(node ExecNode) *VirtualNode {
	if p.Impl.Virtualize != nil {
		return p.Impl.Virtualize(node)
	}
	parents := make(map[string][]VirtualParent)
	for name := range node.Parents {
		parents[name] = make([]VirtualParent, len(node.Parents[name]))
		for i := range parents[name] {
			execParent := node.Parents[name][i]
			var graphID GraphID
			if execParent.Type == "n" {
				graphID.Type = "exec"
			} else if execParent.Type == "d" {
				graphID.Type = "dataset"
			}
			graphID.ID = execParent.ID
			parents[name][i] = VirtualParent{
				GraphID: graphID,
				Name: execParent.Name,
				DataType: execParent.DataType,
			}
		}
	}
	return &VirtualNode{
		Name: node.Name,
		Op: node.Op,
		Params: node.Params,
		Parents: parents,
		OrigNode: node,
	}
}
// ExecOpProviders maps operation IDs to their registered providers.
var ExecOpProviders = make(map[string]ExecOpProvider)

// AddExecOpImpl registers impl under its configured ID, panicking if the
// ID is already taken (a registration conflict is a programmer error).
func AddExecOpImpl(impl ExecOpImpl) {
	id := impl.Config.ID
	if ExecOpProviders[id] != nil {
		panic(fmt.Errorf("conflicting provider %s", id))
	}
	ExecOpProviders[id] = ExecOpImplProvider{impl}
}
// GetExecOp returns the provider registered under opName, panicking when
// no such provider exists.
func GetExecOp(opName string) ExecOpProvider {
	provider := ExecOpProviders[opName]
	if provider == nil {
		panic(fmt.Errorf("no such provider %s", opName))
	}
	return provider
}
package codegen
// fragmentTypeTmpl is the text/template source for the generated LLCPP
// handle-closing fragments. It defines three sub-templates:
//   - ArgumentName / ArgumentValue: render the expression used to access
//     an argument, preferring accessor methods when Access or
//     MutableAccess is set.
//   - TypeCloseHandles: recursively emits C++ code that closes or resets
//     every handle reachable from an argument — handles/requests/
//     protocols are reset(), arrays and vectors are walked element by
//     element, and other types delegate to their _CloseHandles() method.
const fragmentTypeTmpl = `
{{- define "ArgumentName" -}}
{{- if .Access -}}
{{ .ArgumentName }}()
{{- else if .MutableAccess -}}
mutable_{{ .ArgumentName }}()
{{- else -}}
{{ .ArgumentName }}
{{- end -}}
{{- end -}}
{{- define "ArgumentValue" -}}
{{- if .Access -}}
{{ .ArgumentName }}()
{{- else if .MutableAccess -}}
mutable_{{ .ArgumentName }}()
{{- else -}}
{{ .ArgumentValue }}
{{- end -}}
{{- end -}}
{{- define "TypeCloseHandles" }}
{{- if or (eq .ArgumentType.Kind HandleKind) (eq .ArgumentType.Kind RequestKind) (eq .ArgumentType.Kind ProtocolKind)}}
{{- if .Pointer }}
{{- if .Nullable }}
if ({{- template "ArgumentName" . }} != nullptr) {
{{- template "ArgumentName" . }}->reset();
}
{{- else }}
{{- template "ArgumentName" . }}->reset();
{{- end }}
{{- else }}
{{- template "ArgumentName" . }}.reset();
{{- end }}
{{- else if eq .ArgumentType.Kind ArrayKind }}
{
{{ .ArgumentType.ElementType.LLDecl }}* {{ .ArgumentName }}_element = {{ template "ArgumentValue" . }}.data();
for (size_t i = 0; i < {{ template "ArgumentValue" . }}.size(); ++i, ++{{ .ArgumentName }}_element) {
{{- template "TypeCloseHandles" NewTypedArgumentElement .ArgumentName .ArgumentType.ElementType }}
}
}
{{- else if eq .ArgumentType.Kind VectorKind }}
{
{{ .ArgumentType.ElementType.LLDecl }}* {{ .ArgumentName }}_element = {{ template "ArgumentValue" . }}.mutable_data();
for (uint64_t i = 0; i < {{ template "ArgumentValue" . }}.count(); ++i, ++{{ .ArgumentName }}_element) {
{{- template "TypeCloseHandles" NewTypedArgumentElement .ArgumentName .ArgumentType.ElementType }}
}
}
{{- else if .Pointer }}
{{- if .Nullable }}
if ({{- template "ArgumentName" . }} != nullptr) {
{{- template "ArgumentName" . }}->_CloseHandles();
}
{{- else }}
{{- template "ArgumentName" . }}->_CloseHandles();
{{- end }}
{{- else }}
{{- template "ArgumentName" . }}._CloseHandles();
{{- end }}
{{- end }}
`
package xpfunds
import (
"fmt"
"io/ioutil"
"math"
"strconv"
"strings"
"xpfunds/binarysearch"
"xpfunds/check"
"xpfunds/median"
)
// Fund holds one fund's raw monthly returns plus derived feature tables.
type Fund struct {
	name   string
	active string
	min    string
	// The monthly return of the fund, starting from the last month.
	// Values are stored as multipliers (1.0 + percent/100), see
	// fundFromLine.
	monthly []float64
	// The position of the first slice determines the dimension. The position of
	// the second slice indicates an end time of a period and the third position
	// the difference from the start time to the end time of a period. Arbitrary
	// range.
	features [][][]float64
	// Same as fieldValues, but holds the ratio of the value in this fund to the
	// value in the fund with the highest value of this field. Range: 0-1.
	ratio [][][]float64
}
// NewFund builds a Fund from its per-month return multipliers and
// precomputes both the derived feature tables and the (initially empty)
// ratio tables.
func NewFund(monthly []float64) *Fund {
	fund := &Fund{monthly: monthly}
	fund.setFeatures()
	fund.makeRatio()
	return fund
}
// setFeatures computes every derived feature table from f.monthly and
// appends them to f.features: return, standard deviation,
// negative-month ratio, median, and greatest fall (value and length).
func (f *Fund) setFeatures() {
	f.setReturn()
	f.setStdDev()
	f.setNegativeMonthRatio()
	f.setMedian()
	f.setGreatestFall()
}
// setReturn appends a feature table where ret[end][diff] is the
// geometric mean of the diff+1 monthly multipliers starting at index
// end.
func (f *Fund) setReturn() {
	ret := make([][]float64, len(f.monthly))
	for end, monthly := range f.monthly {
		ret[end] = make([]float64, len(f.monthly)-end)
		ret[end][0] = monthly
		// First pass: running products across each window.
		for diff := 1; diff < len(f.monthly)-end; diff++ {
			ret[end][diff] = ret[end][diff-1] * f.monthly[end+diff]
		}
		// Second pass: convert each product into its geometric mean.
		for diff := 1; diff < len(f.monthly)-end; diff++ {
			ret[end][diff] = math.Pow(ret[end][diff], 1.0/float64(diff+1))
		}
	}
	f.features = append(f.features, ret)
}
// setMedian appends a feature table where med[end][diff] is the median
// of the diff+1 monthly multipliers starting at index end. A sorted
// window is maintained incrementally via binary-search insertion.
func (f *Fund) setMedian() {
	med := make([][]float64, len(f.monthly))
	for end, monthly := range f.monthly {
		med[end] = make([]float64, len(f.monthly)-end)
		med[end][0] = monthly
		returns := []float64{monthly}
		for diff := 1; diff < len(f.monthly)-end; diff++ {
			returns = binarysearch.InsertInSorted(returns, f.monthly[end+diff])
			med[end][diff] = median.MedianFromSorted(returns)
		}
	}
	f.features = append(f.features, med)
}
// setStdDev appends a feature table where stdDev[end][diff] is the
// population standard deviation of the diff+1 monthly multipliers
// starting at index end.
//
// The previous implementation re-scanned the whole window for every
// diff, making each row O(n^2) (O(n^3) overall). This version keeps
// running sums of x and x^2 and uses Var = E[x^2] - E[x]^2, which is
// mathematically equivalent and O(n) per row.
func (f *Fund) setStdDev() {
	stdDev := make([][]float64, len(f.monthly))
	for end, monthly := range f.monthly {
		stdDev[end] = make([]float64, len(f.monthly)-end)
		stdDev[end][0] = 0
		sum := monthly
		sumSq := monthly * monthly
		for diff := 1; diff < len(f.monthly)-end; diff++ {
			v := f.monthly[end+diff]
			sum += v
			sumSq += v * v
			count := float64(diff + 1)
			mean := sum / count
			variance := sumSq/count - mean*mean
			// Guard against tiny negative values caused by floating-point
			// rounding before taking the square root.
			if variance < 0 {
				variance = 0
			}
			stdDev[end][diff] = math.Sqrt(variance)
		}
	}
	f.features = append(f.features, stdDev)
}
// setNegativeMonthRatio appends a feature table where nmr[end][diff] is
// the fraction of losing months (multiplier < 1) among the diff+1
// months starting at index end.
func (f *Fund) setNegativeMonthRatio() {
	nmr := make([][]float64, len(f.monthly))
	for end := range f.monthly {
		window := len(f.monthly) - end
		nmr[end] = make([]float64, window)
		losing := 0
		for diff := 0; diff < window; diff++ {
			if f.monthly[end+diff] < 1 {
				losing++
			}
			// diff+1 months have been seen so far in this window.
			nmr[end][diff] = float64(losing) / float64(diff+1)
		}
	}
	f.features = append(f.features, nmr)
}
// setGreatestFall appends two feature tables: gf[end][diff] is the
// smallest cumulative product over any contiguous run of months ending
// inside the window [end, end+diff] (the deepest drawdown multiplier,
// capped at 1.0), and gfl[end][diff] is that run's length in months.
// curr tracks the minimum product of a run ending at the current month:
// after multiplying in the new month, the run restarts at just that
// month whenever that alone is smaller than the extended run.
func (f *Fund) setGreatestFall() {
	gf := make([][]float64, len(f.monthly))
	gfl := make([][]float64, len(f.monthly))
	for end := range f.monthly {
		gf[end] = make([]float64, len(f.monthly)-end)
		gfl[end] = make([]float64, len(f.monthly)-end)
		greatestFall := 1.0
		greatestFallLen := 0
		curr := 1.0
		currLen := 0
		for diff := 0; diff < len(f.monthly)-end; diff++ {
			curr *= f.monthly[end+diff]
			currLen++
			// Restart the run at this month if it alone is a deeper fall
			// than the extended run (happens when the previous product
			// was above 1).
			if f.monthly[end+diff] < curr {
				curr = f.monthly[end+diff]
				currLen = 1
			}
			if curr < greatestFall {
				greatestFall = curr
				greatestFallLen = currLen
			}
			gf[end][diff] = greatestFall
			gfl[end][diff] = float64(greatestFallLen)
		}
	}
	f.features = append(f.features, gf, gfl)
}
// makeRatio allocates the (zeroed) ratio tables with the same triangular
// shape as the feature tables; SetRatio fills them in later.
func (f *Fund) makeRatio() {
	n := f.Duration()
	f.ratio = make([][][]float64, f.FeatureCount())
	for feature := range f.ratio {
		ends := make([][]float64, n)
		for end := 0; end < n; end++ {
			ends[end] = make([]float64, n-end)
		}
		f.ratio[feature] = ends
	}
}
// ReadFunds loads all funds from get.tsv, computes their features, and
// normalizes them into cross-fund ratios. Lines with fewer than 6
// tab-separated fields are skipped; a file read error or a malformed
// number panics via check.Check.
func ReadFunds() []*Fund {
	text, err := ioutil.ReadFile("get.tsv")
	check.Check(err)
	var funds []*Fund
	for _, line := range strings.Split(string(text), "\n") {
		f := fundFromLine(line)
		if f == nil {
			continue
		}
		funds = append(funds, f)
	}
	SetRatio(funds)
	return funds
}
// fundFromLine parses one tab-separated line into a Fund, or returns nil
// when the line has fewer than 6 fields. Monthly percentages start at
// field index 5; the first comma is treated as a decimal separator and
// each value is converted to a multiplier (1 + v/100). A malformed
// number panics via check.Check.
func fundFromLine(line string) *Fund {
	fields := strings.Split(strings.Trim(line, "\n"), "\t")
	if len(fields) < 6 {
		return nil
	}
	var monthly []float64
	for i := 5; i < len(fields); i++ {
		v, err := strconv.ParseFloat(strings.Replace(fields[i], ",", ".", 1), 64)
		check.Check(err)
		monthly = append(monthly, 1.0+v/100.0)
	}
	f := NewFund(monthly)
	f.name = fields[0]
	f.active = fields[4]
	f.min = fields[1]
	return f
}
// FeatureCount returns the number of derived feature tables.
func (f *Fund) FeatureCount() int {
	return len(f.features)
}

// Duration returns the number of months of data held for this fund.
func (f *Fund) Duration() int {
	return len(f.monthly)
}
// End is inclusive, start is exclusive.
// Weighted returns the weighted sum of this fund's feature ratios for
// the period: weight[i] multiplies the ratio of feature i stored at
// [end][start-1-end].
func (f *Fund) Weighted(weight []float64, end, start int) float64 {
	total := 0.0
	for i, w := range weight {
		total += f.ratio[i][end][start-1-end] * w
	}
	return total
}

// Return is the ratio of feature 0 (the geometric-mean return) for the
// same period convention as Weighted.
func (f *Fund) Return(end, start int) float64 {
	return f.Weighted([]float64{1}, end, start)
}
// Print returns a tab-separated summary line: name, active flag and
// minimum investment.
func (f *Fund) Print() string {
	return f.name + "\t" + f.active + "\t" + f.min
}
// SetRatio fills each fund's ratio tables: for every feature and every
// (end, diff) window it stores the fund's feature value divided by the
// highest value of that feature across all funds with enough history.
// When the highest value is exactly 0, the ratio is defined as 1 to
// avoid dividing by zero.
func SetRatio(funds []*Fund) {
	duration := MaxDuration(funds)
	for feature := 0; feature < funds[0].FeatureCount(); feature++ {
		for end := 0; end < duration; end++ {
			for diff := 0; diff < duration-end; diff++ {
				// math.Inf(-1) replaces the old -999999.99 sentinel so the
				// maximum is found correctly for any feature value range.
				highest := math.Inf(-1)
				for _, f := range funds {
					if f.Duration() <= end+diff {
						continue
					}
					if f.features[feature][end][diff] > highest {
						highest = f.features[feature][end][diff]
					}
				}
				for _, f := range funds {
					if f.Duration() <= end+diff {
						continue
					}
					if highest == 0 {
						f.ratio[feature][end][diff] = 1
						continue
					}
					f.ratio[feature][end][diff] = f.features[feature][end][diff] / highest
				}
			}
		}
	}
}
func MaxDuration(funds []*Fund) int {
duration := 0
for _, f := range funds {
if f.Duration() > duration {
duration = f.Duration()
}
}
return duration
} | src/xpfunds/xpfunds.go | 0.647241 | 0.511473 | xpfunds.go | starcoder |
package panorama
import (
"fmt"
"math"
"github.com/gotk3/gotk3/cairo"
"github.com/gotk3/gotk3/gtk"
"github.com/ftl/hamradio/bandplan"
"github.com/ftl/panacotta/core"
)
// rect is an axis-aligned rectangle in widget coordinates; top/left hold
// the smaller values and bottom/right the larger ones (y grows downward
// on screen).
type rect struct {
	top, left, bottom, right float64
}

// width returns the horizontal extent of r.
func (r rect) width() float64 {
	return math.Abs(r.left - r.right)
}

// height returns the vertical extent of r.
func (r rect) height() float64 {
	return math.Abs(r.top - r.bottom)
}

// contains reports whether p lies within r, edges inclusive.
func (r rect) contains(p point) bool {
	return r.left <= p.x && r.right >= p.x && r.top <= p.y && r.bottom >= p.y
}

// toX maps fraction f onto r's horizontal span; 0 is the left edge.
func (r rect) toX(f core.Frct) float64 {
	return r.left + r.width()*float64(f)
}

// toY maps fraction f onto r's vertical span; 0 is the bottom edge
// (larger f moves toward the top of the screen).
func (r rect) toY(f core.Frct) float64 {
	return r.bottom - r.height()*float64(f)
}
// point is a position in widget coordinates.
type point struct {
	x, y float64
}
// geometry captures the mouse position and the layout rectangles of all
// panorama regions for one draw pass; the draw functions fill in their
// own rects as they run (see onDraw).
type geometry struct {
	mouse          point
	widget         rect
	dbScale        rect
	bandIndicator  rect
	frequencyScale rect
	modeIndicator  rect
	fft            rect
	vfo            rect
	peaks          []rect
	waterfall      rect
}
// dim collects the fixed layout dimensions (in pixels, except the
// fft/waterfall split which is a ratio) used throughout drawing.
var dim = struct {
	spacing                float64
	modeIndicatorHeight    float64
	frequencyScaleFontSize float64
	dbScaleFontSize        float64
	fftWaterfallRatio      float64
}{
	spacing:                2.0,
	modeIndicatorHeight:    5.0,
	frequencyScaleFontSize: 10.0,
	dbScaleFontSize:        10.0,
	fftWaterfallRatio:      0.5,
}
// colorMap is a color gradient defined by evenly spaced RGB stops.
type colorMap []struct{ r, g, b float64 }

// toRGB linearly interpolates the gradient at fraction f. The fraction
// is clamped below at 0 — previously a negative f produced a negative
// slice index and panicked; values above 1 saturate at the last stop.
func (c colorMap) toRGB(f core.Frct) (r, g, b float64) {
	adaptedHeat := float64(f) * float64(len(c)-1)
	if adaptedHeat < 0 {
		// Clamp: a negative fraction would make colorIndex negative and
		// index outside the slice.
		adaptedHeat = 0
	}
	colorIndex := int(adaptedHeat)
	lower := c[int(math.Min(float64(colorIndex), float64(len(c)-1)))]
	upper := c[int(math.Min(float64(colorIndex+1), float64(len(c)-1)))]
	p := adaptedHeat - float64(colorIndex)
	r = (1-p)*lower.r + p*upper.r
	g = (1-p)*lower.g + p*upper.g
	b = (1-p)*lower.b + p*upper.b
	return
}
// Alternative grayscale gradient:
// var waterfallColors = colorMap{
// 	{0, 0, 0}, {1, 1, 1},
// }

// waterfallColors is the gradient used for waterfall pixels, running
// from black through blue, cyan, yellow and red to white.
var waterfallColors = colorMap{
	{0, 0, 0}, {0, 0, 1}, {0, 1, 1}, {1, 1, 0}, {1, 0, 0}, {1, 1, 1},
}
// onDraw renders one frame of the panorama: background, scales, FFT,
// waterfall, peak markers and VFO indicator — in that order, so later
// layers paint on top. It stores the resulting geometry for input
// handling and, on the first frame only, reports the FFT area size to
// the controller.
func (v *View) onDraw(da *gtk.DrawingArea, cr *cairo.Context) {
	data := v.data
	fillBackground(cr)
	g := v.prepareGeometry(da, cr)
	g.dbScale = drawDBScale(cr, g, data)
	g.bandIndicator = drawBandIndicator(cr, g, data)
	g.frequencyScale = drawFrequencyScale(cr, g, data)
	g.modeIndicator = drawModeIndicator(cr, g, data)
	g.fft = drawFFT(cr, g, data)
	g.waterfall = v.drawWaterfall(cr, g, data)
	g.peaks = drawPeaks(cr, g, data)
	g.vfo = drawVFO(cr, g, data)
	v.geometry = g
	if !v.sizeInitialized {
		v.sizeInitialized = true
		v.controller.SetPanoramaSize(core.Px(g.fft.width()), core.Px(g.fft.height()))
	}
}
// fillBackground paints the whole drawing surface black.
func fillBackground(cr *cairo.Context) {
	cr.Save()
	defer cr.Restore()
	cr.SetSourceRGB(0, 0, 0)
	cr.Paint()
}
// prepareGeometry computes this frame's layout from the widget size and
// the font metrics of the scale labels. Only the fixed regions (scales,
// mode indicator, FFT area) are set here; the remaining rects are filled
// in by the individual draw functions.
func (v *View) prepareGeometry(da *gtk.DrawingArea, cr *cairo.Context) geometry {
	cr.Save()
	defer cr.Restore()
	result := geometry{
		mouse:  point{x: v.mouse.x, y: v.mouse.y},
		widget: rect{bottom: float64(da.GetAllocatedHeight()), right: float64(da.GetAllocatedWidth())},
	}
	// Measure representative strings to size the scale areas.
	cr.SetFontSize(dim.frequencyScaleFontSize)
	frequencyScaleExtents := cr.TextExtents("Hg")
	cr.SetFontSize(dim.dbScaleFontSize)
	dbScaleExtents := cr.TextExtents("-000.0dB")
	result.frequencyScale.bottom = frequencyScaleExtents.Height + 2*dim.spacing
	result.modeIndicator.bottom = result.frequencyScale.bottom + 2*dim.modeIndicatorHeight
	result.dbScale.right = dbScaleExtents.Width + 2*dim.spacing
	// The FFT occupies the area below the mode indicator, right of the
	// dB scale, split with the waterfall by fftWaterfallRatio.
	result.fft = rect{
		top:    result.modeIndicator.bottom,
		left:   result.dbScale.right,
		bottom: result.modeIndicator.bottom + (result.widget.bottom-result.modeIndicator.bottom)*(1.0-dim.fftWaterfallRatio),
		right:  result.widget.right,
	}
	return result
}
// drawDBScale draws dashed horizontal grid lines with dB labels to the
// left of the FFT area, plus a red dashed line at the peak detection
// threshold level. It returns the label area rectangle.
func drawDBScale(cr *cairo.Context, g geometry, data core.Panorama) rect {
	cr.Save()
	defer cr.Restore()
	r := rect{
		right:  g.fft.left,
		top:    g.fft.top,
		bottom: g.fft.bottom,
	}
	cr.SetFontSize(dim.dbScaleFontSize)
	cr.SetSourceRGB(0.8, 0.8, 0.8)
	cr.SetLineWidth(0.5)
	cr.SetDash([]float64{2, 2}, 0)
	for _, mark := range data.DBScale {
		y := r.toY(mark.Y)
		// Grid line across the FFT area.
		cr.MoveTo(r.right, y)
		cr.LineTo(g.widget.right, y)
		// TODO maybe use a color indication for the signal level similar to the waterfall
		cr.Stroke()
		// Right-aligned label inside the scale strip.
		dbText := fmt.Sprintf("%.0fdB", mark.DB)
		extents := cr.TextExtents(dbText)
		cr.MoveTo(r.right-extents.Width-dim.spacing, y+extents.Height/2)
		cr.ShowText(dbText)
	}
	// Red dashed line marking the peak threshold level; r.left is the
	// zero value here, i.e. the widget's left edge.
	cr.SetSourceRGB(1.0, 0.3, 0.3)
	cr.SetLineWidth(1.0)
	cr.SetDash([]float64{2, 2}, 0)
	y := r.toY(data.PeakThresholdLevel)
	cr.MoveTo(r.left, y)
	cr.LineTo(g.widget.right, y)
	cr.Stroke()
	return r
}
// drawBandIndicator shows the current band name in the top-left corner
// (above the dB scale), brightening it while the mouse hovers over it,
// and underlines the area. It returns the indicator rectangle.
func drawBandIndicator(cr *cairo.Context, g geometry, data core.Panorama) rect {
	cr.Save()
	defer cr.Restore()
	r := rect{
		left:   g.dbScale.left,
		right:  g.dbScale.right,
		bottom: g.dbScale.top,
	}
	mouseOver := r.contains(g.mouse)
	if mouseOver {
		cr.SetSourceRGB(1, 1, 1)
	} else {
		cr.SetSourceRGB(0.8, 0.8, 0.8)
	}
	cr.SetFontSize(15.0)
	bandText := string(data.Band.Name)
	extents := cr.TextExtents(bandText)
	// Right-aligned horizontally, vertically centered in the strip.
	x := (r.right - extents.Width - dim.spacing)
	y := (r.bottom + extents.Height) / 2
	cr.MoveTo(x, y)
	cr.ShowText(bandText)
	// Separator line under the indicator.
	cr.SetSourceRGB(0.8, 0.8, 0.8)
	cr.SetLineWidth(0.5)
	cr.MoveTo(r.left, r.bottom)
	cr.LineTo(r.right, r.bottom)
	cr.Stroke()
	return r
}
// drawFrequencyScale draws dashed vertical grid lines with kHz labels
// along the top of the FFT area; marks outside the visible span are
// skipped. It returns the scale rectangle.
func drawFrequencyScale(cr *cairo.Context, g geometry, data core.Panorama) rect {
	cr.Save()
	defer cr.Restore()
	r := rect{
		left:   g.fft.left,
		right:  g.fft.right,
		bottom: g.frequencyScale.bottom,
	}
	cr.SetFontSize(dim.frequencyScaleFontSize)
	cr.SetSourceRGB(0.8, 0.8, 0.8)
	cr.SetLineWidth(0.5)
	cr.SetDash([]float64{2, 2}, 0)
	for _, mark := range data.FrequencyScale {
		x := r.toX(mark.X)
		if x < r.left || x > r.right {
			continue
		}
		// Grid line down through the FFT area.
		cr.MoveTo(x, r.top)
		cr.LineTo(x, g.fft.bottom)
		cr.Stroke()
		freqText := fmt.Sprintf("%.0fk", float64(mark.Frequency)/1000.0)
		cr.MoveTo(x+dim.spacing, r.bottom-dim.spacing)
		cr.ShowText(freqText)
	}
	return r
}
// drawModeIndicator renders the band-plan portions as colored bars in a
// strip between the frequency scale and the FFT. Each mode gets its own
// color; contest portions are drawn in a second row (yOffset) so they
// can overlap other portions. Off-screen portions are skipped and
// partially visible ones are clipped to the strip.
func drawModeIndicator(cr *cairo.Context, g geometry, data core.Panorama) rect {
	cr.Save()
	defer cr.Restore()
	r := rect{
		left:   g.frequencyScale.left,
		top:    g.frequencyScale.bottom,
		right:  g.frequencyScale.right,
		bottom: g.modeIndicator.bottom,
	}
	cr.SetLineWidth(1.0)
	for _, portion := range data.Band.Portions {
		startX := r.toX(core.ToFrequencyFrct(portion.From, data.FrequencyRange))
		endX := r.toX(core.ToFrequencyFrct(portion.To, data.FrequencyRange))
		if endX < r.left || startX > r.right {
			continue
		}
		// Clip to the visible span.
		startX = math.Max(r.left, startX)
		endX = math.Min(r.right, endX)
		var yOffset float64
		switch portion.Mode {
		case bandplan.ModeCW:
			cr.SetSourceRGB(0.4, 0, 0.4)
		case bandplan.ModePhone:
			cr.SetSourceRGB(0.2, 0.4, 0)
		case bandplan.ModeDigital:
			cr.SetSourceRGB(0, 0, 0.6)
		case bandplan.ModeBeacon:
			cr.SetSourceRGB(1, 0, 0)
		case bandplan.ModeContest:
			cr.SetSourceRGB(0.6, 0.3, 0)
			yOffset = dim.modeIndicatorHeight
		}
		cr.Rectangle(startX, r.top+yOffset, endX-startX, dim.modeIndicatorHeight)
		cr.Fill()
	}
	return r
}
// drawFFT renders the spectrum trace as a translucent filled area with a
// solid white outline on top. With no spectrum data it returns the FFT
// rectangle unchanged.
func drawFFT(cr *cairo.Context, g geometry, data core.Panorama) rect {
	cr.Save()
	defer cr.Restore()
	r := g.fft
	if len(data.Spectrum) == 0 {
		return r
	}
	startX := r.toX(data.Spectrum[0].X)
	// Filled area under the trace, closed along the bottom edge.
	cr.SetSourceRGBA(1, 1, 1, 0.3)
	cr.MoveTo(startX, r.bottom)
	for _, p := range data.Spectrum {
		cr.LineTo(r.toX(p.X), r.toY(p.Y))
	}
	cr.LineTo(r.toX(data.Spectrum[len(data.Spectrum)-1].X), r.bottom)
	cr.ClosePath()
	cr.Fill()
	// Solid outline over the filled area.
	cr.SetSourceRGB(1, 1, 1)
	cr.SetLineWidth(1.0)
	cr.MoveTo(startX, r.toY(data.Spectrum[0].Y))
	for _, p := range data.Spectrum {
		cr.LineTo(r.toX(p.X), r.toY(p.Y))
	}
	cr.Stroke()
	return r
}
// drawVFO renders the VFO indicator spanning FFT and waterfall: a
// translucent filter-passband rectangle (brighter while hovered), a
// vertical line at the VFO frequency, and frequency plus S-meter labels
// placed on whichever side of the line still fits. It returns the
// passband rectangle.
func drawVFO(cr *cairo.Context, g geometry, data core.Panorama) rect {
	cr.Save()
	defer cr.Restore()
	r := rect{
		top:    g.fft.top,
		bottom: g.waterfall.bottom,
	}
	freqX := g.fft.toX(data.VFOLine)
	padding := 4.0
	filterX := g.fft.toX(data.VFOFilterFrom)
	filterWidth := g.fft.toX(data.VFOFilterTo) - g.fft.toX(data.VFOFilterFrom)
	r.left = filterX
	r.right = filterX + filterWidth
	mouseOver := r.contains(g.mouse)
	if mouseOver {
		cr.SetSourceRGBA(0.6, 0.9, 1.0, 0.5)
	} else {
		cr.SetSourceRGBA(0.6, 0.9, 1.0, 0.2)
	}
	cr.Rectangle(filterX, r.top, filterWidth, r.height())
	cr.Fill()
	// Vertical line at the tuned frequency.
	cr.SetLineWidth(1.5)
	cr.SetSourceRGB(0.6, 0.9, 1.0)
	cr.MoveTo(freqX, r.top)
	cr.LineTo(freqX, r.bottom)
	cr.Stroke()
	cr.SetFontSize(15.0)
	freqText := fmt.Sprintf("%s:%.2fkHz", data.VFO.Name, data.VFO.Frequency/1000)
	freqExtents := cr.TextExtents(freqText)
	// Prefer labels to the right of the line; flip to the left when they
	// would overflow the FFT area.
	leftSide := freqX+padding+freqExtents.Width < g.fft.right
	if leftSide {
		cr.MoveTo(freqX+padding, r.top+freqExtents.Height+padding)
	} else {
		cr.MoveTo(freqX-padding-freqExtents.Width, r.top+freqExtents.Height+padding)
	}
	cr.ShowText(freqText)
	cr.SetFontSize(10.0)
	sMeterText := core.SUnit(data.VFOSignalLevel).String()
	sMeterExtents := cr.TextExtents(sMeterText)
	if leftSide {
		cr.MoveTo(freqX+padding, r.top+freqExtents.Height+sMeterExtents.Height+2*padding)
	} else {
		cr.MoveTo(freqX-padding-sMeterExtents.Width, r.top+freqExtents.Height+sMeterExtents.Height+2*padding)
	}
	cr.ShowText(sMeterText)
	return r
}
// drawPeaks renders a "▼" marker for every detected peak and, while the
// mouse hovers a peak's column, highlights it and shows frequency and
// S-meter labels beside the marker. It returns the hover rectangles so
// input handling can map the mouse position back to peaks.
func drawPeaks(cr *cairo.Context, g geometry, data core.Panorama) []rect {
	cr.Save()
	defer cr.Restore()
	padding := 4.0
	// Hover columns are a third of the VFO filter width on each side.
	peakWidth := (g.fft.toX(data.VFOFilterTo) - g.fft.toX(data.VFOFilterFrom)) / 3.0
	result := make([]rect, len(data.Peaks))
	for i, peak := range data.Peaks {
		maxX := g.fft.toX(peak.MaxX)
		fromX := maxX - peakWidth
		toX := maxX + peakWidth
		y := g.fft.toY(peak.ValueY)
		r := rect{
			left:   fromX,
			top:    g.fft.top,
			right:  toX,
			bottom: g.waterfall.bottom,
		}
		mouseOver := r.contains(g.mouse)
		cr.SetFontSize(12.0)
		markText := "\u25BC"
		markExtents := cr.TextExtents(markText)
		markTextY := y
		cr.SetSourceRGB(0.3, 1, 0.8)
		cr.MoveTo(maxX-markExtents.Width/2, markTextY)
		cr.ShowText(markText)
		cr.SetFontSize(10.0)
		freqText := fmt.Sprintf("%.2fkHz", peak.MaxFrequency/1000)
		freqExtents := cr.TextExtents(freqText)
		// SUnit already yields a string; the previous
		// fmt.Sprintf("%s", …) round-trip was redundant (staticcheck S1025).
		sMeterText := core.SUnit(peak.ValueDB).String()
		sMeterExtents := cr.TextExtents(sMeterText)
		freqTextY := markTextY - 2*dim.spacing - markExtents.Height - sMeterExtents.Height
		sMeterTextY := freqTextY + dim.spacing + sMeterExtents.Height
		// Labels go right of the marker unless they would overflow.
		leftSide := maxX+padding+freqExtents.Width < g.fft.right
		if mouseOver {
			cr.SetSourceRGBA(0.3, 1, 0.8, 0.4)
			cr.Rectangle(r.left, r.top, r.width(), r.height())
			cr.Fill()
			cr.SetSourceRGB(0.3, 1, 0.8)
			if leftSide {
				cr.MoveTo(maxX+padding, freqTextY)
			} else {
				cr.MoveTo(maxX-padding-freqExtents.Width, freqTextY)
			}
			cr.ShowText(freqText)
			if leftSide {
				cr.MoveTo(maxX+padding, sMeterTextY)
			} else {
				cr.MoveTo(maxX-padding-sMeterExtents.Width, sMeterTextY)
			}
			cr.ShowText(sMeterText)
		} else {
			// NOTE(review): this sets a color but draws nothing before the
			// next iteration overrides it — looks like a leftover; confirm
			// intent before removing.
			cr.SetSourceRGBA(0.3, 1, 0.8, 0.2)
		}
		result[i] = r
	}
	return result
}
// drawWaterfall scrolls the waterfall image down by one scanline,
// renders the newest line from data.Waterline at the top using
// waterfallColors, and paints the result below the FFT area. The pixel
// buffer is kept in v.waterfall between frames and reallocated only when
// the widget size changes.
func (v *View) drawWaterfall(cr *cairo.Context, g geometry, data core.Panorama) rect {
	cr.Save()
	defer cr.Restore()
	r := rect{
		top:    g.fft.bottom,
		bottom: g.widget.bottom,
		left:   g.fft.left,
		right:  g.fft.right,
	}
	stride := cairo.FormatStrideForWidth(cairo.FORMAT_RGB24, int(r.width()))
	bytesPerPx := stride / int(r.width())
	length := int(stride * int(r.height()))
	if v.waterfall == nil || len(v.waterfall) != length {
		v.waterfall = make([]byte, length)
	}
	// Build the newest scanline; RGB24 stores bytes as B, G, R.
	waterline := make([]byte, stride)
	for i := range data.Waterline {
		j := i * bytesPerPx
		if 0 > j || j >= len(waterline) {
			continue
		}
		r, g, b := waterfallColors.toRGB(data.Waterline[i])
		waterline[j+0] = byte(b * float64(255))
		waterline[j+1] = byte(g * float64(255))
		waterline[j+2] = byte(r * float64(255))
	}
	// Prepend the new line and drop the oldest one.
	// NOTE(review): this append allocates a fresh buffer every frame;
	// an in-place copy would avoid that — confirm before changing.
	v.waterfall = append(waterline, v.waterfall[:length-stride]...)
	// NOTE(review): the error from CreateImageSurfaceForData is
	// discarded here; a failure would paint from an invalid surface.
	imageSurface, _ := cairo.CreateImageSurfaceForData(v.waterfall, cairo.FORMAT_RGB24, int(r.width()), int(r.height()), stride)
	defer imageSurface.Close()
	cr.SetSourceSurface(imageSurface, r.left, r.top)
	cr.Paint()
	return r
}
package main
import (
"fmt"
"os"
"strconv"
"math"
"encoding/csv"
)
// CensusGroup is one census block group: its population and position.
// The latitude has already been projected by ParseCensusData.
type CensusGroup struct {
	population          int
	latitude, longitude float64
}
// ParseCensusData reads the CSV file at fname and returns one
// CensusGroup per well-formed record. Records must have 7 fields, with
// population at index 4 and latitude/longitude at indexes 5 and 6;
// records failing these checks are skipped silently. Latitude is
// transformed with ln(tan(φ) + sec(φ)) — the Mercator projection of the
// y-coordinate (presumably so grid buckets match a Mercator map; confirm
// with the assignment spec).
func ParseCensusData(fname string) ([]CensusGroup, error) {
	file, err := os.Open(fname)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	records, err := csv.NewReader(file).ReadAll()
	if err != nil {
		return nil, err
	}
	censusData := make([]CensusGroup, 0, len(records))
	for _, rec := range records {
		if len(rec) == 7 {
			population, err1 := strconv.Atoi(rec[4])
			latitude, err2 := strconv.ParseFloat(rec[5], 64)
			longitude, err3 := strconv.ParseFloat(rec[6], 64)
			if err1 == nil && err2 == nil && err3 == nil {
				latpi := latitude * math.Pi / 180
				latitude = math.Log(math.Tan(latpi) + 1 / math.Cos(latpi))
				censusData = append(censusData, CensusGroup{population, latitude, longitude})
			}
		}
	}
	return censusData, nil
}
func main () {
if len(os.Args) < 4 {
fmt.Printf("Usage:\nArg 1: file name for input data\nArg 2: number of x-dim buckets\nArg 3: number of y-dim buckets\nArg 4: -v1, -v2, -v3, -v4, -v5, or -v6\n")
return
}
fname, ver := os.Args[1], os.Args[4]
xdim, err := strconv.Atoi(os.Args[2])
if err != nil {
fmt.Println(err)
return
}
ydim, err := strconv.Atoi(os.Args[3])
if err != nil {
fmt.Println(err)
return
}
censusData, err := ParseCensusData(fname)
if err != nil {
fmt.Println(err)
return
}
fmt.Printf("%v%v\n", "censusData :: ", len(censusData));
// Some parts may need no setup code
var populationTotal int = 0
var maxLatitude float64 = censusData[0].latitude
var minLatidude float64 = censusData[0].latitude
var maxLongitude float64 = censusData[0].longitude
var minLongitude float64 = censusData[0].longitude
grid2D := make([][]int, xdim)
for i := 0; i < xdim; i++ {
grid2D[i] = make ([]int, ydim)
}
switch ver {
case "-v1":
// YOUR SETUP CODE FOR PART 1
for i := 0; i < len(censusData); i++ { // Can start with i=1 to reduce computation
if censusData[i].latitude > maxLatitude {
maxLatitude = censusData[i].latitude
}
if censusData[i].latitude < minLatidude {
minLatidude = censusData[i].latitude
}
if censusData[i].longitude > maxLongitude {
maxLongitude = censusData[i].longitude
}
if censusData[i].longitude < minLongitude {
minLongitude = censusData[i].longitude
}
populationTotal += censusData[i].population
}
fmt.Printf("%v%v\n", "xdim :: ",xdim);
fmt.Printf("%v%v\n", "ydim :: ",ydim);
fmt.Printf("%v%v\n", "populationTotal :: ", populationTotal);
fmt.Printf("%v%v\n%v%v\n%v%v\n%v%v\n", "maxLatitude :: ",maxLatitude, "minLatitude :: ", minLatidude,"maxLongitude :: ",maxLongitude,"minLongitude :: ", minLongitude );
case "-v2":
// YOUR SETUP CODE FOR PART 2
case "-v3":
// YOUR SETUP CODE FOR PART 3
// Part 3 - make a 2D array
for i := 0; i < len(censusData); i++ { // Can start with i=1 to reduce computation
if censusData[i].latitude > maxLatitude {
maxLatitude = censusData[i].latitude
}
if censusData[i].latitude < minLatidude {
minLatidude = censusData[i].latitude
}
if censusData[i].longitude > maxLongitude {
maxLongitude = censusData[i].longitude
}
if censusData[i].longitude < minLongitude {
minLongitude = censusData[i].longitude
}
populationTotal += censusData[i].population
}
var x_chunk float64 = (maxLongitude - minLongitude) / float64(xdim)
var y_chunk float64 = (maxLatitude - minLatidude) / float64(ydim)
var queryWest float64
var queryEast float64
var querySouth float64
var queryNorth float64
// Version 3: Step 1
for k := 0; k<len(censusData); k++ {
for i := 0; i<xdim;i++ {
for j := 0; j<ydim; j++ {
queryWest = minLongitude + float64(x_chunk)*float64(i)
queryEast = minLongitude + float64(x_chunk)*float64(i)+x_chunk
querySouth = minLatidude + float64(y_chunk)*float64(j)
queryNorth = minLatidude + float64(y_chunk)*float64(j)+y_chunk
if censusData[k].longitude >= queryWest && censusData[k].longitude <= queryEast && censusData[k].latitude >= querySouth && censusData[k].latitude <= queryNorth {
grid2D[i][j] = grid2D[i][j] + censusData[k] .population
}
}
}
}
// Version 3: Step 2
var tempi = 0
var tempj = 0
for j := ydim-1; j>=0; j-- {
for i := 0; i<xdim; i++ {
tempi = i-1
tempj = j+1
if ((tempi < 0) && tempj<=ydim-1) {
grid2D[i][j] = grid2D[i][j] + grid2D[i][tempj]
} else if (tempi>=0 && (tempj >= ydim)) {
grid2D[i][j] = grid2D[i][j] + grid2D[tempi][j]
} else if ((tempi < 0) && (tempj >= ydim)) {
grid2D[i][j] = grid2D[i][j]
} else {
// fmt.Printf("%v\t%v\n", tempi, tempj)
grid2D[i][j] = grid2D[i][j] + grid2D[tempi][j] + grid2D[i][tempj] - grid2D[tempi][tempj]
}
}
}
fmt.Printf("%v%v\n", "pls be the right pop => grid2D[xdim-1][0] :: ",grid2D[xdim-1][0])
case "-v4":
// YOUR SETUP CODE FOR PART 4
case "-v5":
// YOUR SETUP CODE FOR PART 5
case "-v6":
// YOUR SETUP CODE FOR PART 6
default:
fmt.Println("Invalid version argument")
return
}
for {
var west, south, east, north int
n, err := fmt.Scanln(&west, &south, &east, &north)
if n != 4 || err != nil || west<1 || west>xdim || south<1 || south>ydim || east<west || east>xdim || north<south || north>ydim {
break
}
var population int
var percentage float64
switch ver {
case "-v1":
// YOUR QUERY CODE FOR PART 1
west = west - 1 // ahh never mind
south = south - 1 // ahh never mind
var x_chunk float64 = (maxLongitude - minLongitude) / float64(xdim)
var y_chunk float64 = (maxLatitude - minLatidude) / float64(ydim)
var queryWest float64 = minLongitude + float64(x_chunk)*float64(west)
var queryEast float64 = minLongitude + float64(x_chunk)*float64(east)
var querySouth float64 = minLatidude + float64(y_chunk)*float64(south)
var queryNorth float64 = minLatidude + float64(y_chunk)*float64(north)
for i := 0; i<len(censusData); i++ {
if censusData[i].longitude >= queryWest && censusData[i].longitude <= queryEast && censusData[i].latitude >= querySouth && censusData[i].latitude <= queryNorth {
population += censusData[i].population
}
}
percentage = (float64(population)/float64(populationTotal)) * float64(100)
case "-v2":
// YOUR QUERY CODE FOR PART 2
case "-v3":
// YOUR QUERY CODE FOR PART 3
var above, left, above_left int
if north+1 <= ydim {
above = grid2D[east-1][north]
} else {
above = 0
}
if west-1 >= 1 {
left = grid2D[west-2][south-1]
} else {
left = 0
}
if west-1 >= 1 && north+1 <= ydim {
above_left = grid2D[west-2][north]
} else {
above_left = 0
}
population = grid2D[east-1][south-1] - above - left + above_left
percentage = (float64(population)/float64(populationTotal)) * float64(100)
case "-v4":
// YOUR QUERY CODE FOR PART 4
case "-v5":
// YOUR QUERY CODE FOR PART 5
case "-v6":
// YOUR QUERY CODE FOR PART 6
}
fmt.Printf("%v %.2f%%\n", population, percentage)
}
} | Go/Assignment_5/1_3_backup.go | 0.500977 | 0.536677 | 1_3_backup.go | starcoder |
package configtable
import (
"bufio"
"encoding/hex"
"fmt"
"io"
"reflect"
"strconv"
"strings"
)
const (
	// typeDelimiter separates a column's name from its type in the header.
	typeDelimiter = "!"
	// columnDelimiter separates columns in both header and data rows.
	columnDelimiter = "|"
	// structTag is the struct tag key consulted by Decode.
	structTag = "configtable"
)
// column describes one header column: its name, declared type
// ("string", "hex" or "dec"), and declared byte length.
type column struct {
	name    string
	colType string
	byteLen int
}
// A Decoder reads a Blizzard config table from an input stream.
type Decoder struct {
	columns     []column       // parsed header columns, in order
	columnNames map[string]int // column name -> index into columns
	s           *bufio.Scanner
	err         error // sticky error; once set, all further reads fail with it
}
// line returns the next input line. Errors are sticky: once a read fails
// (exhausted input is reported as io.EOF), every subsequent call returns
// the same error.
func (d *Decoder) line() (string, error) {
	if d.err != nil {
		return "", d.err
	}
	if !d.s.Scan() {
		d.err = d.s.Err()
		if d.err == nil {
			d.err = io.EOF
		}
		return "", d.err
	}
	return d.s.Text(), nil
}
// readHeader parses the first input line as the table header. Each entry
// has the form NAME!TYPE:BYTELEN, where TYPE is one of string, hex or
// dec; duplicate names and unsupported types are errors. The result is
// cached in d.columns/d.columnNames, so a second call is a no-op. Errors
// are sticky via d.err.
func (d *Decoder) readHeader() error {
	if d.columns != nil {
		// already done, don't trigger twice
		return nil
	}
	headerLine, err := d.line()
	if err != nil {
		return err
	}
	fullHeaders := strings.Split(headerLine, columnDelimiter)
	columns := make([]column, len(fullHeaders))
	columnNames := make(map[string]int)
	for n, h := range fullHeaders {
		// Split NAME from TYPE:BYTELEN.
		bits := strings.Split(h, typeDelimiter)
		if len(bits) != 2 {
			d.err = fmt.Errorf("configtable: missing type delimiter in header")
			return d.err
		}
		blizzType := strings.Split(strings.ToLower(bits[1]), ":")
		if len(blizzType) != 2 {
			d.err = fmt.Errorf("configtable: expected type to be TYPENAME:BYTELEN; got %q", bits[1])
			return d.err
		}
		byteLen, err := strconv.Atoi(blizzType[1])
		if err != nil {
			d.err = fmt.Errorf("configtable: expected type to be TYPENAME:BYTELEN; got %q: %v", bits[1], err)
			return d.err
		}
		if blizzType[0] != "string" && blizzType[0] != "hex" && blizzType[0] != "dec" {
			d.err = fmt.Errorf("configtable: unsupported type %q", bits[1])
			return d.err
		}
		columns[n] = column{
			name:    bits[0],
			colType: blizzType[0],
			byteLen: byteLen,
		}
		if _, ok := columnNames[bits[0]]; ok {
			d.err = fmt.Errorf("configtable: duplicate column name %q", bits[0])
			return d.err
		}
		columnNames[bits[0]] = n
	}
	d.columns = columns
	d.columnNames = columnNames
	return nil
}
// byteWidth returns the byte width used when parsing a decimal column
// into an integer of kind k, and whether that kind is unsigned. It
// panics on non-integer kinds; callers must pre-validate with
// isValidPairing. Plain int/uint are treated as 4 bytes regardless of
// platform word size (the minimum the Go spec guarantees).
func byteWidth(k reflect.Kind) (width int, unsigned bool) {
	switch k {
	case reflect.Int, reflect.Uint:
		return 4, k == reflect.Uint // Go spec specifies at least 32-bits in size
	case reflect.Int8, reflect.Uint8:
		return 1, k == reflect.Uint8
	case reflect.Int16, reflect.Uint16:
		return 2, k == reflect.Uint16
	case reflect.Int32, reflect.Uint32:
		return 4, k == reflect.Uint32
	case reflect.Int64, reflect.Uint64:
		return 8, k == reflect.Uint64
	}
	panic(fmt.Sprintf("cannot handle kind %v", k))
}
// isValidPairing reports whether a column of the given declared type can
// be decoded into a Go value of type to:
//   - any column type can become a string
//   - a "string" column can become a []string
//   - a "dec" column can become an integer whose width covers byteLen
//   - a "hex" column can become a []byte, or a [N]byte with N == byteLen
func isValidPairing(from column, to reflect.Type) bool {
	k := to.Kind()
	switch {
	case k == reflect.String:
		// can always convert into a string literally
		return true
	case from.colType == "string" && k == reflect.Slice && to.Elem().Kind() == reflect.String:
		// can convert "string" into a slice of strings
		return true
	case from.colType == "dec":
		// can convert dec into an integer of sufficient width
		bw, _ := byteWidth(k)
		return bw >= from.byteLen
	case from.colType == "hex":
		switch {
		case k == reflect.Slice && to.Elem().Kind() == reflect.Uint8:
			// can convert hex into a slice of bytes
			return true
		case k == reflect.Array && to.Elem().Kind() == reflect.Uint8:
			// can convert hex into an array of bytes of exactly the correct length
			return to.Len() == from.byteLen
		}
	}
	return false
}
// convertTo parses a single column value and stores it into the target
// reflect.Value. sliceDelim optionally overrides the separator used when
// splitting a "string" column into a []string (default " "). The pairing
// of column type and target kind must already have been validated by
// isValidPairing; unsupported pairings fall through and leave the target
// untouched.
//
// The parameter was previously named columnDelimiter, which shadowed the
// package-level constant of the same name; renamed for clarity.
func convertTo(sliceDelim *string, from column, value string, to reflect.Value) error {
	k := to.Kind()
	switch {
	case k == reflect.String:
		// Any column type can be stored literally as a string.
		to.SetString(value)
	case from.colType == "string" && k == reflect.Slice && to.Type().Elem().Kind() == reflect.String:
		// "string" columns may be split into a slice of strings.
		delim := " "
		if sliceDelim != nil {
			delim = *sliceDelim
		}
		bits := strings.Split(value, delim)
		bitsV := reflect.ValueOf(bits)
		to.Set(bitsV)
	case from.colType == "dec":
		// Decimal columns parse into an integer of sufficient width.
		bw, unsigned := byteWidth(k)
		if unsigned {
			v, err := strconv.ParseUint(value, 10, bw*8)
			if err != nil {
				return fmt.Errorf("parsing %q: %v", value, err)
			}
			to.SetUint(v)
		} else {
			v, err := strconv.ParseInt(value, 10, bw*8)
			if err != nil {
				return fmt.Errorf("parsing %q: %v", value, err)
			}
			to.SetInt(v)
		}
	case from.colType == "hex":
		switch {
		case k == reflect.Slice && to.Type().Elem().Kind() == reflect.Uint8:
			v, err := hex.DecodeString(value)
			if err != nil {
				return fmt.Errorf("parsing %q: %v", value, err)
			}
			to.SetBytes(v)
		case k == reflect.Array && to.Type().Elem().Kind() == reflect.Uint8:
			// Hex decodes into a fixed-size byte array right-aligned:
			// leading array bytes stay zero when the value is short.
			vs, err := hex.DecodeString(value)
			if err != nil {
				return fmt.Errorf("parsing %q: %v", value, err)
			}
			arrLen := to.Len()
			for n, v := range vs {
				newN := arrLen - (len(vs) - n)
				to.Index(newN).SetUint(uint64(v))
			}
		}
	}
	return nil
}
// Decode decodes a line from the config table into a provided struct.
func (d *Decoder) Decode(s interface{}) error {
if err := d.readHeader(); err != nil {
return err
}
if reflect.TypeOf(s).Kind() != reflect.Ptr {
return fmt.Errorf("configtable: cannot decode into non-struct-pointer")
}
v := reflect.Indirect(reflect.ValueOf(s))
st := v.Type()
if !v.IsValid() || st.Kind() != reflect.Struct {
return fmt.Errorf("configtable: cannot decode into non-struct-pointer")
}
// create mappings from column indexes to field indexes.
columnToField := make(map[int]reflect.Value)
columnDelimiters := make(map[int]string)
fields := v.NumField()
for n := 0; n < fields; n++ {
f := st.Field(n)
// cheat and use PkgPath to check if this field is exported.
if f.PkgPath != "" {
// unexported, skip since we won't be able to set it anyway.
continue
}
columnName := f.Name
var columnDelimiter string
if tag := f.Tag.Get(structTag); tag != "" {
if strings.Contains(tag, ",") {
bits := strings.Split(tag, ",")
columnName = bits[0]
columnDelimiter = bits[1]
} else {
columnName = tag
}
}
columnID, ok := d.columnNames[columnName]
if !ok {
continue
}
if !isValidPairing(d.columns[columnID], f.Type) {
return fmt.Errorf("configtable: cannot decode %v into %v", d.columns[columnID], f.Type)
}
columnToField[columnID] = v.Field(n)
if columnDelimiter != "" {
columnDelimiters[columnID] = columnDelimiter
}
}
ln, err := d.line()
if err != nil {
return err
}
bits := strings.Split(ln, columnDelimiter)
if len(bits) != len(d.columns) {
d.err = fmt.Errorf("configtable: column count mismatch: saw %d columns, expected %d", len(bits), len(d.columns))
return d.err
}
for n, s := range bits {
v, ok := columnToField[n]
if !ok {
continue
}
var delim *string
if d, ok := columnDelimiters[n]; ok {
delim = &d
}
if err := convertTo(delim, d.columns[n], s, v); err != nil {
d.err = fmt.Errorf("configtable: %v", err)
return d.err
}
}
return nil
}
// NewDecoder creates a new Decoder from the provided io.Reader.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{
s: bufio.NewScanner(r),
}
} | ngdp/configtable/configtable.go | 0.551332 | 0.463384 | configtable.go | starcoder |
package config
import "strings"
//Key is the entity that allows access to values stored within a Values instance.
type Key []string

//NewKey creates a Key with all strings in parts in the returned Key.
//It essentially casts the string slice to a Key.
func NewKey(parts ...string) Key {
	return Key(parts)
}

//NewKeySep returns a Key that is the result of strings.Split(source, sep).
func NewKeySep(source, sep string) Key {
	return NewKey(strings.Split(source, sep)...)
}

//IsEmpty determines whether or not the length of k is 0.
func (k Key) IsEmpty() bool {
	return k.Len() == 0
}

//Len returns the length of k.
func (k Key) Len() int {
	return len(k)
}

//Equal determines whether or not k and other are the same length and all individual
//strings are identical at their respective indices.
func (k Key) Equal(other Key) bool {
	if k.IsEmpty() && other.IsEmpty() {
		return true
	}
	if len(k) != len(other) {
		return false
	}
	for i, part := range k {
		if part != other[i] {
			return false
		}
	}
	return true
}

//StartsWith determines whether or not k is at least the same length as other
//and all strings in other appear at the first consecutive indices of k.
func (k Key) StartsWith(other Key) bool {
	if other.Len() > k.Len() {
		return false
	}
	for i, part := range other {
		if k[i] != part {
			return false
		}
	}
	return true
}

//EndsWith determines whether or not k is at least the same length as other
//and all strings in other appear at the last consecutive indices of k.
func (k Key) EndsWith(other Key) bool {
	if other.Len() > k.Len() {
		return false
	}
	for i := range other {
		part := other[other.Len()-1-i]
		if k[k.Len()-1-i] != part {
			return false
		}
	}
	return true
}

//Append returns a new Key with all strings from k and all Keys in others.
//
//The result is built on freshly allocated backing storage. The previous
//implementation started from NewKey(k...), which passes k's own backing
//array through the variadic call; a subsequent append could then write
//into spare capacity shared with k or with another Append result,
//silently corrupting sibling Keys. Pre-sizing a new slice fixes that.
func (k Key) Append(others ...Key) Key {
	total := k.Len()
	for _, other := range others {
		total += other.Len()
	}
	result := make(Key, 0, total)
	result = append(result, k...)
	for _, other := range others {
		result = append(result, other...)
	}
	return result
}

//AppendStrings returns a new Key with all strings from k and others.
func (k Key) AppendStrings(others ...string) Key {
	return k.Append(NewKey(others...))
}
//KeyParser defines an entity that can parse a string and turn it into a Key.
type KeyParser interface {
Parse(k string) Key
}
//KeyParserFunc is a func implementation of KeyParser that takes in a single string
//and returns a Key.
type KeyParserFunc func(k string) Key
//Parse simply calls pf(k).
func (pf KeyParserFunc) Parse(k string) Key {
return pf(k)
}
//SeparatorKeyParser is a KeyParser that creates Keys from the result of calling
//strings.Split() with k and string(SeparatorKeyParser).
type SeparatorKeyParser string
//Parse returns NewKeySep(k, string(p)).
func (p SeparatorKeyParser) Parse(k string) Key {
return NewKeySep(k, string(p))
}
//PeriodSeparatorKeyParser is the default KeyParser set to c.KeyParser in New().
//See SeparatorKeyParser.
const PeriodSeparatorKeyParser = SeparatorKeyParser(".") | key.go | 0.804406 | 0.438184 | key.go | starcoder |
package quadtree
import "fmt"
// Quadrant identifies one of the four children of an internal quadtree node.
type Quadrant int

const (
	NW Quadrant = iota // north-west
	NE                 // north-east
	SW                 // south-west
	SE                 // south-east
)

// Node is a quadtree node. A leaf stores up to cap(points) points and has
// children == nil; an internal node has points == nil and exactly four
// children, one per Quadrant. num counts all points in the subtree.
type Node struct {
	area     *Area      // rectangular region covered by this node
	points   []PointPtr // leaf payload; nil once the node has been split
	num      int        // total points stored in this subtree
	children []*Node    // nil for leaves; four children otherwise
}
// NewNode creates a leaf node covering area a with room for capacity
// points. A non-positive capacity yields nil.
func NewNode(a *Area, capacity int) *Node {
	if capacity <= 0 {
		return nil
	}
	return &Node{
		area:   a,
		points: make([]PointPtr, 0, capacity),
	}
}

// NewTree creates the root of a quadtree spanning the given coordinate
// ranges, with per-leaf capacity cap.
func NewTree(xMin, xMax, yMin, yMax float64, cap int) *Node {
	lower := NewPoint(xMin, yMin)
	upper := NewPoint(xMax, yMax)
	return &Node{
		area:   NewArea(lower, upper),
		points: make([]PointPtr, 0, cap),
	}
}

// isLeaf reports whether n has not been split yet.
func (n *Node) isLeaf() bool {
	return n.children == nil
}

// contains reports whether p lies inside n's area; a nil node contains nothing.
func (n *Node) contains(p PointPtr) bool {
	return n != nil && n.area.containsPoint(p)
}

// Get returns the stored point equal to point, or nil if absent.
func (n *Node) Get(point PointPtr) PointPtr {
	if n.points == nil {
		// Internal node: descend into the quadrant that would hold point.
		q := n.whichQuadrant(point)
		return n.children[q].Get(point)
	}
	for _, candidate := range n.points {
		if point.Equals(candidate) {
			return candidate
		}
	}
	return nil
}
// GetArea returns every point in the subtree rooted at n that lies inside
// area a.
func (n *Node) GetArea(a *Area) (collected []PointPtr) {
	return n.GetAreaFiltered(a, func(_ PointPtr) bool { return true })
}

// GetAreaFiltered returns every point inside area a for which the
// predicate f returns true. The four child quadrants of an internal node
// are searched concurrently.
func (n *Node) GetAreaFiltered(a *Area, f func(PointPtr) bool) (collected []PointPtr) {
	if n.isLeaf() {
		collected = make([]PointPtr, 0, len(n.points))
		for _, p := range n.points {
			if a.containsPoint(p) && f(p) {
				collected = append(collected, p)
			}
		}
		return collected
	}
	c := make(chan []PointPtr)
	defer close(c)
	for _, child := range n.children {
		child := child
		go func() {
			if child.area.intersects(a) {
				// BUG FIX: this previously recursed via child.GetArea(a),
				// which silently dropped the filter f for every non-leaf
				// descendant, returning unfiltered points.
				c <- child.GetAreaFiltered(a, f)
			} else {
				c <- nil
			}
		}()
	}
	collected = make([]PointPtr, 0, n.num)
	for i := 0; i < len(n.children); i++ {
		collected = append(collected, <-c...)
	}
	return collected
}
// whichQuadrant returns the quadrant of n's area that contains p.
// Points exactly on a dividing line (x == center.x or y == center.y) go
// to the eastern/northern side, matching the >= comparisons below.
func (n *Node) whichQuadrant(p PointPtr) Quadrant {
	if p.Y() >= n.area.c.y {
		// northern quadrants
		if p.X() >= n.area.c.x {
			return NE
		}
		return NW
	} else {
		// southern quadrants
		if p.X() >= n.area.c.x {
			return SE
		}
		return SW
	}
}

// split converts a leaf into an internal node: it creates four child
// leaves from n.area.split() and redistributes all stored points into
// them via Insert. The points slice is then set to nil, which is also
// how Get distinguishes internal nodes from leaves.
// NOTE(review): child indexing assumes area.split() yields quadrants in
// NW, NE, SW, SE order to match the Quadrant constants — confirm.
func (n *Node) split() {
	n.children = make([]*Node, 4)
	for i, a := range n.area.split() {
		n.children[i] = NewNode(a, cap(n.points))
	}
	var q Quadrant
	for _, p := range n.points {
		q = n.whichQuadrant(p)
		// Insert cannot report a duplicate here: the points were already
		// deduplicated when first inserted into this node.
		_ = n.children[q].Insert(p)
	}
	n.points = nil
}
// PointError describes a failure involving a specific point.
type PointError struct {
	msg string
	p   PointPtr
}

// Error formats the message followed by the offending point.
func (e *PointError) Error() string {
	return fmt.Sprintf("%s:\n%v", e.msg, e.p)
}

// PointExistsError reports that p is already present in the quadtree.
func PointExistsError(p PointPtr) *PointError {
	return &PointError{
		msg: "Point does already exist in Quadtree.",
		p:   p,
	}
}
func (n *Node) Insert(p PointPtr) error {
if n.isLeaf() && len(n.points) < cap(n.points) {
for _, b := range n.points {
if b.Equals(p) {
return PointExistsError(b)
}
}
n.points = append(n.points, p)
n.num++
return nil
} else {
if n.isLeaf() {
for _, b := range n.points {
if b.Equals(p) {
return PointExistsError(b)
}
}
n.split()
}
q := n.whichQuadrant(p)
err := n.children[q].Insert(p)
if err == nil {
n.num++
}
return err
}
} | node.go | 0.51562 | 0.447883 | node.go | starcoder |
package triangle
import (
"fluorescence/geometry"
"fluorescence/geometry/primitive"
"fluorescence/geometry/primitive/aabb"
"fluorescence/shading/material"
"fmt"
"math"
)
// Triangle is an internal representation of a Triangle geometry construct
type Triangle struct {
	A geometry.Point `json:"a"`
	B geometry.Point `json:"b"`
	C geometry.Point `json:"c"`
	normal geometry.Vector // unit surface normal; computed by Setup as (B-A)×(C-A) normalized
	IsCulled bool `json:"is_culled"` // whether or not the Triangle is culled, or single-sided
	mat material.Material // shading material; assigned via SetMaterial
}

// Data holds information needed to construct a Triangle
// type Data struct {
// 	A        geometry.Point `json:"a"`
// 	B        geometry.Point `json:"b"`
// 	C        geometry.Point `json:"c"`
// 	IsCulled bool           `json:"is_culled"`
// }
// Setup fills calculated fields in a Triangle: it rejects degenerate
// triangles (two coincident vertices) and precomputes the unit normal.
func (t *Triangle) Setup() (*Triangle, error) {
	degenerate := t.A == t.B || t.A == t.C || t.B == t.C
	if degenerate {
		return nil, fmt.Errorf("Triangle resolves to line or point")
	}
	ab := t.A.To(t.B)
	ac := t.A.To(t.C)
	t.normal = ab.Cross(ac).Unit()
	return t, nil
}
// Intersection computes the intersection of this object and a given ray if it exists.
// This is a Möller–Trumbore-style test: it solves for the barycentric
// coordinates (u, v) and the ray parameter directly from cross/dot
// products, without precomputing the plane equation.
func (t *Triangle) Intersection(ray geometry.Ray, tMin, tMax float64) (*material.RayHit, bool) {
	ab := t.A.To(t.B)
	ac := t.A.To(t.C)
	pVector := ray.Direction.Cross(ac)
	determinant := ab.Dot(pVector)
	if t.IsCulled && determinant < 1e-7 {
		// This ray is parallel to this Triangle or back-facing.
		return nil, false
	} else if determinant > -1e-7 && determinant < 1e-7 {
		// Near-zero determinant: the ray lies (almost) in the triangle's plane.
		return nil, false
	}
	inverseDeterminant := 1.0 / determinant
	tVector := t.A.To(ray.Origin)
	// u is the first barycentric coordinate; outside [0,1] means a miss.
	u := inverseDeterminant * (tVector.Dot(pVector))
	if u < 0.0 || u > 1.0 {
		return nil, false
	}
	qVector := tVector.Cross(ab)
	// v is the second barycentric coordinate; the hit must satisfy u+v <= 1.
	v := inverseDeterminant * (ray.Direction.Dot(qVector))
	if v < 0.0 || u+v > 1.0 {
		return nil, false
	}
	// At this stage we can compute time to find out where the intersection point is on the line.
	time := inverseDeterminant * (ac.Dot(qVector))
	if time >= tMin && time <= tMax {
		// ray intersection
		// NOTE(review): U and V are reported as 0 rather than the computed
		// barycentric u/v — confirm texture coordinates are intentionally unset.
		return &material.RayHit{
			Ray:         ray,
			NormalAtHit: t.normal,
			Time:        time,
			U:           0,
			V:           0,
			Material:    t.mat,
		}, true
	}
	return nil, false
}
// BoundingBox returns an axis-aligned bounding box that encloses the
// triangle, padded by a small epsilon on every side. The boolean result
// reports that a bounding box exists, which is always true for triangles.
// The t0/t1 time bounds are unused because the triangle is static.
func (t *Triangle) BoundingBox(t0, t1 float64) (*aabb.AABB, bool) {
	return &aabb.AABB{
		A: geometry.Point{
			// BUG FIX: each minimum previously compared t.A against itself
			// (math.Min(t.A.X, t.A.X)) instead of against t.B, so the box
			// could exclude vertex B and clip the triangle.
			X: math.Min(math.Min(t.A.X, t.B.X), t.C.X) - 1e-7,
			Y: math.Min(math.Min(t.A.Y, t.B.Y), t.C.Y) - 1e-7,
			Z: math.Min(math.Min(t.A.Z, t.B.Z), t.C.Z) - 1e-7,
		},
		B: geometry.Point{
			X: math.Max(math.Max(t.A.X, t.B.X), t.C.X) + 1e-7,
			Y: math.Max(math.Max(t.A.Y, t.B.Y), t.C.Y) + 1e-7,
			Z: math.Max(math.Max(t.A.Z, t.B.Z), t.C.Z) + 1e-7,
		},
	}, true
}
// SetMaterial sets the material of this object
func (t *Triangle) SetMaterial(m material.Material) {
	t.mat = m
}

// IsInfinite returns whether this object is infinite.
// A triangle is always finite in extent.
func (t *Triangle) IsInfinite() bool {
	return false
}

// IsClosed returns whether this object is closed.
// A triangle is an open surface, never a closed volume.
func (t *Triangle) IsClosed() bool {
	return false
}

// Copy returns a shallow copy of this object; the material reference is
// shared between the original and the copy.
func (t *Triangle) Copy() primitive.Primitive {
	newT := *t
	return &newT
}
// Unit creates a unit Triangle.
// The points of this Triangle are:
// A: (0, 0, 0),
// B: (1, 0, 0),
// C: (0, 1, 0).
func Unit(xOffset, yOffset, zOffset float64) *Triangle {
t, _ := (&Triangle{
A: geometry.Point{
X: 0.0 + xOffset,
Y: 0.0 + yOffset,
Z: 0.0 + zOffset,
},
B: geometry.Point{
X: 1.0 + xOffset,
Y: 0.0 + yOffset,
Z: 0.0 + zOffset,
},
C: geometry.Point{
X: 0.0 + xOffset,
Y: 1.0 + yOffset,
Z: 0.0 + zOffset,
},
IsCulled: true,
}).Setup()
return t
} | geometry/primitive/triangle/triangle.go | 0.785391 | 0.492188 | triangle.go | starcoder |
package gohbv
import (
"math"
"gonum.org/v1/gonum/floats"
"gonum.org/v1/gonum/integrate"
)
// Helper function to calculate MAXBAS triangular weights.
// For each sample point it evaluates the triangle function
//
//	y(x) = 2/p - |x - p/2| * 4/p^2
//
// whose peak lies at x = p/2. These values are integrated in
// RoutingMaxbasWeights to obtain the routing weights.
func routingMaxbas(x []float64, p_maxbas float64) []float64 {
	peakHeight := 2 / p_maxbas
	slope := 4 / (math.Pow(p_maxbas, 2))
	out := make([]float64, len(x))
	for idx, xv := range x {
		out[idx] = peakHeight - math.Abs(xv-p_maxbas/2.0)*slope
	}
	return out
}
// Calculate MAXBAS triangular weights using trapezoidal integration.
//
// The triangle function from routingMaxbas is integrated over each whole
// time step [i, i+1] with sample spacing dx, producing ceil(MAXBAS)
// weights. The weights are then rescaled so they sum exactly to 1.0,
// compensating for the numerical error of the trapezoidal rule.
func RoutingMaxbasWeights(mPars Parameters) []float64 {
	dx := 0.1
	var maxbas = make([]float64, int(math.Ceil(mPars.MAXBAS)))
	for i := 0; i < int(math.Ceil(mPars.MAXBAS)); i++ {
		// Sample the triangle on [i, i+1] and integrate it numerically.
		x := floats.Span(make([]float64, int(1.0/dx)+1), float64(i), float64(i+1))
		y := routingMaxbas(x, mPars.MAXBAS)
		x_int := integrate.Trapezoidal(x, y)
		maxbas[i] = x_int
	}
	// Adjust the triangular weights for inaccuracies in integration so that sum equals 1.0
	maxbas_sum := 0.0
	for i := 0; i < len(maxbas); i++ {
		maxbas_sum += maxbas[i]
	}
	adj_mb_factor := 1.0 / maxbas_sum
	for i := 0; i < len(maxbas); i++ {
		maxbas[i] = maxbas[i] * adj_mb_factor
	}
	return maxbas
}
// Snow routine (also called precipitation routine) for calculating snow- or rainfall
// accumulation, melt and refreezing of snow storage.
//
// Operates in place on day i of mState, reading the previous day's snow
// state (i-1) and the day's temperature/precipitation from inData.
// Outputs set on mState[i]: Snowfall, Rainfall, Snow_melt, Snow_solid,
// Snow_liquid, Liquid_in (water reaching the soil), Snow_cover, S_snow.
func SnowRoutine(mState []ModelState, mPars Parameters, inData []InputData, i int) {
	mState[i].Snow_solid = mState[i-1].Snow_solid
	// Snow cover beginning of day
	if mState[i].Snow_solid > 0 {
		mState[i].Snow_cover = 1
	} else {
		mState[i].Snow_cover = 0
	}
	if inData[i].Temperature <= mPars.TT { // Temperature below threshold
		// If air temp bellow threshold (p_TT) then calculate snowfall and refreezing
		// Snowfall added to snow storage (SFCF is the snowfall correction factor)
		mState[i].Snowfall = inData[i].Precipitation * mPars.SFCF
		mState[i].Rainfall = 0
		mState[i].Snow_solid = mState[i].Snow_solid + mState[i].Snowfall
		// Refreezing, moved from liquid to solid snow storage
		// (degree-day melt factor CFMAX scaled by refreezing coefficient CFR).
		var pot_refreeze float64 = mPars.CFMAX * mPars.CFR * (mPars.TT - inData[i].Temperature)
		refreezing := math.Min(pot_refreeze, mState[i-1].Snow_liquid)
		mState[i].Snow_solid = mState[i].Snow_solid + refreezing
		mState[i].Snow_liquid = mState[i-1].Snow_liquid - refreezing // free water content in snowpack
		// No snowmelt or liquid water infiltrating
		mState[i].Snow_melt = 0
		mState[i].Liquid_in = 0
	} else { // Precipitation as rain and snow can melt
		mState[i].Rainfall = inData[i].Precipitation
		mState[i].Snowfall = 0
		// Degree-day melt, never negative.
		snowmelt_potential := math.Max(mPars.CFMAX*(inData[i].Temperature-mPars.TT), 0.0)
		// Snow melt is limited to frozen solid part of the snow pack
		mState[i].Snow_melt = math.Min(snowmelt_potential, mState[i].Snow_solid)
		// Remove snow melt from the solid part of the snow pack
		mState[i].Snow_solid = math.Max(mState[i].Snow_solid-mState[i].Snow_melt, 0.0)
		// Snowpack can retain CWH fraction of meltwater, which can later refreeze
		// Water holding capacity is updated after subtracting melt from solid part of snow pack
		// Max liquid water the snowpack can hold
		pot_liqwater_snow := mState[i].Snow_solid * mPars.CWH
		// Calculate liquid water in the snowpack, snowmelt and rainfall can be held
		// Liquid water in snow pack from previousstep + snowmelt + preciptiation
		mState[i].Snow_liquid = mState[i-1].Snow_liquid + inData[i].Precipitation + mState[i].Snow_melt
		// pot_liqwater_snow is held in remaining snowpack, rest infiltrates
		// Excess meltwater and rainfall goes to infiltration (liquid_in)
		// snow_liquid is not "melted" but will be released here when snowpack can no longer hold it
		mState[i].Liquid_in = math.Max(mState[i].Snow_liquid-pot_liqwater_snow, 0)
		mState[i].Snow_liquid = mState[i].Snow_liquid - mState[i].Liquid_in // Update snowpack liquid water
	}
	// Update total snow storage, combined solid and liquid part
	mState[i].S_snow = mState[i].Snow_solid + mState[i].Snow_liquid
}
// SoilRoutine partitions infiltrating water between soil moisture and
// groundwater recharge, and computes actual evapotranspiration (AET).
//
// Infiltration is processed 1 mm at a time to avoid numerical issues:
// each millimetre is split between recharge and soil storage using the
// HBV BETA function (SM/FC)^BETA evaluated at the running soil moisture.
// Outputs set on mState[i]: Recharge_sm, Recharge_gwuz, AET, S_soil.
func SoilRoutine(mState []ModelState, mPars Parameters, inData []InputData, i int) {
	soil_s_current := mState[i-1].S_soil
	soil_s_in := 0.0
	recharge_gw_in := 0.0
	recharge_gw_in_total := 0.0
	if mState[i].Liquid_in > 0 {
		// Split the input into whole millimetres plus a remainder.
		liquid_in_last := mState[i].Liquid_in - math.Floor(mState[i].Liquid_in)
		liquid_in_int := int(math.Floor(mState[i].Liquid_in))
		// FIX: the loop variable previously shadowed the day index i
		// (harmless but confusing, per the old "Note i not used" comment);
		// renamed to step.
		for step := 1; step <= liquid_in_int; step++ {
			recharge_gw_in = 1 * math.Pow((soil_s_current/mPars.FC), mPars.BETA) // 1 mm each step
			soil_s_in = 1 - recharge_gw_in                                       // 1 mm each step
			soil_s_current += soil_s_in
			recharge_gw_in_total += recharge_gw_in
		}
		// Final, non-whole millimetre.
		recharge_gw_in = liquid_in_last * math.Pow((soil_s_current/mPars.FC), mPars.BETA)
		soil_s_in = liquid_in_last - recharge_gw_in
		soil_s_current += soil_s_in
		recharge_gw_in_total += recharge_gw_in
		mState[i].Recharge_sm = soil_s_current - mState[i-1].S_soil
		mState[i].Recharge_gwuz = recharge_gw_in_total
	} else {
		mState[i].Recharge_gwuz = 0
		mState[i].Recharge_sm = 0
	}
	// ET only if no snow on the ground (as in HBV-light) using mean soil
	// moisture over the recharge day.
	sm_aet := (soil_s_current-mState[i-1].S_soil)/2 + mState[i-1].S_soil
	if mState[i].Snow_cover == 1 {
		mState[i].AET = 0
	} else {
		mState[i].AET = inData[i].PotentialET * math.Min(1, (sm_aet*(1/(mPars.LP*mPars.FC))))
	}
	mState[i].S_soil = mState[i-1].S_soil - mState[i].AET + mState[i].Recharge_sm
}
// ResponseRoutine moves recharge through the upper (SUZ) and lower (SLZ)
// groundwater boxes and computes the day's total groundwater discharge.
//
// Outflows: K2*SLZ from the lower box, K1*SUZ from the upper box, and a
// fast component K0*(SUZ-UZL) that only activates above threshold UZL.
// Outputs set on mState[i]: S_gw_suz, S_gw_slz, Q_gw.
func ResponseRoutine(mState []ModelState, mPars Parameters, i int) {
	// Groundwater recharge and percolation
	mState[i].S_gw_suz = mState[i-1].S_gw_suz + mState[i].Recharge_gwuz
	// Percolation from upper to lower box is capped by available storage.
	percolation := math.Min(mPars.PERC, mState[i].S_gw_suz)
	mState[i].S_gw_suz = mState[i].S_gw_suz - percolation
	mState[i].S_gw_slz = mState[i-1].S_gw_slz + percolation
	// Groundwater discharge
	q_lz := mPars.K2 * mState[i].S_gw_slz
	q_uz := mPars.K1 * mState[i].S_gw_suz
	q_uzt := mPars.K0 * math.Max(mState[i].S_gw_suz-mPars.UZL, 0)
	mState[i].Q_gw = q_lz + q_uz + q_uzt
	// Update groundwater storages
	// TODO can they go negative? should not be possible but double think it
	mState[i].S_gw_slz = mState[i].S_gw_slz - q_lz
	mState[i].S_gw_suz = mState[i].S_gw_suz - q_uz - q_uzt
}
// The RoutingRoutine applies maxbas weights to the groundwater response
// to calculate the simulated runoff
func RoutingRoutine(mState []ModelState, mPars Parameters, inData []InputData, i int, maxbas []float64) {
for j := 0; j < len(maxbas); j++ {
ij := i + j
if ij >= len(inData) {
break
}
mState[ij].Q_sim = mState[ij].Q_sim + mState[i].Q_gw*maxbas[j]
}
} | hbv_routines.go | 0.666605 | 0.416945 | hbv_routines.go | starcoder |
package bindings
import (
"github.com/vmware/vsphere-automation-sdk-go/runtime/data"
"github.com/vmware/vsphere-automation-sdk-go/runtime/lib"
"reflect"
)
// BindingType describes how a Go binding type maps onto the vAPI runtime
// data model: Definition yields the runtime data definition and Type the
// wire data type.
type BindingType interface {
	Definition() data.DataDefinition
	Type() data.DataType
}

// VoidType is the binding type for operations or values carrying no data.
type VoidType struct{}

// Definition returns the runtime void definition.
func (i VoidType) Definition() data.DataDefinition {
	return data.NewVoidDefinition()
}

// Type reports the VOID wire type.
func (i VoidType) Type() data.DataType {
	return data.VOID
}

// NewVoidType creates a VoidType.
func NewVoidType() VoidType {
	return VoidType{}
}

// IntegerType is the binding type for integer values.
type IntegerType struct {
}

// Definition returns the runtime integer definition.
func (i IntegerType) Definition() data.DataDefinition {
	return data.NewIntegerDefinition()
}

// Type reports the INTEGER wire type.
func (i IntegerType) Type() data.DataType {
	return data.INTEGER
}

// NewIntegerType creates an IntegerType.
func NewIntegerType() IntegerType {
	return IntegerType{}
}

// StringType is the binding type for string values; implements BindingType.
type StringType struct{}

// NewStringType creates a StringType.
func NewStringType() StringType {
	return StringType{}
}

// Definition returns the runtime string definition.
func (s StringType) Definition() data.DataDefinition {
	return data.NewStringDefinition()
}

// Type reports the STRING wire type.
func (i StringType) Type() data.DataType {
	return data.STRING
}

// BooleanType is the binding type for boolean values.
type BooleanType struct{}

// NewBooleanType creates a BooleanType.
func NewBooleanType() BooleanType {
	return BooleanType{}
}

// Definition returns the runtime boolean definition.
func (b BooleanType) Definition() data.DataDefinition {
	return data.NewBooleanDefinition()
}

// Type reports the BOOLEAN wire type.
func (i BooleanType) Type() data.DataType {
	return data.BOOLEAN
}
// OptionalType wraps another binding type whose value may be absent.
type OptionalType struct {
	elementType BindingType
}

// NewOptionalType creates an OptionalType wrapping elementType.
func NewOptionalType(elementType BindingType) OptionalType {
	return OptionalType{elementType: elementType}
}

// Definition returns an optional definition of the element's definition.
func (o OptionalType) Definition() data.DataDefinition {
	return data.NewOptionalDefinition(o.elementType.Definition())
}

// Type reports the OPTIONAL wire type.
func (i OptionalType) Type() data.DataType {
	return data.OPTIONAL
}

// ElementType returns the wrapped element binding type.
func (o OptionalType) ElementType() BindingType {
	return o.elementType
}

// ListType Representation of List IDL in Golang Binding
type ListType struct {
	elementType BindingType
	//this is necessary when ListType is the top most type for conversion.
	// When list is part of a struct, this is not used.
	// When list is part of a struct, struct will be the top most type for conversion.
	// so bindingstruct is not necessary.
	bindingStruct reflect.Type
}

// NewListType creates a ListType with the given element binding type and
// the reflected Go slice type it binds to.
func NewListType(elementType BindingType, bindingStruct reflect.Type) ListType {
	return ListType{elementType: elementType, bindingStruct: bindingStruct}
}

// SetBindingStruct sets the reflected Go type.
// NOTE(review): the value receiver means this assignment mutates a copy
// and is lost when the method returns — likely a latent bug; a pointer
// receiver would be required for the update to stick. Flagged rather
// than changed because switching receiver kinds alters the method set.
func (l ListType) SetBindingStruct(typ reflect.Type) {
	l.bindingStruct = typ
}

// BindingStruct returns the reflected Go type this list binds to.
func (l ListType) BindingStruct() reflect.Type {
	return l.bindingStruct
}

// ElementType returns the element binding type.
func (l ListType) ElementType() BindingType {
	return l.elementType
}

// Definition returns a list definition of the element's definition.
func (l ListType) Definition() data.DataDefinition {
	return data.NewListDefinition(l.elementType.Definition())
}

// Type reports the LIST wire type.
func (i ListType) Type() data.DataType {
	return data.LIST
}

// OpaqueType is the binding type for values whose type is not known statically.
type OpaqueType struct {
}

// NewOpaqueType creates an OpaqueType.
func NewOpaqueType() OpaqueType {
	return OpaqueType{}
}

// Definition returns the runtime opaque definition.
func (o OpaqueType) Definition() data.DataDefinition {
	return data.NewOpaqueDefinition()
}

// Type reports the OPAQUE wire type.
func (i OpaqueType) Type() data.DataType {
	return data.OPAQUE
}
// StructType describes a vAPI structure: its canonical name, the binding
// type of each field, the reflected Go struct it binds to, a map from
// canonical to Go field names, and optional struct-level validators.
type StructType struct {
	name              string
	fields            map[string]BindingType
	bindingStruct     reflect.Type
	canonicalFieldMap map[string]string
	validators        []Validator
}

// NewStructType builds a StructType from its parts.
func NewStructType(name string,
	fields map[string]BindingType,
	bindingClass reflect.Type,
	canonicalFieldMap map[string]string,
	validators []Validator) StructType {
	return StructType{name: name, fields: fields, bindingStruct: bindingClass,
		canonicalFieldMap: canonicalFieldMap, validators: validators}
}

// Name returns the canonical structure name.
func (s StructType) Name() string {
	return s.name
}

// BindingStruct returns the reflected Go struct type bound to this structure.
func (s StructType) BindingStruct() reflect.Type {
	return s.bindingStruct
}

// Field returns the binding type of the named field, or nil when absent.
func (s StructType) Field(fieldName string) BindingType {
	return s.fields[fieldName]
}

// CanonicalField maps a field name through the canonical field map.
func (s StructType) CanonicalField(fieldName string) string {
	return s.canonicalFieldMap[fieldName]
}

// FieldNames returns the field names in unspecified (map) order.
func (s StructType) FieldNames() []string {
	// Idiom fix: single-variable range and a pre-sized slice.
	keys := make([]string, 0, len(s.fields))
	for key := range s.fields {
		keys = append(keys, key)
	}
	return keys
}

// Definition builds the runtime data definition for this structure.
func (s StructType) Definition() data.DataDefinition {
	fieldDefMap := make(map[string]data.DataDefinition, len(s.fields))
	for key, field := range s.fields {
		fieldDefMap[key] = field.Definition()
	}
	return data.NewStructDefinition(s.name, fieldDefMap)
}

// Type reports the STRUCTURE wire type.
// (Receiver renamed from the copy-pasted "i" for consistency.)
func (s StructType) Type() data.DataType {
	return data.STRUCTURE
}

// Validate runs each configured validator against structValue and returns
// the first non-nil error list, or nil when all validators pass.
// (The old condition "msgs != nil || len(msgs) > 0" was redundant:
// len(msgs) > 0 already implies msgs != nil.)
func (s StructType) Validate(structValue *data.StructValue) []error {
	for _, v := range s.validators {
		if msgs := v.Validate(structValue); msgs != nil {
			return msgs
		}
	}
	return nil
}
// MapType is the binding type for maps. On the wire a map is encoded as a
// list of MAP_ENTRY structures, each carrying a key field and a value field.
type MapType struct {
	KeyType       BindingType
	ValueType     BindingType
	bindingStruct reflect.Type
}

// NewMapType creates a MapType with the given key/value binding types and
// the reflected Go map type it binds to.
func NewMapType(keyType BindingType, valueType BindingType, bindingStruct reflect.Type) MapType {
	return MapType{KeyType: keyType, ValueType: valueType, bindingStruct: bindingStruct}
}

// Definition builds the list-of-entries definition used to encode maps.
func (m MapType) Definition() data.DataDefinition {
	fieldDefs := make(map[string]data.DataDefinition)
	fieldDefs[lib.MAP_KEY_FIELD] = m.KeyType.Definition()
	fieldDefs[lib.MAP_VALUE_FIELD] = m.ValueType.Definition()
	elementDef := data.NewStructDefinition(lib.MAP_ENTRY, fieldDefs)
	return data.NewListDefinition(elementDef)
}

// Type reports the LIST wire type (maps are encoded as entry lists).
func (i MapType) Type() data.DataType {
	return data.LIST
}

// IdType is the binding type for resource identifiers, encoded as strings.
type IdType struct {
	ResourceTypes      []string
	ResourceTypeHolder string
}

// NewIdType creates an IdType with the accepted resource types and the
// name of the field that holds the resolved resource type.
func NewIdType(resourceTypes []string, typeHolder string) IdType {
	return IdType{ResourceTypes: resourceTypes, ResourceTypeHolder: typeHolder}
}

// Definition returns a string definition (IDs are strings on the wire).
func (i IdType) Definition() data.DataDefinition {
	return data.NewStringDefinition()
}

// Type reports the STRING wire type.
func (i IdType) Type() data.DataType {
	return data.STRING
}

// EnumType is the binding type for enumerations, encoded as strings.
type EnumType struct {
	name          string
	bindingStruct reflect.Type
}

// Name returns the canonical enumeration name.
func (e EnumType) Name() string {
	return e.name
}

// BindingStruct returns the reflected Go type bound to this enumeration.
func (e EnumType) BindingStruct() reflect.Type {
	return e.bindingStruct
}

// NewEnumType creates an EnumType.
func NewEnumType(name string, bindingStruct reflect.Type) EnumType {
	return EnumType{name: name, bindingStruct: bindingStruct}
}

// Definition returns a string definition (enums are strings on the wire).
func (e EnumType) Definition() data.DataDefinition {
	return data.NewStringDefinition()
}

// Type reports the STRING wire type.
func (i EnumType) Type() data.DataType {
	return data.STRING
}

// SetType is the binding type for sets, encoded as lists on the wire.
type SetType struct {
	elementType   BindingType
	bindingStruct reflect.Type
}

// NewSetType creates a SetType with the given element binding type and
// the reflected Go type it binds to.
func NewSetType(elementType BindingType, bindingStruct reflect.Type) SetType {
	return SetType{elementType: elementType, bindingStruct: bindingStruct}
}

// ElementType returns the element binding type.
func (s SetType) ElementType() BindingType {
	return s.elementType
}

// BindingStruct returns the reflected Go type this set binds to.
func (s SetType) BindingStruct() reflect.Type {
	return s.bindingStruct
}

// Definition returns a list definition of the element's definition.
func (s SetType) Definition() data.DataDefinition {
	return data.NewListDefinition(s.elementType.Definition())
}

// Type reports the LIST wire type (sets are encoded as lists).
func (i SetType) Type() data.DataType {
	return data.LIST
}
// ErrorType describes a vAPI error structure: its canonical name, the
// binding type of each field, the reflected Go struct it binds to and a
// map from canonical to Go field names.
type ErrorType struct {
	name              string
	fields            map[string]BindingType
	bindingStruct     reflect.Type
	canonicalFieldMap map[string]string
}

// NewErrorType builds an ErrorType from its parts.
func NewErrorType(name string, fields map[string]BindingType, bindingClass reflect.Type, canonicalFieldMap map[string]string) ErrorType {
	return ErrorType{name: name, fields: fields, bindingStruct: bindingClass, canonicalFieldMap: canonicalFieldMap}
}

// Name returns the canonical error name.
func (e ErrorType) Name() string {
	return e.name
}

// BindingStruct returns the reflected Go struct type bound to this error.
func (e ErrorType) BindingStruct() reflect.Type {
	return e.bindingStruct
}

// Field returns the binding type of the named field, or nil when absent.
func (e ErrorType) Field(fieldName string) BindingType {
	return e.fields[fieldName]
}

// FieldNames returns the field names in unspecified (map) order.
func (e ErrorType) FieldNames() []string {
	// Idiom fix: single-variable range and a pre-sized slice.
	keys := make([]string, 0, len(e.fields))
	for key := range e.fields {
		keys = append(keys, key)
	}
	return keys
}

// Definition builds the runtime error definition for this type.
func (e ErrorType) Definition() data.DataDefinition {
	fieldDefMap := make(map[string]data.DataDefinition, len(e.fields))
	for key, field := range e.fields {
		fieldDefMap[key] = field.Definition()
	}
	return data.NewErrorDefinition(e.name, fieldDefMap)
}

// Type reports the ERROR wire type.
// (Receiver renamed from the copy-pasted "i" for consistency.)
func (e ErrorType) Type() data.DataType {
	return data.ERROR
}
// DynamicStructType represents a structure whose fields are not known
// statically; values are checked with a HasFieldsOf validator instead of
// a fixed field map.
type DynamicStructType struct {
	name          string
	fields        map[string]BindingType
	bindingStruct reflect.Type
	validator     Validator
}

// NewDynamicStructType creates a DynamicStructType validated against the
// referenced structure types under the given converter mode.
// Note: fields is left nil, so Field always returns nil and FieldNames
// returns an empty slice.
func NewDynamicStructType(hasFieldsOfTypes []ReferenceType, mode ConverterMode) DynamicStructType {
	return DynamicStructType{
		name:          "vmware.vapi.dynamic_struct",
		bindingStruct: StructBindingType,
		validator:     NewHasFieldsOfValidator(hasFieldsOfTypes, mode),
	}
}

// Name returns the canonical dynamic-structure name.
func (d DynamicStructType) Name() string {
	return d.name
}

// BindingStruct returns the reflected Go type bound to this structure.
func (d DynamicStructType) BindingStruct() reflect.Type {
	return d.bindingStruct
}

// Field returns the binding type of the named field (always nil for
// instances built with NewDynamicStructType).
func (d DynamicStructType) Field(fieldName string) BindingType {
	return d.fields[fieldName]
}

// FieldNames returns the field names in unspecified (map) order.
func (d DynamicStructType) FieldNames() []string {
	// Idiom fix: single-variable range and a pre-sized slice.
	keys := make([]string, 0, len(d.fields))
	for key := range d.fields {
		keys = append(keys, key)
	}
	return keys
}

// Definition returns the runtime dynamic-structure definition.
func (d DynamicStructType) Definition() data.DataDefinition {
	return data.NewDynamicStructDefinition()
}

// Type reports the DYNAMIC_STRUCTURE wire type.
// (Receiver renamed from the copy-pasted "i" for consistency.)
func (d DynamicStructType) Type() data.DataType {
	return data.DYNAMIC_STRUCTURE
}

// Validate runs the configured validator against structValue, returning
// its errors, or nil when no validator is configured.
func (d DynamicStructType) Validate(structValue *data.StructValue) []error {
	if d.validator != nil {
		return d.validator.Validate(structValue)
	}
	return nil
}
// DoubleType is the binding type for double-precision floating point values.
type DoubleType struct {
}

// NewDoubleType creates a DoubleType.
func NewDoubleType() DoubleType {
	return DoubleType{}
}

// Definition returns the runtime double definition.
func (d DoubleType) Definition() data.DataDefinition {
	return data.NewDoubleDefinition()
}

// Type reports the DOUBLE wire type.
func (i DoubleType) Type() data.DataType {
	return data.DOUBLE
}

// DateTimeType is the binding type for timestamps, encoded as strings.
type DateTimeType struct {
}

// NewDateTimeType creates a DateTimeType.
func NewDateTimeType() DateTimeType {
	return DateTimeType{}
}

// Definition returns a string definition (datetimes are strings on the wire).
func (d DateTimeType) Definition() data.DataDefinition {
	return data.NewStringDefinition()
}

// Type reports the STRING wire type.
func (i DateTimeType) Type() data.DataType {
	return data.STRING
}

// BlobType is the binding type for binary payloads.
type BlobType struct {
}

// NewBlobType creates a BlobType.
func NewBlobType() BlobType {
	return BlobType{}
}

// Definition returns the runtime blob definition.
func (b BlobType) Definition() data.DataDefinition {
	return data.NewBlobDefinition()
}

// Type reports the BLOB wire type.
func (i BlobType) Type() data.DataType {
	return data.BLOB
}

// SecretType is the binding type for sensitive string values.
type SecretType struct {
}

// NewSecretType creates a SecretType.
func NewSecretType() SecretType {
	return SecretType{}
}

// Definition returns the runtime secret definition.
func (s SecretType) Definition() data.DataDefinition {
	return data.NewSecretDefinition()
}

// Type reports the SECRET wire type.
func (i SecretType) Type() data.DataType {
	return data.SECRET
}

// UriType is the binding type for URIs, encoded as strings.
type UriType struct {
}

// NewUriType creates a UriType.
func NewUriType() UriType {
	return UriType{}
}

// Definition returns a string definition (URIs are strings on the wire).
func (u UriType) Definition() data.DataDefinition {
	return data.NewStringDefinition()
}

// Type reports the STRING wire type.
func (i UriType) Type() data.DataType {
	return data.STRING
}

// AnyErrorType is the binding type matching any error value.
type AnyErrorType struct {
}

// NewAnyErrorType creates an AnyErrorType.
func NewAnyErrorType() AnyErrorType {
	return AnyErrorType{}
}

// Definition returns a field-less "Exception" structure definition.
func (e AnyErrorType) Definition() data.DataDefinition {
	fieldDefMap := make(map[string]data.DataDefinition)
	var result = data.NewStructDefinition("Exception", fieldDefMap)
	return result
}

// Type reports the ANY_ERROR wire type.
func (i AnyErrorType) Type() data.DataType {
	return data.ANY_ERROR
}
// BindingTypeFunction lazily produces a BindingType. It allows types that
// reference each other (or themselves) to be declared without an
// initialization cycle.
type BindingTypeFunction func() BindingType

// ReferenceType defers resolution of a binding type until it is needed,
// by wrapping a BindingTypeFunction.
type ReferenceType struct {
	Fn BindingTypeFunction
}

// NewReferenceType creates a ReferenceType around fn.
func NewReferenceType(fn BindingTypeFunction) ReferenceType {
	return ReferenceType{Fn: fn}
}

// Resolve invokes the wrapped function and returns the concrete type.
func (r ReferenceType) Resolve() BindingType {
	return r.Fn()
}

// Definition resolves the referenced type and returns its definition.
func (r ReferenceType) Definition() data.DataDefinition {
	return r.Fn().Definition()
}

// Type reports the STRUCTURE_REF wire type.
func (i ReferenceType) Type() data.DataType {
	return data.STRUCTURE_REF
}
package continuous
import (
"github.com/jtejido/ggsl/specfunc"
"github.com/jtejido/linear"
"github.com/jtejido/stats"
"github.com/jtejido/stats/err"
smath "github.com/jtejido/stats/math"
"math"
"math/rand"
)
// (Scaled) Inverse chi-squared distribution
// https://en.wikipedia.org/wiki/Scaled_inverse_chi-squared_distribution
// https://en.wikipedia.org/wiki/Inverse-chi-squared_distribution
type InverseChiSquared struct {
	dof, scale float64 // degrees of freedom v and scale σ2; both strictly positive
	src rand.Source // optional randomness source; nil means the global source
	natural linear.RealVector // natural parameters; populated by ToExponential
}

// NewInverseChiSquared constructs the distribution with the default
// randomness source.
func NewInverseChiSquared(dof, scale float64) (*InverseChiSquared, error) {
	return NewInverseChiSquaredWithSource(dof, scale, nil)
}

// NewInverseChiSquaredWithSource constructs the distribution using src
// for random variate generation. It returns err.Invalid when dof or
// scale is not strictly positive.
func NewInverseChiSquaredWithSource(dof, scale float64, src rand.Source) (*InverseChiSquared, error) {
	if dof <= 0 || scale <= 0 {
		return nil, err.Invalid()
	}
	return &InverseChiSquared{dof, scale, src, nil}, nil
}
// Parameters returns the open intervals on which the distribution's
// parameters are defined:
// v ∈ (0,∞)
// σ2 ∈ (0,∞)
func (i *InverseChiSquared) Parameters() stats.Limits {
	return stats.Limits{
		"v": stats.Interval{0, math.Inf(1), true, true},
		"σ2": stats.Interval{0, math.Inf(1), true, true},
	}
}

// Support returns the open support of the distribution:
// x ∈ (0,∞)
func (i *InverseChiSquared) Support() stats.Interval {
	return stats.Interval{0, math.Inf(1), true, true}
}
// Probability evaluates the PDF at x:
//
//	f(x) = 2^(-v/2) * exp(-vσ2/(2x)) * (vσ2/x)^(v/2) / (x * Γ(v/2))
//
// It returns 0 outside the support (0,∞).
func (i *InverseChiSquared) Probability(x float64) float64 {
	if i.Support().IsWithinInterval(x) {
		return (math.Pow(2, -i.dof/2) * math.Exp(-(i.dof*i.scale)/(2*x)) * math.Pow((i.dof*i.scale)/x, i.dof/2)) / (x * specfunc.Gamma(i.dof/2))
	}
	return 0
}

// Distribution evaluates the CDF at x via the regularized upper
// incomplete gamma function Q(v/2, vσ2/(2x)). Returns 0 outside the
// support.
func (i *InverseChiSquared) Distribution(x float64) float64 {
	if i.Support().IsWithinInterval(x) {
		return specfunc.Gamma_inc_Q(i.dof/2, (i.scale*i.dof)/(2*x))
	}
	return 0
}

// Inverse is the quantile function: it inverts the CDF by inverting the
// regularized lower incomplete gamma function. p <= 0 maps to 0 and
// p >= 1 to +Inf.
func (i *InverseChiSquared) Inverse(p float64) float64 {
	if p <= 0 {
		return 0
	}
	if p >= 1 {
		return math.Inf(1)
	}
	return (i.dof * i.scale) / (2 * smath.InverseRegularizedLowerIncompleteGamma(i.dof/2, p))
}
// Entropy returns the differential entropy:
// v/2 + ln((vσ2/2)·Γ(v/2)) − (1 + v/2)·ψ(v/2), where ψ is the digamma function.
func (i *InverseChiSquared) Entropy() float64 {
	return (i.dof / 2) + math.Log(((i.scale*i.dof)/2)*specfunc.Gamma(i.dof/2)) - (1+(i.dof/2))*specfunc.Psi(i.dof/2)
}

// ExKurtosis returns the excess kurtosis, which exists only for v > 8;
// otherwise +Inf is returned as a sentinel.
func (i *InverseChiSquared) ExKurtosis() float64 {
	if i.dof > 8 {
		return (12 * (5*i.dof - 22)) / ((i.dof - 6) * (i.dof - 8))
	}
	return math.Inf(1)
}

// Mean returns vσ2/(v−2), defined only for v > 2; otherwise +Inf is
// returned as a sentinel.
func (i *InverseChiSquared) Mean() float64 {
	if i.dof > 2 {
		return (i.dof * i.scale) / (i.dof - 2)
	}
	return math.Inf(1)
}

// Median returns the 0.5 quantile, computed by inverting the regularized
// lower incomplete gamma function (same formula as Inverse at p = 0.5).
func (i *InverseChiSquared) Median() float64 {
	return (i.dof * i.scale) / (2 * smath.InverseRegularizedLowerIncompleteGamma(i.dof/2, .5))
}

// Mode returns vσ2/(v+2).
func (i *InverseChiSquared) Mode() float64 {
	return (i.dof * i.scale) / (i.dof + 2)
}

// Skewness returns 4√2·√(v−4)/(v−6), defined only for v > 6; otherwise
// +Inf is returned as a sentinel.
func (i *InverseChiSquared) Skewness() float64 {
	if i.dof > 6 {
		return (4 * math.Sqrt(2) * math.Sqrt(-4+i.dof)) / (-6 + i.dof)
	}
	return math.Inf(1)
}

// Variance returns 2v²σ2²/((v−2)²(v−4)), defined only for v > 4;
// otherwise +Inf is returned as a sentinel.
func (i *InverseChiSquared) Variance() float64 {
	if i.dof > 4 {
		return (2 * (i.dof * i.dof) * (i.scale * i.scale)) / (math.Pow(i.dof-2, 2) * (i.dof - 4))
	}
	return math.Inf(1)
}
// Rand draws a random variate via inverse-transform sampling: a uniform
// value in [0,1) is pushed through the quantile function. The configured
// source is used when present, the global source otherwise.
func (i *InverseChiSquared) Rand() float64 {
	uniform := rand.Float64
	if i.src != nil {
		uniform = rand.New(i.src).Float64
	}
	return i.Inverse(uniform())
}
// ToExponential populates the natural-parameter vector of the
// exponential-family representation: (−v/2 − 1, −vσ2/2).
// The error from the vector constructor is ignored; it cannot fail for a
// fixed-size literal slice.
func (i *InverseChiSquared) ToExponential() {
	vec, _ := linear.NewArrayRealVectorFromSlice([]float64{-(i.dof / 2) - 1, -(i.dof * i.scale) / 2})
	i.natural = vec
	// n1 := vec.At(0)
	// n2 := vec.At(1)
	// vec2, _ := linear.NewSizedArrayRealVector(2)
	// vec2.SetEntry(0, math.Log((i.dof * i.scale) / 2)-specfunc.Psi((i.dof / 2) + 1))
	// vec2.SetEntry(1, -(i.dof + 4)/(i.dof * i.scale))
	// i.Moment = vec2
}

// SufficientStatistics returns the sufficient statistic vector
// T(x) = (ln x, 1/x) of the exponential-family form.
func (i *InverseChiSquared) SufficientStatistics(x float64) linear.RealVector {
	vec, _ := linear.NewArrayRealVectorFromSlice([]float64{math.Log(x), 1 / x})
	return vec
}
package testcase
import (
"fmt"
"testing"
"github.com/adamluzsi/testcase/internal"
)
// Contract meant to represent a Role Interface Contract.
// A role interface is a static code contract that expresses behavioral expectations as a set of method signatures.
// A role interface used by one or many consumers.
// These consumers often use implicit assumptions about how methods of the role interface behave.
// Using these assumptions makes it possible to simplify the consumer code.
// In testcase convention, instead of relying on implicit assumptions, the developer should create an explicit interface testing suite, in other words, a Contract.
// The code that supplies a role interface then able to import a role interface Contract,
// and confirm if the expected behavior is fulfilled by the implementation.
type Contract interface {
	// Spec defines the tests on the received *Spec object.
	Spec(s *Spec)
}

// OpenContract is a testcase independent Contract interface.
// Unlike Contract, it is expressed directly in terms of the standard
// library's testing types, so it can be run without a *testcase.Spec.
type OpenContract interface {
	// Test is the function that assert expected behavioral requirements from a supplier implementation.
	// These behavioral assumptions made by the Consumer in order to simplify and stabilise its own code complexity.
	// Every time a Consumer makes an assumption about the behavior of the role interface supplier,
	// it should be clearly defined it with tests under this functionality.
	Test(*testing.T)
	// Benchmark will help with what to measure.
	// When you define a role interface contract, you should clearly know what performance aspects important for your Consumer.
	// Those aspects should be expressed in a form of Benchmark,
	// so different supplier implementations can be easily A/B tested from this aspect as well.
	Benchmark(*testing.B)
}
// type BackwardCompatibleContract struct{ Contract }
// func (c BackwardCompatibleContract) Test(t *testing.T) { c.Contract.Spec(NewSpec(t)) }
// func (c BackwardCompatibleContract) Benchmark(b *testing.B) { c.Contract.Spec(NewSpec(b)) }
// RunContract is a helper function that makes executing one or many Contract easy.
// By using RunContract, you don't have to distinguish between testing or benchmark execution mode.
// It supports *testing.T, *testing.B, *testcase.T, *testcase.Spec and CustomTB test runners.
func RunContract(tb interface{}, contracts ...Contract) {
	if tb, ok := tb.(helper); ok {
		tb.Helper()
	}
	for _, c := range contracts {
		c := c // capture the loop variable for the closures below
		switch tb := tb.(type) {
		case *Spec:
			// Run the contract as a named, grouped sub-context of the Spec.
			name := contractName(c)
			tb.Context(name, c.Spec, Group(name))
		case testing.TB:
			// NOTE(review): these deferred Finish calls run when RunContract
			// returns, not per iteration, so with several contracts every
			// Spec stays open until the end — confirm this is intended.
			s := NewSpec(tb)
			defer s.Finish()
			c.Spec(s)
		default:
			panic(fmt.Errorf(`%T is an unknown test runner type`, tb))
		}
	}
}
// RunOpenContract executes one or many OpenContract values on the given test
// runner, dispatching on its concrete type: *Spec and TBRunner get a named
// sub-test, *testing.T runs Test, *testing.B runs Benchmark, and *testcase.T
// is unwrapped to its underlying testing.TB first.
func RunOpenContract(tb interface{}, contracts ...OpenContract) {
	if tb, ok := tb.(helper); ok {
		tb.Helper()
	}
	for _, c := range contracts {
		c := c // capture the loop variable for the closures below
		switch tb := tb.(type) {
		case *Spec:
			tb.Test(contractName(c), func(t *T) { RunOpenContract(t, c) })
		case *T:
			// Unwrap testcase.T and re-dispatch on the embedded testing.TB.
			RunOpenContract(tb.TB, c)
		case *testing.T:
			c.Test(tb)
		case *testing.B:
			c.Benchmark(tb)
		case TBRunner:
			tb.Run(contractName(c), func(tb testing.TB) { RunOpenContract(tb, c) })
		default:
			panic(fmt.Errorf(`%T is an unknown test runner type`, tb))
		}
	}
}
// contractName derives a display name for a contract: its fmt.Stringer value
// when implemented, otherwise a symbolic name from the internal package. The
// result is escaped via escapeName (defined elsewhere in this package).
func contractName(c interface{}) string {
	var name string
	switch c := c.(type) {
	case fmt.Stringer:
		name = c.String()
	default:
		name = internal.SymbolicName(c)
	}
	return escapeName(name)
}
package entity
import "go/ast"
// Miner interface is used to define a custom miner.
type Miner interface {
	// Name provides the name of the miner.
	Name() string
	// Visit applies the mining logic while traversing the Abstract Syntax Tree.
	Visit(node ast.Node) ast.Visitor
	// SetCurrentFile specifies the current file being mined.
	SetCurrentFile(filename string)
	// Results returns the results after mining.
	Results() interface{}
}

// MinerAbstractFactory is an interface for creating mining algorithm factories.
type MinerAbstractFactory interface {
	// Get returns a MinerFactory for the selected mining algorithm.
	Get(algorithm string) (MinerFactory, error)
}

// MinerFactory is an interface for creating mining algorithm instances.
type MinerFactory interface {
	// Make returns a mining algorithm instance.
	Make() (Miner, error)
}

// ExtractorFactory defines the contract for the factory functions capable of
// building Extractors.
type ExtractorFactory func(filename string) Extractor

// Extractor is used to define a custom identifier extractor.
type Extractor interface {
	// Visit applies the extraction logic while traversing the Abstract Syntax Tree.
	Visit(node ast.Node) ast.Visitor
	// Identifiers returns the extracted identifiers.
	Identifiers() []Identifier
}
// Splitter interface is used to define a custom splitter.
type Splitter interface {
	// Name returns the name of the custom splitter.
	Name() string
	// Split returns the split identifier.
	Split(token string) []Split
}

// SplitterAbstractFactory is an interface for creating splitting algorithm factories.
type SplitterAbstractFactory interface {
	// Get returns a SplitterFactory for the selected splitting algorithm.
	Get(algorithm string) (SplitterFactory, error)
}

// SplitterFactory is an interface for creating splitting algorithm instances.
type SplitterFactory interface {
	// Make returns a splitting algorithm instance built from miners.
	Make(miners map[string]Miner) (Splitter, error)
}

// Expander interface is used to define a custom expander.
type Expander interface {
	// Name returns the name of the custom expander.
	Name() string
	// ApplicableOn defines the name of splits used as input.
	ApplicableOn() string
	// Expand performs the expansion on the token as a whole.
	Expand(ident Identifier) []Expansion
}

// ExpanderAbstractFactory is an interface for creating expansion algorithm factories.
type ExpanderAbstractFactory interface {
	// Get returns an ExpanderFactory for the selected expansion algorithm.
	Get(algorithm string) (ExpanderFactory, error)
}

// ExpanderFactory is an interface for creating expansion algorithm instances.
type ExpanderFactory interface {
	// Make returns an expansion algorithm instance built from mining results.
	Make(miningResults map[string]Miner) (Expander, error)
}
package bmr
import "math"
// Calculate dispatches to the gender/standard specific BMR formula.
// gender must be "male" or "female" and standard must be "metric" or
// "imperial"; any other value yields a ValueTypeError. weight and height are
// interpreted in the units of the chosen standard, age in years.
func Calculate(gender, standard string, weight, height float64, age int) (int, error) {
	switch gender {
	case "male":
		switch standard {
		case "metric":
			return calculateMaleMetric(weight, height, age)
		case "imperial":
			return calculateMaleImperial(weight, height, age)
		default:
			// FIX: spelling "Uncompatible" -> "Incompatible" in the
			// user-facing error messages.
			return 0, ValueTypeError{"Incompatible measurement standard"}
		}
	case "female":
		switch standard {
		case "metric":
			return calculateFemaleMetric(weight, height, age)
		case "imperial":
			return calculateFemaleImperial(weight, height, age)
		default:
			return 0, ValueTypeError{"Incompatible measurement standard"}
		}
	default:
		return 0, ValueTypeError{"Incompatible gender"}
	}
}
// calculateMaleMetric computes the male BMR from metric inputs (presumably kg
// and cm — the coefficients match Harris–Benedict-style formulas), rejecting
// negative and zero values.
func calculateMaleMetric(weight, height float64, age int) (int, error) {
	switch {
	case weight < 0 || height < 0 || age < 0:
		return 0, NegativeValueError{"Negative value not allowed."}
	case weight == 0 || height == 0 || age == 0:
		return 0, ZeroValueError{"Values of zero not allowed."}
	}
	bmr := 66 + (13.7 * weight) + (5 * height) - (6.8 * float64(age))
	return int(math.Round(bmr)), nil
}
// calculateFemaleMetric computes the female BMR from metric inputs (presumably
// kg and cm), rejecting negative and zero values.
func calculateFemaleMetric(weight, height float64, age int) (int, error) {
	switch {
	case weight < 0 || height < 0 || age < 0:
		return 0, NegativeValueError{"Negative value not allowed."}
	case weight == 0 || height == 0 || age == 0:
		return 0, ZeroValueError{"Values of zero not allowed."}
	}
	bmr := 655 + (9.6 * weight) + (1.8 * height) - (4.7 * float64(age))
	return int(math.Round(bmr)), nil
}
// calculateMaleImperial computes the male BMR from imperial inputs (presumably
// pounds and inches), rejecting negative and zero values.
func calculateMaleImperial(weight, height float64, age int) (int, error) {
	switch {
	case weight < 0 || height < 0 || age < 0:
		return 0, NegativeValueError{"Negative value not allowed."}
	case weight == 0 || height == 0 || age == 0:
		return 0, ZeroValueError{"Values of zero not allowed."}
	}
	bmr := 66 + (6.23 * weight) + (12.7 * height) - (6.8 * float64(age))
	return int(math.Round(bmr)), nil
}
func calculateFemaleImperial(weight, height float64, age int) (int, error) {
if weight < 0 || height < 0 || age < 0 {
return 0, NegativeValueError{"Negative value not allowed."}
}
if weight == 0 || height == 0 || age == 0 {
return 0, ZeroValueError{"Values of zero not allowed."}
}
return int(math.Round(655 + (4.35 * weight) + (4.7 * height) - (4.7 * float64(age)))), nil
} | pkg/bmr/bmr.go | 0.770206 | 0.609757 | bmr.go | starcoder |
package main
import (
"fmt"
"math"
"math/rand"
"sort"
"sync"
"time"
)
// By : <NAME>
// ------- Please Solve the Problem -------
// I’m trying to find the closest point from some points that I have, for example I have about 1000 set of geographhical coordinates (lat,long).
// Given one coordinates, I want to find the closest one from that set.
// Note that the list of point changes all the time, and the closes distance depend on when and where the user’s point.
// What is the best optimized solution for this ?
// Please implement this in a language you are comfortable with and push to github.
const (
	// Initial Point: the center around which random points are generated.
	latitude  float64 = 0
	longitude float64 = 0
	// Earth Radius; doubles as the spread radius for the random points.
	centerRadius float64 = 6371000 // In meter
)

// point is one named geographic coordinate plus the distance (meters) from
// the simulated client's position, filled in by getClosestPoint.
type point struct {
	namePoint string
	lat       float64
	long      float64
	dist      float64 // In meter
}

// tempMem holds the current set of points keyed by namePoint; a sync.Map
// because the mover goroutine and the client goroutine access it concurrently.
var tempMem sync.Map
// initPoints seeds tempMem with 1000 random points named P0001..P1000, spread
// over an earth-radius disc around the configured center.
func initPoints() {
	for n := 1; n <= 1000; n++ {
		lat, lng := randLatLngFromCenter(latitude, longitude, centerRadius)
		name := fmt.Sprintf("P%04s", fmt.Sprint(n))
		p := point{
			namePoint: name,
			lat:       lat,
			long:      lng,
		}
		tempMem.Store(name, p)
	}
}
// pointsMover replaces every stored point with fresh random coordinates every
// 3 seconds (the old comment said 2 seconds; the code sleeps 3) and signals
// the WaitGroup after a fixed 30-second run.
func pointsMover(wg *sync.WaitGroup) {
	defer wg.Done()
	// NOTE(review): this inner goroutine has no stop signal; it keeps mutating
	// tempMem until the process exits. Acceptable here because main exits
	// right after wg.Wait(), but worth confirming.
	go func() {
		for {
			tempMem.Range(func(k, v interface{}) bool {
				la, lo := randLatLngFromCenter(latitude, longitude, centerRadius)
				tempMem.Store(k, point{
					namePoint: k.(string),
					lat:       la,
					long:      lo,
				})
				return true
			})
			time.Sleep(time.Second * 3)
		}
	}()
	// Runner will run until 30 seconds
	time.Sleep(time.Second * 30)
}
// getClosestPoint simulates one client at a random position: it measures the
// Haversine distance from the client to every stored point, sorts ascending,
// and prints the five nearest.
// NOTE(review): nearby[:5] panics if fewer than 5 points are stored; fine here
// since initPoints creates 1000.
func getClosestPoint() {
	nearby := []point{}
	// Random lat-long for the simulated client.
	la, lo := randLatLngFromCenter(latitude, longitude, centerRadius)
	tempMem.Range(func(k, v interface{}) bool {
		val := v.(point)
		nearby = append(nearby, point{
			namePoint: val.namePoint,
			lat:       val.lat,
			long:      val.long,
			dist:      distance(val.lat, la, val.long, lo),
		})
		// Un-Comment if you want to see current all lists
		// fmt.Println("range (): ", v)
		return true
	})
	sort.Slice(nearby, func(i, j int) bool {
		return nearby[i].dist < nearby[j].dist
	})
	// Print the client's own position.
	fmt.Println(fmt.Sprintf("\n====================================\nYour Latitude : %v\nYour Longitude : %v", la, lo))
	// Print the 5 nearest candidates.
	for i, v := range nearby[:5] {
		fmt.Println(fmt.Sprintf("-----------------%d------------------\nName Point : %v\nDistance : %.4f Kilometers\nLatitude : %v\nLogitude : %v", i+1, v.namePoint, v.dist/1000, v.lat, v.long))
	}
	fmt.Println("====================================")
}
// Geographic information systems (GIS) Algorithm.
// randLatLngFromCenter returns a uniformly distributed random point
// (lat, long) inside the circle of the given radius (meters) around the
// center. 111300 is roughly the number of meters per degree of latitude, so
// the radius is converted to degrees before sampling.
func randLatLngFromCenter(centerLatitude, centerLongitude, radius float64) (float64, float64) {
	y0 := centerLatitude
	x0 := centerLongitude
	rd := radius / 111300 // radius in degrees
	// FIX: the previous implementation called rand.Seed(time.Now().UnixNano())
	// on every invocation. rand.Seed is deprecated since Go 1.20 (the global
	// source is auto-seeded), and reseeding per call makes calls within the
	// same nanosecond return identical points.
	u := rand.Float64()
	v := rand.Float64()
	// sqrt(u) makes the distribution uniform over the disc AREA instead of
	// clustered at the center.
	w := rd * math.Sqrt(u)
	t := 2 * math.Pi * v
	x := w * math.Cos(t)
	y := w * math.Sin(t)
	x1 := x + x0
	y1 := y + y0
	return y1, x1
}
// ----- ----- ----- ----- CORE ----- ----- ----- -----

// distance returns the great-circle distance in meters between two points
// given in degrees, using the Haversine formula. Note the argument order:
// (lat1, lat2, lon1, lon2).
func distance(lat1, lat2, lon1, lon2 float64) float64 {
	// Convert all four coordinates from degrees to radians.
	lon1 = lon1 * math.Pi / 180
	lon2 = lon2 * math.Pi / 180
	lat1 = lat1 * math.Pi / 180
	lat2 = lat2 * math.Pi / 180
	dLon := lon2 - lon1
	dLat := lat2 - lat1
	// Haversine term and the resulting central angle.
	h := math.Pow(math.Sin(dLat/2), 2) + math.Cos(lat1)*math.Cos(lat2)*math.Pow(math.Sin(dLon/2), 2)
	angle := 2 * math.Asin(math.Sqrt(h))
	const earthRadius float64 = 6371000 // meters
	return angle * earthRadius
}
// flush removes every entry from tempMem.
func flush() {
	tempMem.Range(func(key, _ interface{}) bool {
		tempMem.Delete(key)
		return true
	})
}
// main wires the demo together: seed 1000 points, start the mover that
// rewrites them every 3 seconds, start a client that queries the 5 nearest
// points every 5 seconds, wait out the mover's 30-second run, then clean up.
func main() {
	// This state will generate 1000 random coordinate (lat, long) points
	// based from center lat-long (0, 0) and on earth radius (in meters)
	initPoints()
	var wg sync.WaitGroup
	wg.Add(1)
	// All list Point Coordinates will update every 3 seconds
	go pointsMover(&wg)
	// Dummy client: generates a random coordinate and asks for the 5 nearest
	// points (Haversine distance) every 5 seconds.
	// NOTE(review): this goroutine has no stop signal; it dies with the process.
	go func() {
		for {
			getClosestPoint()
			time.Sleep(time.Second * 5)
		}
	}()
	// Wait until the points-mover finishes (30 seconds).
	wg.Wait()
	// flush memory
	flush()
	fmt.Println("Server Shut Down")
}
package main
import (
"fmt"
"math"
"time"
"unicode"
"github.com/faiface/pixel"
"github.com/faiface/pixel/imdraw"
"github.com/faiface/pixel/pixelgl"
"github.com/faiface/pixel/text"
"github.com/golang/freetype/truetype"
"golang.org/x/image/font"
"golang.org/x/image/font/gofont/goregular"
)
// ttfFromBytesMust parses a TrueType font from raw bytes and returns a face
// at the given size, panicking on parse failure (Must-style helper intended
// for package-level initialization).
func ttfFromBytesMust(b []byte, size float64) font.Face {
	ttf, err := truetype.Parse(b)
	if err != nil {
		panic(err)
	}
	return truetype.NewFace(ttf, &truetype.Options{
		Size:              size,
		GlyphCacheEntries: 1,
	})
}

// DefaultFont is a text atlas over Go Regular at size 14 covering ASCII and
// Latin runes; used for the on-screen joint-angle labels.
var DefaultFont = text.NewAtlas(
	ttfFromBytesMust(goregular.TTF, 14),
	text.ASCII, text.RangeTable(unicode.Latin),
)
// Length is a fixed-point length; judging by the M and MM constants below,
// the raw integer unit is 1/100 of a millimeter.
type Length int

// M converts the length to meters.
func (length Length) M() float64 { return length.Float64() / M }

// MM converts the length to millimeters.
func (length Length) MM() float64 { return length.Float64() / MM }

// Screen converts the length to screen pixels (50 raw units per pixel).
func (length Length) Screen() float64 { return length.Float64() / 50 }

// Float64 returns the raw value as a float64.
func (length Length) Float64() float64 { return float64(length) }

// Sqrt returns the (truncated) square root of the raw value; used by
// Vector.Distance on a sum of squared raw units.
func (length Length) Sqrt() Length { return Length(math.Sqrt(float64(length))) }

const (
	// M is the number of raw Length units per meter.
	M = 100000
	// MM is the number of raw Length units per millimeter.
	MM = 100
	// RadToDeg converts radians to degrees.
	RadToDeg = 360 / TAU
)
// Vector is a 2-D point/displacement in Length units.
type Vector struct{ X, Y Length }

// Add returns the component-wise sum a + b.
func (a Vector) Add(b Vector) Vector {
	return Vector{
		X: a.X + b.X,
		Y: a.Y + b.Y,
	}
}

// Sub returns the component-wise difference a - b.
func (a Vector) Sub(b Vector) Vector {
	return Vector{
		X: a.X - b.X,
		Y: a.Y - b.Y,
	}
}

// Distance returns the Euclidean distance between a and b.
// NOTE(review): X*X + Y*Y is computed in raw integer units and could overflow
// for very large coordinates; fine at this demo's scale.
func (a Vector) Distance(b Vector) Length {
	d := a.Sub(b)
	return (d.X*d.X + d.Y*d.Y).Sqrt()
}

// Pixel converts the vector to pixel screen coordinates.
func (a Vector) Pixel() pixel.Vec {
	return pixel.V(a.X.Screen(), a.Y.Screen())
}

// Angle returns the angle (radians) of the direction pointing from a to b.
func Angle(a, b Vector) float64 {
	d := b.Sub(a)
	return math.Atan2(d.Y.Float64(), d.X.Float64())
}
// Joint is one node of a kinematic chain: its current position, absolute
// angle, the length of the segment ending at this joint, and the angle
// relative to the previous joint.
type Joint struct {
	Pos           Vector
	Angle         float64
	Length        Length
	RelativeAngle float64
}

// Reach moves the joint so it lies `length` away from target along the
// current joint->target direction (one FABRIK constraint step).
func (joint *Joint) Reach(target Vector, length Length) {
	// +1 guards against a zero distance (integer division by zero).
	distance := joint.Pos.Distance(target) + 1
	joint.Pos.X = target.X + (joint.Pos.X-target.X)*length/distance
	joint.Pos.Y = target.Y + (joint.Pos.Y-target.Y)*length/distance
}

// RecalculateAngle updates tail's absolute angle from head's position.
func (tail *Joint) RecalculateAngle(head *Joint) {
	tail.Angle = Angle(head.Pos, tail.Pos)
}

// String renders the joint as "<x y angle°>" with coordinates in millimeters.
func (tail *Joint) String() string {
	return fmt.Sprintf("<%.02f %.02f %.0f°>", tail.Pos.X.MM(), tail.Pos.Y.MM(), tail.Angle*RadToDeg)
}
// Leg is a three-joint (base, femur, tibia) kinematic chain plus the target
// it is reaching for; Joints aliases the three joints in chain order.
type Leg struct {
	Base   Joint
	Femur  Joint
	Tibia  Joint
	Target Vector
	Joints [3]*Joint
}

// NewLeg builds a leg with the given segment lengths, wires up the Joints
// aliases, and places the joints in the initial straight pose via Reset.
func NewLeg(femurLength, tibiaLength Length) *Leg {
	leg := &Leg{}
	leg.Femur.Length = femurLength
	leg.Tibia.Length = tibiaLength
	leg.Joints[0] = &leg.Base
	leg.Joints[1] = &leg.Femur
	leg.Joints[2] = &leg.Tibia
	leg.Reset()
	return leg
}

// Reset straightens the chain: each joint is stacked in the +Y direction from
// its predecessor at its own segment length.
func (leg *Leg) Reset() {
	for i := 0; i < len(leg.Joints)-1; i++ {
		head, tail := leg.Joints[i], leg.Joints[i+1]
		tail.Pos.X = head.Pos.X
		tail.Pos.Y = head.Pos.Y + tail.Length
	}
}
// Reach runs one FABRIK (Forward And Backward Reaching Inverse Kinematics)
// iteration toward target, then recomputes the absolute and parent-relative
// joint angles.
func (leg *Leg) Reach(target Vector) {
	leg.Target = target
	origin := leg.Joints[0].Pos
	n := len(leg.Joints)
	// Forward pass: pin the end effector to the target and pull the rest of
	// the chain after it, preserving segment lengths.
	leg.Joints[n-1].Pos = target
	for i := n - 2; i >= 0; i-- {
		center, placed := leg.Joints[i], leg.Joints[i+1]
		center.Reach(placed.Pos, placed.Length)
	}
	// Backward pass: re-anchor the base at its original position and pull the
	// chain back toward it.
	leg.Joints[0].Pos = origin
	for i := 0; i < n-1; i++ {
		placed, center := leg.Joints[i], leg.Joints[i+1]
		center.Reach(placed.Pos, center.Length)
	}
	// Update absolute angles and the angle of each joint relative to its parent.
	for i := 0; i < n-1; i++ {
		placed, center := leg.Joints[i], leg.Joints[i+1]
		center.RecalculateAngle(placed)
		center.RelativeAngle = center.Angle - placed.Angle
	}
}
// Render draws the leg into the IMDraw buffer: tapered colored segments (hue
// stepped by the golden ratio per segment), a blue direction gizmo with an
// angle label at every joint, and a dot at the current IK target.
func (leg *Leg) Render(draw *imdraw.IMDraw) {
	// Segment width starts at 3px per joint and tapers by 20% per segment.
	w := float64(len(leg.Joints) * 3)
	for i := 0; i < len(leg.Joints)-1; i++ {
		head := leg.Joints[i]
		tail := leg.Joints[i+1]
		draw.Color = HSL{float32(i) * math.Phi, 0.5, 0.5}
		draw.EndShape = imdraw.SharpEndShape
		draw.Push(
			head.Pos.Pixel(),
			tail.Pos.Pixel())
		draw.Line(w)
		w *= 0.8
	}
	for _, joint := range leg.Joints {
		// 30px absolute-angle indicator plus a text label in degrees.
		draw.Color = RGB{0, 0, 255}
		direction := pixel.V(math.Cos(joint.Angle), math.Sin(joint.Angle))
		draw.Push(
			joint.Pos.Pixel(),
			joint.Pos.Pixel().Add(direction.Scaled(30)))
		draw.Line(2)
		t := text.New(joint.Pos.Pixel(), DefaultFont)
		t.Color = RGB{0, 0, 0}
		fmt.Fprintf(t, "%.2f", joint.Angle*RadToDeg)
		t.Draw(draw, pixel.IM)
	}
	// Target marker.
	draw.Color = HSL{TAU * 3 / 4, 0.8, 0.3}
	draw.Push(leg.Target.Pixel())
	draw.Circle(5, 0)
}
// Robot is a pair of mirrored legs mounted on a common hip line.
type Robot struct {
	Left  *Leg
	Right *Leg
}

// NewRobot builds two legs with the given segment lengths; the bases sit
// 30mm to either side of the body center at 48.8mm height, with the left
// base rotated half a turn so the legs mirror each other.
func NewRobot(femurLength, tibiaLength Length) *Robot {
	robot := &Robot{}
	robot.Left = NewLeg(femurLength, tibiaLength)
	robot.Left.Base.Pos.X = -30.00 * MM
	robot.Left.Base.Pos.Y = 48.80 * MM
	robot.Left.Base.Angle = TAU / 2
	robot.Left.Reset()
	robot.Right = NewLeg(femurLength, tibiaLength)
	robot.Right.Base.Pos.Y = 48.80 * MM
	robot.Right.Base.Pos.X = 30.00 * MM
	robot.Right.Reset()
	return robot
}

// Update retargets both feet for time t: the foot X position sweeps
// sinusoidally between 40mm and 90mm from the center, at ground level (y=0).
// dt is currently unused.
func (robot *Robot) Update(t, dt float64) {
	tx := 40*MM + Length((math.Sin(t)*0.5+0.5)*50*MM)
	robot.Right.Reach(Vector{tx, 0})
	robot.Left.Reach(Vector{-tx, 0})
}

// Render draws both legs.
func (robot *Robot) Render(draw *imdraw.IMDraw) {
	robot.Left.Render(draw)
	robot.Right.Render(draw)
}
// run is the pixelgl main loop: open a 1024x768 vsynced window, build the
// two-legged robot, and on every frame update the IK targets and redraw the
// grid, the axis gizmo, and the legs.
func run() {
	cfg := pixelgl.WindowConfig{
		Title:  "IK-2D",
		Bounds: pixel.R(0, 0, 1024, 768),
		VSync:  true,
	}
	win, err := pixelgl.NewWindow(cfg)
	if err != nil {
		panic(err)
	}
	robot := NewRobot(44.43*MM, 50.0*MM)
	start := time.Now()
	for !win.Closed() {
		win.Clear(RGB{255, 255, 255})
		now := time.Since(start).Seconds()
		// NOTE(review): 1/30 is integer constant division and evaluates to 0;
		// harmless today because Update ignores dt, but it should read 1.0/30.
		robot.Update(now, 1/30)
		draw := imdraw.New(DefaultFont.Picture())
		// Put the drawing origin at the window center.
		center := win.Bounds().Size().Scaled(0.5)
		draw.SetMatrix(pixel.IM.Moved(center))
		{
			// Background grid: a line every 10mm, darker every 50mm.
			const N = 50
			for t := -N; t <= N; t++ {
				draw.Color = HSL{0, 0, 0.9}
				if t%5 == 0 {
					draw.Color = HSL{0, 0, 0.8}
				}
				draw.Push(
					Vector{Length(t) * 10 * MM, -10 * MM * N}.Pixel(),
					Vector{Length(t) * 10 * MM, 10 * MM * N}.Pixel(),
				)
				draw.Line(1)
				draw.Push(
					Vector{-10 * MM * N, Length(t) * 10 * MM}.Pixel(),
					Vector{10 * MM * N, Length(t) * 10 * MM}.Pixel(),
				)
				draw.Line(1)
			}
			// gizmo: 50mm Y and X axis markers at the origin.
			draw.Color = HSL{0, 0.8, 0.5}
			draw.Push(pixel.ZV, Vector{0, 50 * MM}.Pixel())
			draw.Line(2)
			draw.Color = HSL{TAU / 4, 0.8, 0.5}
			draw.Push(pixel.ZV, Vector{50 * MM, 0}.Pixel())
			draw.Line(2)
		}
		robot.Render(draw)
		draw.Draw(win)
		win.Update()
	}
}

// main hands control to pixelgl, which must own the main thread.
func main() {
	pixelgl.Run(run)
}
// TAU is the full-circle constant 2*pi.
const TAU = math.Pi * 2

// RGBA is a packed 0xRRGGBBAA color.
type RGBA uint32

// RGB is an 8-bit-per-channel opaque color.
type RGB struct{ R, G, B uint8 }

// HSL is hue/saturation/lightness; the hue is in radians and is interpreted
// modulo TAU (see hsla).
type HSL struct{ H, S, L float32 }
// RGBA implements color.Color: the four 8-bit channels are unpacked and
// widened to 16 bits by replication (x |= x<<8), the standard 8->16 expansion.
func (rgba RGBA) RGBA() (r, g, b, a uint32) {
	r = uint32(rgba >> 24 & 0xFF)
	g = uint32(rgba >> 16 & 0xFF)
	b = uint32(rgba >> 8 & 0xFF)
	a = uint32(rgba >> 0 & 0xFF)
	r |= r << 8
	g |= g << 8
	b |= b << 8
	a |= a << 8
	return
}

// RGBA implements color.Color for the opaque RGB type; alpha is always 0xFFFF.
func (rgb RGB) RGBA() (r, g, b, a uint32) {
	r, g, b = uint32(rgb.R), uint32(rgb.G), uint32(rgb.B)
	r |= r << 8
	g |= g << 8
	b |= b << 8
	a = 0xFFFF
	return
}

// RGBA implements color.Color by converting through hsla and saturating each
// channel to 16 bits; alpha is always 0xFFFF.
func (hsl HSL) RGBA() (r, g, b, a uint32) {
	r1, g1, b1, _ := hsla(hsl.H, hsl.S, hsl.L, 1)
	return sat16(r1), sat16(g1), sat16(b1), 0xFFFF
}
// hue converts one HSL channel to its RGB contribution; v1 and v2 are the
// lightness/saturation terms precomputed by hsla, and h is the channel hue in
// [0,1] (offset by ±1/3 for the R and B channels), wrapped back into range
// first.
func hue(v1, v2, h float32) float32 {
	switch {
	case h < 0:
		h += 1
	case h > 1:
		h -= 1
	}
	switch {
	case 6*h < 1:
		return v1 + (v2-v1)*6*h
	case 2*h < 1:
		return v2
	case 3*h < 2:
		return v1 + (v2-v1)*(2.0/3.0-h)*6
	default:
		return v1
	}
}
// hsla converts hue (radians), saturation, lightness, and alpha to r,g,b,a
// components in [0,1] using the standard HSL->RGB algorithm: grey when s == 0,
// otherwise each channel samples the hue() curve at h, h+1/3, h-1/3.
func hsla(h, s, l, a float32) (r, g, b, ra float32) {
	if s == 0 {
		return l, l, l, a
	}
	// Normalize the hue from radians into [0,1).
	h = float32(math.Mod(float64(h), TAU) / TAU)
	var v2 float32
	if l < 0.5 {
		v2 = l * (1 + s)
	} else {
		v2 = (l + s) - s*l
	}
	v1 := 2*l - v2
	r = hue(v1, v2, h+1.0/3.0)
	g = hue(v1, v2, h)
	b = hue(v1, v2, h-1.0/3.0)
	ra = a
	return
}
// sat16 scales a 0..1 float32 to 0..0xFFFF, saturating out-of-range input.
func sat16(v float32) uint32 {
	scaled := v * 0xFFFF
	switch {
	case scaled >= 0xFFFF:
		return 0xFFFF
	case scaled <= 0:
		return 0
	default:
		return uint32(scaled) & 0xFFFF
	}
}
// sat8 scales a 0..1 float32 to 0..0xFF, saturating out-of-range input.
func sat8(v float32) uint8 {
	scaled := v * 0xFF
	switch {
	case scaled >= 0xFF:
		return 0xFF
	case scaled <= 0:
		return 0
	default:
		return uint8(scaled) & 0xFF
	}
}
package plan
import (
"fmt"
"strings"
"time"
)
// Config describes the unstructured test plan: which reports to produce,
// per-call and wait-for timeouts, the hosts to wait for before starting, the
// axes and behaviors that span the test matrix, and where to write the JSON
// report.
type Config struct {
	Reports        []string
	CallTimeout    time.Duration
	WaitForTimeout time.Duration
	WaitForHosts   []string
	Axes           Axes
	Behaviors      Behaviors
	JSONReportPath string
}
// Axis is one named dimension of the test matrix together with its possible
// values. (The comment previously here described Axes, not Axis.)
type Axis struct {
	Name   string
	Values []string
}

// Axes is a slice of Axis, sortable by axis name (implements sort.Interface).
type Axes []Axis

func (a Axes) Len() int           { return len(a) }
func (a Axes) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a Axes) Less(i, j int) bool { return a[i].Name < a[j].Name }

// Index returns the Axes indexed by name of Axis.
func (a Axes) Index() map[string]Axis {
	axes := make(map[string]Axis, len(a))
	for _, axis := range a {
		axes[axis.Name] = axis
	}
	return axes
}
// Filter specifies criteria for skipping specific test cases of a behavior.
// All test cases for a behavior where all parameter values match the
// AxisMatchers will be skipped.
type Filter struct {
	Matchers []AxisMatcher
}

// Matches returns true if all matchers associated with this Filter match the
// given test arguments. A Filter with no matchers therefore matches
// everything.
func (f Filter) Matches(testArgs TestClientArgs) bool {
	for _, match := range f.Matchers {
		if !match.Matches(testArgs) {
			return false
		}
	}
	return true
}

// String returns the formatted matchers of the Filter joined by '+'.
func (f Filter) String() string {
	var formattedMatches []string
	for _, match := range f.Matchers {
		formattedMatches = append(formattedMatches, match.String())
	}
	return strings.Join(formattedMatches, "+")
}

// AxisMatcher matches one axis name against a required value.
type AxisMatcher struct {
	Name  string
	Value string
}

// Matches returns true if the given TestClientArgs carry a.Value for a.Name.
func (a AxisMatcher) Matches(args TestClientArgs) bool {
	return args[a.Name] == a.Value
}

// String returns the matcher formatted as "name:value".
func (a AxisMatcher) String() string {
	return fmt.Sprintf("%s:%s", a.Name, a.Value)
}
// Behavior represents the test behavior that will be triggered by crossdock:
// the axis naming the clients that run it, any extra parameter axes, and the
// filters describing combinations to skip.
type Behavior struct {
	Name       string
	ClientAxis string
	ParamsAxes []string
	Filters    []Filter
}

// HasAxis returns true if the passed axis is referenced by the behavior,
// either as the client axis or as one of the parameter axes.
func (b Behavior) HasAxis(axisToFind string) bool {
	if axisToFind == b.ClientAxis {
		return true
	}
	for _, axis := range b.ParamsAxes {
		if axis == axisToFind {
			return true
		}
	}
	return false
}

// Behaviors is a collection of Behavior objects sortable by behavior name
// (implements sort.Interface).
type Behaviors []Behavior

func (b Behaviors) Len() int           { return len(b) }
func (b Behaviors) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b Behaviors) Less(i, j int) bool { return b[i].Name < b[j].Name }
// attachFilters validates the filters configured for each behavior and stores
// them on the behavior in place. It returns an error if a filter references
// an axis the behavior does not use.
func (b Behaviors) attachFilters(filtersByBehavior map[string][]Filter) error {
	for i, behavior := range b {
		filters := filtersByBehavior[behavior.Name]
		for _, filter := range filters {
			// FIX: removed a dead `if len(filters) == 0 { continue }` guard —
			// the loop body never runs for an empty slice, so the check could
			// never fire.
			for _, axisToMatch := range filter.Matchers {
				if !behavior.HasAxis(axisToMatch.Name) {
					return fmt.Errorf("%q is not a parameter for behavior %q", axisToMatch, behavior.Name)
				}
			}
		}
		behavior.Filters = filters
		b[i] = behavior
	}
	return nil
}
// Plan describes the entirety of the test program: its configuration and the
// expanded list of test cases. less is an internal sort predicate, set
// elsewhere in this package.
type Plan struct {
	Config    *Config
	TestCases []TestCase
	less      func(i, j int) bool
}

// TestCase represents the request made to one test client: the client axis
// value plus the argument map, and whether (and why) the case is skipped.
type TestCase struct {
	Plan       *Plan
	Client     string
	Arguments  TestClientArgs
	Skip       bool
	SkipReason string
}

// TestClientArgs represents custom args to pass to a test client, keyed by
// axis name.
type TestClientArgs map[string]string
package main
import (
"image"
"image/color"
"image/draw"
"image/png"
"log"
"math"
"os"
"github.com/qeedquan/go-media/math/f64"
)
// main renders a gallery of superformula shapes into superformula.png:
// parametric plots on the left half of the image and implicit
// (distance-form) plots on the right half, one cell per parameter set.
func main() {
	// Each row is one parameter set (m, n1, n2, n3); a and b are fixed at 1.
	P := [][4]float64{
		{3, 4.5, 10, 10},
		{4, 12, 15, 15},
		{7, 10, 6, 6},
		{5, 4, 4, 4},
		{5, 2, 7, 7},
		{5, 2, 13, 13},
		{4, 1, 1, 1},
		{4, 1, 7, 8},
		{6, 1, 7, 8},
		{2, 2, 2, 2},
		{1, 0.5, 0.5, 0.5},
		{2, 0.5, 0.5, 0.5},
		{3, 0.5, 0.5, 0.5},
		{5, 1, 1, 1},
		{2, 1, 1, 1},
		{7, 3, 4, 17},
		{2, 1, 4, 8},
		{6, 1, 4, 8},
		{7, 2, 8, 4},
		{4, 0.5, 0.5, 4},
		{8, 0.5, 0.5, 8},
		{16, 0.5, 0.5, 16},
		{3, 30, 15, 15},
		{4, 30, 15, 15},
		{16, 2, 0.5, 16},
	}
	r := image.Rect(0, 0, 1024, 1024)
	m := image.NewRGBA(r)
	draw.Draw(m, m.Bounds(), image.NewUniform(color.RGBA{128, 128, 128, 255}), image.ZP, draw.Src)
	for i, P := range P {
		// Grid layout: N cells per row, each s pixels square, centered at (ox, oy).
		N := 4
		s := r.Dx() / 2 / N
		ox := (i%N)*s + s/2
		oy := (i/N)*s + s/2
		// parametric form, increment a small angle, calculate r
		// then convert to rectangular coordinate to set the pixel, need a small step
		// so we don't miss points
		for t := 0.0; t <= 2*math.Pi; t += 1e-4 {
			r := 8 * super(t, 1, 1, P[0], P[0], P[1], P[2], P[3])
			x := r*math.Cos(t) + float64(ox)
			y := r*math.Sin(t) + float64(oy)
			m.Set(int(x), int(y), color.RGBA{224, 224, 255, 255})
		}
		// implicit distance form, we iterate through our draw viewport
		// determine if the distance is within the shape boundary and draw it
		D := float64(s)
		// zoom factor
		Z := 3.0
		for y := -D; y <= D; y++ {
			for x := -D; x <= D; x++ {
				// all parameters used seem to have domain [-5,5] (determined empirically)
				px := f64.LinearRemap(x, -D, D, -5*Z, 5*Z)
				py := f64.LinearRemap(y, -D, D, -5*Z, 5*Z)
				ds := imsuper(px, py, 1, 1, P[0], P[0], P[1], P[2], P[3])
				if ds <= 1 {
					m.Set(int(x+float64(ox)+float64(r.Dx())/2), int(y+float64(oy)), color.RGBA{224, 224, 255, 255})
				}
			}
		}
	}
	f, err := os.Create("superformula.png")
	ck(err)
	ck(png.Encode(f, m))
	ck(f.Close())
}
// ck aborts the program with a log message when err is non-nil.
func ck(err error) {
	if err != nil {
		log.Fatal(err)
	}
}
// super evaluates the superformula radius r(phi) for shape parameters
// (a, b, m1, m2, n1, n2, n3).
func super(phi, a, b, m1, m2, n1, n2, n3 float64) float64 {
	cosTerm := math.Pow(math.Abs(math.Cos(m1*phi/4)/a), n2)
	sinTerm := math.Pow(math.Abs(math.Sin(m2*phi/4)/b), n3)
	return math.Pow(cosTerm+sinTerm, -1/n1)
}

// imsuper is the implicit form of the superformula: the radial distance of
// (x, y) minus the shape radius at that angle — non-positive inside the
// shape, positive outside.
func imsuper(x, y, a, b, m1, m2, n1, n2, n3 float64) float64 {
	radius := math.Hypot(x, y)
	angle := math.Atan2(y, x)
	return radius - super(angle, a, b, m1, m2, n1, n2, n3)
}
package slice
import (
"constraints"
"math/rand"
"time"
)
// init seeds the global math/rand source once at package load.
// NOTE(review): rand.Seed is deprecated since Go 1.20, where the global
// source is seeded automatically; this call can be dropped on modern Go.
func init() {
	rand.Seed(time.Now().UnixNano())
}
// Each invokes f once for every element of arr, in order.
func Each[A ~[]T, T any](arr A, f func(T)) {
	for i := range arr {
		f(arr[i])
	}
}
// Collect returns a new slice produced by applying the transformation f to
// each element of arr, in order.
func Collect[A ~[]T, T any, M any](arr A, f func(T) M) []M {
	out := make([]M, 0, len(arr))
	for _, elem := range arr {
		out = append(out, f(elem))
	}
	return out
}
// Reduce folds arr into a single value: starting from initial, it combines
// the accumulator with each element left-to-right using f.
func Reduce[A ~[]T, T any, M any](arr A, f func(M, T) M, initial M) M {
	acc := initial
	for _, elem := range arr {
		acc = f(acc, elem)
	}
	return acc
}
// Find returns the first element of arr satisfying f together with true.
// When no element matches, it returns the zero value of T and false.
func Find[A ~[]T, T any](arr A, f func(T) bool) (T, bool) {
	for _, elem := range arr {
		if f(elem) {
			return elem, true
		}
	}
	var zero T
	return zero, false
}
// Filter returns the elements of arr for which f reports true, preserving
// order; the result is nil when nothing matches.
func Filter[A ~[]T, T any](arr A, f func(T) bool) A {
	var kept A
	for _, elem := range arr {
		if f(elem) {
			kept = append(kept, elem)
		}
	}
	return kept
}
// Every reports whether f is true for all elements of arr; it is vacuously
// true for an empty slice.
func Every[A ~[]T, T any](arr A, f func(T) bool) bool {
	for _, elem := range arr {
		if !f(elem) {
			return false
		}
	}
	return true
}
// Some reports whether at least one element of arr satisfies f; it is false
// for an empty slice.
func Some[A ~[]T, T any](arr A, f func(T) bool) bool {
	for _, elem := range arr {
		if f(elem) {
			return true
		}
	}
	return false
}
// Contains reports whether value occurs anywhere in arr.
func Contains[A ~[]T, T comparable](arr A, value T) bool {
	for i := range arr {
		if arr[i] == value {
			return true
		}
	}
	return false
}
// Max returns the largest element of arr.
// For an empty slice it returns the zero value of T.
func Max[A ~[]T, T constraints.Ordered](arr A) T {
	if len(arr) == 0 {
		var zero T
		return zero
	}
	best := arr[0]
	for _, elem := range arr[1:] {
		if elem > best {
			best = elem
		}
	}
	return best
}
// Min returns the smallest element of arr.
// For an empty slice it returns the zero value of T.
func Min[A ~[]T, T constraints.Ordered](arr A) T {
	if len(arr) == 0 {
		var zero T
		return zero
	}
	best := arr[0]
	for _, elem := range arr[1:] {
		if elem < best {
			best = elem
		}
	}
	return best
}
// GroupBy partitions arr into sub-slices keyed by the result of f, keeping
// the original relative order within each group.
func GroupBy[A ~[]T, T any, M comparable](arr A, f func(T) M) map[M]A {
	groups := make(map[M]A)
	for _, elem := range arr {
		key := f(elem)
		groups[key] = append(groups[key], elem)
	}
	return groups
}
// Sample returns one uniformly chosen element of arr.
// It panics on an empty slice, because rand.Intn(0) panics.
func Sample[A ~[]T, T any](arr A) T {
	idx := rand.Intn(len(arr))
	return arr[idx]
}
// SampleN returns n random elements drawn without replacement from the whole
// slice. A negative n yields an empty slice; n greater than len(arr) is
// capped, returning all elements in random order.
func SampleN[A ~[]T, T any](arr A, n int) []T {
	if n < 0 {
		return A{}
	}
	if n > len(arr) {
		n = len(arr)
	}
	// FIX: permute the indices of the WHOLE slice and keep the first n.
	// The previous code used rand.Perm(n), which only ever sampled from the
	// first n elements of arr.
	idx := rand.Perm(len(arr))
	ret := make([]T, n)
	for i := 0; i < n; i++ {
		ret[i] = arr[idx[i]]
	}
	return ret
}
// Union returns the distinct values of all passed slices in first-seen order.
// With zero arguments it returns an empty slice; with exactly one argument
// the slice is returned as-is (no deduplication), matching the fast path.
func Union[A ~[]T, T comparable](arr ...A) A {
	if len(arr) == 0 {
		return A{}
	}
	if len(arr) == 1 {
		return arr[0]
	}
	seen := make(map[T]struct{})
	out := make(A, 0, len(arr[0]))
	for _, part := range arr {
		for _, v := range part {
			if _, dup := seen[v]; dup {
				continue
			}
			seen[v] = struct{}{}
			out = append(out, v)
		}
	}
	return out
}
// Intersection returns the values present in all passed slices, deduplicated,
// ordered as they appear in the last slice. With zero arguments it returns an
// empty slice; with one argument the slice is returned as-is.
//
// FIX: the previous implementation pushed both slices' elements through a
// single "seen" map, so a value duplicated within ONE slice leaked into the
// result even when the other slices did not contain it (e.g.
// Intersection([1,1],[2]) returned [1]).
func Intersection[A ~[]T, T comparable](arr ...A) A {
	if len(arr) == 0 {
		return A{}
	}
	if len(arr) == 1 {
		return arr[0]
	}
	// seen holds the membership of the intersection computed so far.
	seen := make(map[T]struct{}, len(arr[0]))
	for _, v := range arr[0] {
		seen[v] = struct{}{}
	}
	var ret A
	for _, next := range arr[1:] {
		ret = nil
		kept := make(map[T]struct{})
		for _, v := range next {
			if _, ok := seen[v]; !ok {
				continue
			}
			if _, dup := kept[v]; dup {
				continue
			}
			kept[v] = struct{}{}
			ret = append(ret, v)
		}
		seen = kept
	}
	return ret
}
// Uniq returns arr with duplicates removed, keeping the first occurrence of
// each value.
func Uniq[A ~[]T, T comparable](arr A) []T {
	seen := make(map[T]struct{})
	out := make(A, 0)
	for _, v := range arr {
		if _, dup := seen[v]; dup {
			continue
		}
		seen[v] = struct{}{}
		out = append(out, v)
	}
	return out
}
// IndexOf returns the index of the first occurrence of value in arr, or -1
// when it is absent.
func IndexOf[A ~[]T, T comparable](arr A, value T) int {
	for i, v := range arr {
		if v == value {
			return i
		}
	}
	return -1
}
// LastIndexOf is like IndexOf, but scans from the end: it returns the index
// of the last occurrence of value, or -1 when it is absent.
func LastIndexOf[A ~[]T, T comparable](arr A, value T) int {
	for i := len(arr); i > 0; i-- {
		if arr[i-1] == value {
			return i - 1
		}
	}
	return -1
}
// Reverse reverses the order of arr's elements in place.
func Reverse[A ~[]T, T any](arr A) {
	for left, right := 0, len(arr)-1; left < right; left, right = left+1, right-1 {
		arr[left], arr[right] = arr[right], arr[left]
	}
}
// defaultvalue returns the zero value of T.
func defaultvalue[T any]() T {
	var zero T
	return zero
}
package snowflake
var (
	// Epoch defines a start time and is recommended to be set to the project
	// go-live time (milliseconds; the default is the classic snowflake epoch).
	Epoch int64 = 1288834974657
	// OvertimeBits defines the number of bits occupied by Overtime. Automatic initialization.
	OvertimeBits uint8 = 64
	// SequenceBits defines the number of bits occupied by Sequence.
	SequenceBits uint8 = 12
	// OvertimeOffsetBits defines the offset of Overtime. Automatic initialization.
	OvertimeOffsetBits uint8 = SequenceBits
	// SequenceOffsetBits defines the offset of Sequence (zero). Automatic initialization.
	SequenceOffsetBits uint8
	// OvertimeMax defines the maximum value of Overtime. Automatic initialization.
	// FIX: was `-1 ^ (-1 << OvertimeMax)`, a self-referencing initialization
	// cycle (a compile error in Go); the mask must be derived from
	// OvertimeBits, mirroring how SequenceMax is derived from SequenceBits.
	OvertimeMax int64 = -1 ^ (-1 << OvertimeBits)
	// SequenceMax defines the maximum value of Sequence. Automatic initialization.
	SequenceMax int64 = -1 ^ (-1 << SequenceBits)
)
// ID is the raw interface of a snowflake id.
// The following functions represent the actions that can be used.
type ID interface {
	// ToInt64 converts the id to an int64.
	ToInt64() (int64, error)
	// ToBytes converts the id to a []byte.
	ToBytes() ([]byte, error)
	// ToString converts the id to a string.
	ToString() (string, error)
	// CreateTime calculates the id's creation time.
	CreateTime() int64
}
// Manager is the raw interface used to create snowflake ids — effectively the
// generator. The following functions represent the operations that can be
// used.
type Manager interface {
	// New returns a snowflake ID interface holding the key data derived from
	// the given parameter map.
	New(map[string]int64) ID
	// NewToInt64 returns int64 data that can be used to represent an ID, plus any error.
	NewToInt64(map[string]int64) (int64, error)
	// ParseInt64 returns the ID parsed from int64 data, plus any error.
	ParseInt64(int64) (ID, error)
	// NewBytes returns []byte data that can be used to represent an ID, plus any error.
	NewBytes(map[string]int64) ([]byte, error)
	// ParseBytes returns the ID parsed from []byte data, plus any error.
	ParseBytes([]byte) (ID, error)
	// NewString returns string data that can be used to represent an ID, plus any error.
	NewString(map[string]int64) (string, error)
	// ParseString returns the ID parsed from string data, plus any error.
	ParseString(string) (ID, error)
}
// NewManager returns a default Manager object (via NewDefaultManager).
// Snowflake ids are mainly generated/parsed through a Manager.
func NewManager() (Manager, error) {
	return NewDefaultManager()
}
package functional
import (
"math"
"math/rand"
)
// UnaryFn is a real-valued function of a single float64 argument.
type UnaryFn func(float64) float64

// Add returns the pointwise sum of f and g: x -> f(x) + g(x).
func (f UnaryFn) Add(g UnaryFn) UnaryFn {
	return func(x float64) float64 { return f(x) + g(x) }
}

// Sub returns the pointwise difference of f and g: x -> f(x) - g(x).
func (f UnaryFn) Sub(g UnaryFn) UnaryFn {
	return func(x float64) float64 { return f(x) - g(x) }
}

// Mul returns the pointwise product of f and g: x -> f(x) * g(x).
func (f UnaryFn) Mul(g UnaryFn) UnaryFn {
	return func(x float64) float64 { return f(x) * g(x) }
}

// Div returns the pointwise quotient of f and g: x -> f(x) / g(x).
func (f UnaryFn) Div(g UnaryFn) UnaryFn {
	return func(x float64) float64 { return f(x) / g(x) }
}
// Shuffle permutes vec in place with the Fisher–Yates algorithm, drawing
// randomness from the package-level math/rand source.
func Shuffle(vec []int) {
	// Stop at i == 1: when i == 0, rand.Intn(1) is always 0 and the swap is
	// a no-op, so the original final iteration did nothing.
	for i := len(vec) - 1; i >= 1; i-- {
		j := rand.Intn(i + 1)
		vec[i], vec[j] = vec[j], vec[i]
	}
}
// Constant returns a UnaryFn that ignores its input and always yields c.
func Constant(c float64) UnaryFn {
	return func(float64) float64 { return c }
}

// KSigmoid returns the sigmoid curve horizontally scaled by k: x -> Sigmoid(k*x).
func KSigmoid(k float64) UnaryFn {
	return func(x float64) float64 { return Sigmoid(k * x) }
}

// KSigmoidPrime returns the derivative of KSigmoid(k), applying the chain
// rule: d/dx Sigmoid(k*x) = k * SigmoidPrime(k*x).
func KSigmoidPrime(k float64) UnaryFn {
	return func(x float64) float64 { return k * SigmoidPrime(k*x) }
}
// Scale returns the linear map x -> k*x.
func Scale(k float64) UnaryFn {
	return func(x float64) float64 { return x * k }
}

// Offset returns the translation x -> x + b.
func Offset(b float64) UnaryFn {
	return func(x float64) float64 { return b + x }
}

// Affine returns the line x -> k*x + b.
func Affine(k, b float64) UnaryFn {
	return func(x float64) float64 { return b + x*k }
}

// Power returns the power function x -> x^p.
func Power(p float64) UnaryFn {
	return func(x float64) float64 { return math.Pow(x, p) }
}
var (
	// ConstantOne ignores its argument and always returns 1.
	ConstantOne UnaryFn = func(x float64) float64 { return 1 }
	// Identity returns its argument unchanged.
	Identity UnaryFn = func(x float64) float64 { return x }
	// Square returns x*x.
	Square UnaryFn = func(x float64) float64 { return x * x }
	// Abs is the absolute-value function.
	// Fixed: the argument and result are already float64, so the previous
	// float64(math.Abs(float64(x))) conversions were redundant; math.Abs
	// itself has exactly the UnaryFn signature.
	Abs UnaryFn = math.Abs
	// Sign returns 1 for positive x and -1 otherwise (note: Sign(0) == -1).
	Sign UnaryFn = func(x float64) float64 {
		if x > 0 {
			return 1
		}
		return -1
	}
	// Sigmoid is the logistic function 1 / (1 + e^-x).
	// Fixed: removed redundant float64 conversions around math.Exp.
	Sigmoid UnaryFn = func(x float64) float64 {
		return 1.0 / (1.0 + math.Exp(-x))
	}
	// SigmoidPrime is the derivative of Sigmoid, using the identity
	// sigma'(x) = sigma(x) * (1 - sigma(x)).
	SigmoidPrime UnaryFn = func(x float64) float64 {
		x = Sigmoid(x)
		return x * (1 - x)
	}
)
// BinaryFn is a real-valued function of two float64 arguments.
type BinaryFn func(x, y float64) float64

// Add returns the pointwise sum of f and g: (x, y) -> f(x, y) + g(x, y).
func (f BinaryFn) Add(g BinaryFn) BinaryFn {
	return func(x, y float64) float64 { return f(x, y) + g(x, y) }
}

// Sub returns the pointwise difference of f and g: (x, y) -> f(x, y) - g(x, y).
func (f BinaryFn) Sub(g BinaryFn) BinaryFn {
	return func(x, y float64) float64 { return f(x, y) - g(x, y) }
}

// Mul returns the pointwise product of f and g: (x, y) -> f(x, y) * g(x, y).
func (f BinaryFn) Mul(g BinaryFn) BinaryFn {
	return func(x, y float64) float64 { return f(x, y) * g(x, y) }
}

// Div returns the pointwise quotient of f and g: (x, y) -> f(x, y) / g(x, y).
func (f BinaryFn) Div(g BinaryFn) BinaryFn {
	return func(x, y float64) float64 { return f(x, y) / g(x, y) }
}
var (
	// Add is the binary sum (x, y) -> x + y.
	Add BinaryFn = func(x, y float64) float64 { return x + y }
	// Sub is the binary difference (x, y) -> x - y.
	Sub BinaryFn = func(x, y float64) float64 { return x - y }
	// Mul is the binary product (x, y) -> x * y.
	Mul BinaryFn = func(x, y float64) float64 { return x * y }
	// Div is the binary quotient (x, y) -> x / y. There is no zero guard:
	// division by zero yields +/-Inf or NaN per IEEE-754.
	Div BinaryFn = func(x, y float64) float64 { return x / y }
	// Pow is math.Pow exposed as a BinaryFn.
	Pow BinaryFn = math.Pow
)
package audio
import (
"time"
"github.com/oakmound/oak/v4/audio/pcm"
)
// FadeIn wraps a reader such that it will linearly fade in over the given duration.
func FadeIn(dur time.Duration, in pcm.Reader) pcm.Reader {
	perSec := in.PCMFormat().BytesPerSecond()
	// Fade window in bytes: bytes-per-millisecond * fade-length-in-milliseconds.
	// Integer math, so sub-millisecond remainders of the byte rate are dropped.
	bytesToFadeIn := int((time.Duration(perSec) / 1000) * (dur / time.Millisecond))
	return &fadeInReader{
		Reader:        in,
		toFadeIn:      bytesToFadeIn,
		totalToFadeIn: bytesToFadeIn,
	}
}
// fadeInReader scales samples read from the embedded reader by a linearly
// increasing factor until the fade window has been consumed.
type fadeInReader struct {
	pcm.Reader
	// toFadeIn counts the fade bytes still remaining; totalToFadeIn is the
	// fixed byte size of the whole fade window.
	toFadeIn, totalToFadeIn int
}
// ReadPCM reads from the wrapped reader and attenuates the leading audio by
// a factor ramping linearly from 0 to 1 across the fade window. It handles
// 8-, 16- and 32-bit signed little-endian samples; other bit depths pass
// through unscaled. It returns the byte count and error from the underlying
// read.
func (fir *fadeInReader) ReadPCM(b []byte) (n int, err error) {
	// Fix: compare with <= rather than ==. A read that does not end exactly
	// on the fade boundary (16/32-bit strides decrement by 2/4) can push
	// toFadeIn negative, after which == 0 never fires; the fade is equally
	// complete, and skipping the per-sample scaling (whose factor clamps to
	// 1 anyway) preserves output while avoiding the wasted work.
	if fir.toFadeIn <= 0 {
		return fir.Reader.ReadPCM(b)
	}
	read, err := fir.Reader.ReadPCM(b)
	if err != nil {
		return read, err
	}
	format := fir.PCMFormat()
	switch format.Bits {
	case 8:
		for i, byt := range b[:read] {
			// Fraction of the fade completed so far, clamped to at most 1.
			fadeInPercent := (float64(fir.totalToFadeIn) - float64(fir.toFadeIn)) / float64(fir.totalToFadeIn)
			if fadeInPercent >= 1 {
				fadeInPercent = 1
			}
			b[i] = byte(int8(float64(int8(byt)) * fadeInPercent))
			fir.toFadeIn--
		}
	case 16:
		for i := 0; i+2 <= read; i += 2 {
			fadeInPercent := (float64(fir.totalToFadeIn) - float64(fir.toFadeIn)) / float64(fir.totalToFadeIn)
			if fadeInPercent >= 1 {
				fadeInPercent = 1
			}
			// Little-endian decode, scale, re-encode.
			i16 := int16(b[i]) + (int16(b[i+1]) << 8)
			new16 := int16(float64(i16) * fadeInPercent)
			b[i] = byte(new16)
			b[i+1] = byte(new16 >> 8)
			fir.toFadeIn -= 2
		}
	case 32:
		for i := 0; i+4 <= read; i += 4 {
			fadeInPercent := (float64(fir.totalToFadeIn) - float64(fir.toFadeIn)) / float64(fir.totalToFadeIn)
			if fadeInPercent >= 1 {
				fadeInPercent = 1
			}
			i32 := int32(b[i]) +
				(int32(b[i+1]) << 8) +
				(int32(b[i+2]) << 16) +
				(int32(b[i+3]) << 24)
			new32 := int32(float64(i32) * fadeInPercent)
			b[i] = byte(new32)
			b[i+1] = byte(new32 >> 8)
			b[i+2] = byte(new32 >> 16)
			b[i+3] = byte(new32 >> 24)
			fir.toFadeIn -= 4
		}
	}
	return read, nil
}
// FadeOut wraps a reader such that it will linearly fade out over the given duration.
func FadeOut(dur time.Duration, in pcm.Reader) pcm.Reader {
	perSec := in.PCMFormat().BytesPerSecond()
	// Fade window in bytes: bytes-per-millisecond * fade-length-in-milliseconds.
	// Integer math, so sub-millisecond remainders of the byte rate are dropped.
	bytestoFadeOut := int((time.Duration(perSec) / 1000) * (dur / time.Millisecond))
	return &fadeOutReader{
		Reader:         in,
		toFadeOut:      bytestoFadeOut,
		totaltoFadeOut: bytestoFadeOut,
	}
}
// fadeOutReader scales samples read from the embedded reader by a linearly
// decreasing factor until the fade window has been consumed.
type fadeOutReader struct {
	pcm.Reader
	// toFadeOut counts the fade bytes still remaining; totaltoFadeOut is the
	// fixed byte size of the whole fade window.
	toFadeOut, totaltoFadeOut int
}
// ReadPCM reads from the wrapped reader and attenuates the audio by a factor
// ramping linearly from 1 to 0 across the fade window. It handles 8-, 16-
// and 32-bit signed little-endian samples; other bit depths pass through
// unscaled.
//
// NOTE(review): the post-fade behavior is inconsistent. If toFadeOut lands
// exactly on 0, the early return below passes audio through at FULL volume;
// if it overshoots negative (16/32-bit strides decrement by 2/4), the clamp
// in the loops pins the factor at 0 and produces silence forever. Confirm
// which behavior is intended before changing either branch.
func (fir *fadeOutReader) ReadPCM(b []byte) (n int, err error) {
	if fir.toFadeOut == 0 {
		return fir.Reader.ReadPCM(b)
	}
	read, err := fir.Reader.ReadPCM(b)
	if err != nil {
		return read, err
	}
	format := fir.PCMFormat()
	switch format.Bits {
	case 8:
		for i, byt := range b[:read] {
			// Fraction of the fade remaining, clamped to at least 0.
			fadeOutPercent := float64(fir.toFadeOut) / float64(fir.totaltoFadeOut)
			if fadeOutPercent <= 0 {
				fadeOutPercent = 0
			}
			b[i] = byte(int8(float64(int8(byt)) * fadeOutPercent))
			fir.toFadeOut--
		}
	case 16:
		for i := 0; i+2 <= read; i += 2 {
			fadeOutPercent := float64(fir.toFadeOut) / float64(fir.totaltoFadeOut)
			if fadeOutPercent <= 0 {
				fadeOutPercent = 0
			}
			// Little-endian decode, scale, re-encode.
			i16 := int16(b[i]) + (int16(b[i+1]) << 8)
			new16 := int16(float64(i16) * fadeOutPercent)
			b[i] = byte(new16)
			b[i+1] = byte(new16 >> 8)
			fir.toFadeOut -= 2
		}
	case 32:
		for i := 0; i+4 <= read; i += 4 {
			fadeOutPercent := float64(fir.toFadeOut) / float64(fir.totaltoFadeOut)
			if fadeOutPercent <= 0 {
				fadeOutPercent = 0
			}
			i32 := int32(b[i]) +
				(int32(b[i+1]) << 8) +
				(int32(b[i+2]) << 16) +
				(int32(b[i+3]) << 24)
			new32 := int32(float64(i32) * fadeOutPercent)
			b[i] = byte(new32)
			b[i+1] = byte(new32 >> 8)
			b[i+2] = byte(new32 >> 16)
			b[i+3] = byte(new32 >> 24)
			fir.toFadeOut -= 4
		}
	}
	return read, nil
}
// Compile-time checks that both fade wrappers satisfy pcm.Reader.
var _ pcm.Reader = &fadeOutReader{}
var _ pcm.Reader = &fadeInReader{}
package slab
import (
"fmt"
compgeo "github.com/200sc/go-compgeo"
"github.com/200sc/go-compgeo/dcel"
"github.com/200sc/go-compgeo/dcel/pointLoc"
"github.com/200sc/go-compgeo/dcel/pointLoc/visualize"
"github.com/200sc/go-compgeo/geom"
"github.com/200sc/go-compgeo/search"
"github.com/200sc/go-compgeo/search/tree"
)
// Decompose is based on Dobkin and Lipton's work into
// point location.
// The real difficulties in Slab Decomposition are all in the
// persistent bst itself, so this is a fairly simple function.
//
// It sweeps the DCEL's vertices from left to right (sorted on dimension 0),
// maintaining a persistent BST of the given tree type whose instants record
// which edges cross each vertical slab, and returns a point locator backed
// by that history. A BadDCELError is returned for a nil or sub-triangle
// DCEL, and a BadDimensionError when vertices have fewer than 2 dimensions.
// NOTE(review): this function logs verbosely via fmt.Println and drives the
// visualize package as a side effect; it appears intended for debugging.
func Decompose(dc *dcel.DCEL, bstType tree.Type) (pointLoc.LocatesPoints, error) {
	if dc == nil || len(dc.Vertices) < 3 {
		return nil, compgeo.BadDCELError{}
	}
	if dc.Vertices[0].D() < 2 {
		// I don't know why someone would want to get the slab decomposition of
		// a structure which has more than two dimensions but there could be
		// applications so we don't reject that idea offhand.
		return nil, compgeo.BadDimensionError{}
	}
	t := tree.New(bstType).ToPersistent()
	pts := dc.VerticesSorted(0)
	i := 0
	for i < len(pts) {
		p := pts[i]
		v := dc.Vertices[p]
		// Set the BST's instant to the x value of this point
		visualize.HighlightColor = visualize.CheckLineColor
		visualize.DrawVerticalLine(v)
		t.SetInstant(v.X())
		ct := t.ThisInstant()
		// Aggregate all points at this x value so we do not
		// attempt to add edges to a tree which contains edges
		// pointing to the left of v[0]
		vs := []*dcel.Vertex{v}
		for (i+1) < len(pts) && geom.F64eq(dc.Vertices[pts[i+1]].X(), v.X()) {
			i++
			p = pts[i]
			vs = append(vs, dc.Vertices[p])
		}
		le := []*dcel.Edge{}
		re := []*dcel.Edge{}
		for _, v := range vs {
			// We don't need to check the returned error here
			// because we already checked this above-- if a DCEL
			// contains points where some points have a different
			// dimension than others that will cause further problems,
			// but this is too expensive to check here.
			leftEdges, rightEdges, _, _ := v.PartitionEdges(0)
			le = append(le, leftEdges...)
			re = append(re, rightEdges...)
		}
		fmt.Println("Left Edges", le)
		fmt.Println("Right Edges", re)
		// Remove all edges from the PersistentBST connecting to the left
		// of the points
		visualize.HighlightColor = visualize.RemoveColor
		for _, e := range le {
			fmt.Println("Removing", e.Twin)
			err := ct.Delete(shellNode{compEdge{e.Twin}, search.Nil{}})
			fmt.Println("Remove result", err)
			fmt.Println(ct)
		}
		// Add all edges to the PersistentBST connecting to the right
		// of the point
		visualize.HighlightColor = visualize.AddColor
		for _, e := range re {
			// We always want the half edge that points to the right,
			// and between the two faces this edge is on we want the
			// face which is LOWER. This is because we ultimately point
			// locate to the edge above the query point. Returning an
			// edge for a query represents that the query is below
			// the edge.
			fmt.Println("Adding", e)
			ct.Insert(shellNode{compEdge{e}, faces{e.Face, e.Twin.Face}})
			fmt.Println(ct)
		}
		i++
	}
	visualize.HighlightColor = visualize.CheckLineColor
	return &PointLocator{t, dc.Faces[dcel.OUTER_FACE]}, nil
}
// PointLocator is a construct that uses slab
// decomposition for point location.
type PointLocator struct {
	// dp is the persistent history of slab contents built by Decompose.
	dp search.DynamicPersistent
	// outerFace is the DCEL's unbounded face; it is excluded from results.
	outerFace *dcel.Face
}
// String renders the locator's underlying persistent tree.
func (spl *PointLocator) String() string {
	return fmt.Sprint(spl.dp)
}
// PointLocate returns which face within this SlabPointLocator
// the query point lands, within two dimensions.
// It returns (nil, nil) when the point lies outside every interior face
// (above/below the bracketing edges, in the outer face, or on an empty
// slab), and an InsufficientDimensionsError when fewer than two
// coordinates are given.
// NOTE(review): logs verbosely via fmt.Println and drives the visualize
// package as a side effect; appears intended for debugging.
func (spl *PointLocator) PointLocate(vs ...float64) (*dcel.Face, error) {
	if len(vs) < 2 {
		return nil, compgeo.InsufficientDimensionsError{}
	}
	fmt.Println("Querying", vs)
	// Restore the slab (tree instant) containing the query's x coordinate.
	tree := spl.dp.AtInstant(vs[0])
	fmt.Println("Tree found:")
	fmt.Println(tree)
	p := geom.Point{vs[0], vs[1], 0}
	// Bracket the query: e is the edge found searching downward, e2 upward.
	e, f := tree.SearchDown(p, 0)
	if e == nil {
		fmt.Println("Location on empty tree")
		return nil, nil
	}
	e2, f2 := tree.SearchUp(p, 0)
	fmt.Println("Edges found", e, e2)
	if geom.VerticalCompare(p, e.(compEdge)) == search.Greater {
		fmt.Println(p, "is above edge", e)
		return nil, nil
	}
	if geom.VerticalCompare(p, e2.(compEdge)) == search.Less {
		fmt.Println(p, "is below edge", e2)
		return nil, nil
	}
	// We then do PIP on each face, and return
	// whichever is true, if any.
	f3 := f.(faces)
	f4 := f2.(faces)
	faces := []*dcel.Face{f3.f1, f3.f2, f4.f1, f4.f2}
	for _, f5 := range faces {
		// Skip the unbounded outer face; it would contain every point.
		if f5 != spl.outerFace {
			fmt.Println("Checking if face contains", p)
			visualize.HighlightColor = visualize.CheckFaceColor
			visualize.DrawFace(f5)
			if f5.Contains(p) {
				fmt.Println("P was contained")
				return f5, nil
			}
		}
	}
	return nil, nil
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.