Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | type | length / range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 7 to 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class (Python) |
| max_stars_repo_path | string | length 4 to 247 |
| max_stars_repo_name | string | length 4 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 368k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 to 247 |
| max_issues_repo_name | string | length 4 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 to 247 |
| max_forks_repo_name | string | length 4 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 1 to 1.04M |
| avg_line_length | float64 | 1.77 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | string | length 7 to 1.04M |
| filtered:remove_function_no_docstring | int64 | -102 to 942k |
| filtered:remove_class_no_docstring | int64 | -354 to 977k |
| filtered:remove_delete_markers | int64 | 0 to 60.1k |
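The rows below follow this schema, one row per Python source file. As a minimal sketch of working with such a dump, assuming the rows were exported to a hypothetical rows.parquet file (not part of this document), pandas can load the columns and compare the filtered content column against original_content:

import pandas as pd

# Hypothetical export of the rows shown below; the file name is an assumption.
df = pd.read_parquet("rows.parquet")

# Basic per-file metadata.
print(df[["hexsha", "max_stars_repo_path", "max_stars_repo_name", "size", "max_stars_count"]])

# How much text the docstring filters removed from each file.
for row in df.itertuples(index=False):
    removed = len(row.original_content) - len(row.content)
    print(row.max_stars_repo_path, removed, "characters removed by filtering")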
hexsha: 8cf2d55e40a7f8dba7559856ea643336c51f17b9 | size: 9,477 | ext: py | lang: Python
max_stars: lib/structures.py in logicplace/race-mini-editor @ 3f18b048ef5608e8ee96f87e6337d3e9fb63c95d, licenses ["MIT"], count 3, events 2020-08-08T09:41:51.000Z to 2022-02-23T00:36:10.000Z
max_issues: lib/structures.py in logicplace/race-mini-editor @ 3f18b048ef5608e8ee96f87e6337d3e9fb63c95d, licenses ["MIT"], count null, events null to null
max_forks: lib/structures.py in logicplace/race-mini-editor @ 3f18b048ef5608e8ee96f87e6337d3e9fb63c95d, licenses ["MIT"], count null, events null to null
content (filtered):
import re
import struct
from typing import Any, BinaryIO, Dict, NamedTuple, Optional, Sequence, Tuple
from .util import music, tilesets, load_tileset, spritesets, load_spriteset, PokeImportError
from .sound import MinLibSound
from .encoders import encode_tiles
avg_line_length: 25.339572 | max_line_length: 92 | alphanum_fraction: 0.661602
original_content:
import re
import struct
from typing import Any, BinaryIO, Dict, NamedTuple, Optional, Sequence, Tuple
from .util import music, tilesets, load_tileset, spritesets, load_spriteset, PokeImportError
from .sound import MinLibSound
from .encoders import encode_tiles
def to3b(x: int):
return x.to_bytes(3, "little")
def read2b_base(f: BinaryIO, table_base: int, idx: int) -> int:
f.seek(table_base + idx * 2)
return (table_base & 0xff0000) + int.from_bytes(f.read(2), "little")
def write2b_base_and_seek(f: BinaryIO, table_base: int, idx: int, addr: int):
f.seek(table_base + idx * 2)
f.write((addr & 0xffff).to_bytes(2, "little"))
f.seek(addr)
def readXb_until(f: BinaryIO, x: int, until: int):
ret = []
comp = ~until
while comp != until:
data = f.read(x)
ret.append(data)
comp = data[0]
return ret
class TrackGhost:
# 0bPRLDUCBA
DPAD = 0x78
POWER = 0x80
RIGHT = 0x40
LEFT = 0x20
DOWN = 0x10
UP = 0x08
C = 0x04
B = 0x02
A = 0x01
_splitter = re.compile(r'#.*|\d+|[^\s\d#]+')
dir_to_arrows = {
0: "",
RIGHT: "→",
LEFT: "←",
DOWN: "↓",
UP: "↑",
RIGHT | DOWN: "↘",
LEFT | DOWN: "↙",
# Stupid ones
RIGHT | UP: "↗",
LEFT | UP: "↖",
RIGHT | LEFT: "↔",
DOWN | UP: "↕",
RIGHT | LEFT | DOWN: "↔↓",
RIGHT | LEFT | UP: "↔↑",
RIGHT | DOWN | UP: "→↕",
LEFT | DOWN | UP: "←↕",
RIGHT | LEFT | DOWN | UP: "↔↕",
}
# yapf: disable
arrow_to_dir = {
"r": 0,
">": RIGHT, "→": RIGHT,
"<": LEFT, "←": LEFT,
"v": DOWN, "↓": DOWN,
"^": UP, "↑": UP,
"\\": RIGHT | DOWN, "↘": RIGHT | DOWN,
"/": RIGHT | DOWN, "↙": RIGHT | DOWN,
"C": C, "B": B, "A": A,
"c": C, "b": B, "a": A,
# Stupid ones
"↗": RIGHT | UP,
"↖": LEFT | UP,
"↔": RIGHT | LEFT,
"↕": DOWN | UP,
}
# yapf: enable
def __init__(self):
self.ops: Sequence[Tuple[int, int]] = []
@classmethod
def from_bin(cls, source: BinaryIO) -> "TrackGhost":
ret = cls()
while True:
data = source.read(10)
for i in range(0, 10, 2):
keys = data[i]
ticks = data[i + 1]
ret.ops.append((keys, ticks))
if keys == 0xff:
source.seek(-(10 - i + 2), 1)
return ret
def to_bin(self) -> bytes:
ret = bytearray()
for op in self.ops:
ret.extend(op)
return bytes(ret)
def to_string(self):
ret = []
last_dir = 0
for keys, ticks in self.ops:
op = []
if keys == 0:
op.append("r")
last_dir = 0
elif keys == 0xff:
break
elif keys == self.LEFT | self.B:
op.append("↞")
last_dir = self.LEFT
elif keys == self.RIGHT | self.B:
op.append("↠")
last_dir = self.RIGHT
else:
direction = keys & self.DPAD
arrows = self.dir_to_arrows[direction]
if keys & self.A:
if direction == last_dir:
op.append("↥")
else:
op.append(arrows)
op.append("A")
else:
op.append(arrows)
if keys & self.B:
op.append("B")
last_dir = direction
op.append(str(ticks))
ret.append("".join(op))
return " ".join(ret)
@classmethod
def from_string(cls, s: str) -> "TrackGhost":
# ←< ↑^ →> ↓v ↘\ ↙/ and ↖↗↔↕ should work too I guess
# r reset/rest
# ↥ jump while continuing movement
# ↞ dash left
# ↠ dash right
# ↳x typical jump dash, [(1, 8), (0, 16), (64, x)]
ret = cls()
matched = cls._splitter.finditer(s)
for mo in matched:
arrows: str = mo.group(0)
if arrows[0] == "#":
continue
try:
ticks: str = next(matched).group(0)
except StopIteration:
raise PokeImportError("Bad AI, missing tick count for final command.") from None
if not ticks.isdecimal():
raise PokeImportError(f"Expected tick count, found {ticks}")
ticks_i = int(ticks)
if arrows[0] == "↥" and all(x in "ABC" for x in arrows[1:]):
value = ret.ops[-1][0] | cls.A
if "B" in arrows:
value |= cls.B
if "C" in arrows:
value |= cls.C
ret.ops.append((value, ticks_i))
elif arrows in ("↠", "»"):
ret.ops.append((cls.RIGHT | cls.B, ticks_i))
elif arrows in ("↞", "«"):
ret.ops.append((cls.LEFT | cls.B, ticks_i))
else:
direction = 0
for a in arrows:
if a in "↥↠»↞«":
suggest = "A" if a == "↥" else "B"
raise PokeImportError(
f"{a} direction cannot be used with other directions. Use {suggest} instead."
)
try:
direction |= cls.arrow_to_dir[a]
except KeyError:
raise PokeImportError(f"Key {a} is unknown.")
ret.ops.append((direction, ticks_i))
ret.ops.append((0xff, 0))
return ret
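# Usage sketch (not part of the original file): the notation documented in from_string above
# can be round-tripped, for example:
#   ghost = TrackGhost.from_string("→10 ↥A5 r3")
#   assert ghost.ops[-1] == (0xff, 0)  # from_string appends the 0xff terminator
#   print(ghost.to_string())           # prints the equivalent arrow/tick string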
class SpriteAttrs(NamedTuple):
x: int
y: int
tile: int
enable: bool
invert_color: bool
vflip: bool
hflip: bool
def to_bin(self) -> bytes:
options = (0x08 if self.enable else 0x00) | (0x04 if self.invert_color else
0x00) | (0x02 if self.vflip else 0x00) | (0x01 if self.hflip else 0x00)
return struct.pack("BBBB", self.x, self.y, self.tile, options)
@classmethod
def from_bin(cls, b: bytes) -> "SpriteAttrs":
options = b[3]
return cls(
b[0], b[1], b[2], bool(options & 0x08), bool(options & 0x04), bool(options & 0x02),
bool(options & 0x01)
)
class GrandPrixTrackMetaData(NamedTuple):
# bases are 3 bytes, the rest are 1
tileset_base: int
tilemap_base: int
width: int
height: int
bg_music: int
starting_x: int
starting_y: int
unk2: int
sprite_base: int
preview_tileset_base: int
preview_tilemap_base: int
preview_map_width: int
preview_map_height: int
@classmethod
def from_bin(cls, b: bytes) -> "GrandPrixTrackMetaData":
return cls(
*(
int.from_bytes(a, "little") if isinstance(a, bytes) else a
for a in struct.unpack("<3s3s6B3s3s3s2B", b)
)
)
def to_bin(self) -> bytes:
return struct.pack(
"<3s3s6B3s3s3s2B", to3b(self.tileset_base), to3b(self.tilemap_base), self.width,
self.height, self.bg_music, self.starting_x, self.starting_y, self.unk2,
to3b(self.sprite_base), to3b(self.preview_tileset_base),
to3b(self.preview_tilemap_base), self.preview_map_width, self.preview_map_height
)
class GrandPrixTrack:
ident: str
index: Optional[int]
bases: Dict[str, int]
metadata: GrandPrixTrackMetaData
tilemap: Sequence[int]
preview_tilemap: Sequence[int]
title_ditto_tilemap: Sequence[int]
title_ranking_tilemap: Sequence[int]
splash_spritemap: Sequence[SpriteAttrs]
bgm: MinLibSound
ai_easy: TrackGhost
ai_normal: TrackGhost
ai_hard: TrackGhost
def __init__(self, ident: str, index: int, *, config: Dict[str, Any]):
self.bases = {}
self.ident = ident
self.index = index
self.config = config
def read(self, f: BinaryIO):
idx, config = self.index, self.config
metadata_base = read2b_base(f, config["metadata_array_base"], idx)
title_ditto_base = read2b_base(f, config["titles_nobar_tilemaps_array_base"], idx)
title_ranking_base = read2b_base(f, config["titles_bar_tilemaps_array_base"], idx)
f.seek(config["track_screens_array_base"] + 2 * idx)
splash_map_base = 0x070000 | int.from_bytes(f.read(2), "little")
ai_easy_base = read2b_base(f, config["ai_easy_table_base"], idx)
ai_normal_base = read2b_base(f, config["ai_normal_table_base"], idx)
ai_hard_base = read2b_base(f, config["ai_hard_table_base"], idx)
self.bases = {
"ai_easy": ai_easy_base,
"ai_normal": ai_normal_base,
"ai_hard": ai_hard_base,
"metadata": metadata_base,
"title_ditto": title_ditto_base,
"title_ranking": title_ranking_base,
"splash_map": splash_map_base,
}
f.seek(splash_map_base)
splash_map_data = f.read(12 * 4)
self.splash_spritemap = [
SpriteAttrs.from_bin(splash_map_data[i:i + 4]) for i in range(0, 12 * 4, 4)
]
f.seek(ai_easy_base)
self.ai_easy = TrackGhost.from_bin(f)
f.seek(ai_normal_base)
self.ai_normal = TrackGhost.from_bin(f)
f.seek(ai_hard_base)
self.ai_hard = TrackGhost.from_bin(f)
f.seek(metadata_base)
self.metadata = GrandPrixTrackMetaData.from_bin(f.read(23))
load_tileset(f, self.metadata.tileset_base, height=10)
load_tileset(f, self.metadata.preview_tileset_base)
f.seek(self.metadata.tilemap_base)
self.tilemap = f.read(self.metadata.width * self.metadata.height)
f.seek(self.metadata.preview_tilemap_base)
self.preview_tilemap = f.read(
self.metadata.preview_map_width * self.metadata.preview_map_height
)
# right-aligned
f.seek(title_ditto_base)
self.title_ditto_tilemap = f.read(8)
# right-aligned and double lined (screen border)
f.seek(title_ranking_base)
self.title_ranking_tilemap = f.read(8)
load_spriteset(f, self.metadata.sprite_base, height=12)
def write(self, f: BinaryIO, update_out: dict):
idx, config = self.index, self.config
write2b_base_and_seek(f, config["metadata_array_base"], idx, self.bases["metadata"])
f.write(self.metadata.to_bin())
f.seek(self.metadata.tileset_base)
f.write(encode_tiles(self.tileset))
f.seek(self.metadata.preview_tileset_base)
f.write(encode_tiles(self.preview_tileset))
f.seek(self.metadata.tilemap_base)
f.write(self.tilemap)
f.seek(self.metadata.preview_tilemap_base)
f.write(self.preview_tilemap)
update_out["ai"].add(self)
update_out["music"].add(self.bgm)
update_out["tilesets"] |= {self.metadata.tileset_base, self.metadata.preview_tileset_base}
update_out["spritesets"].add(self.metadata.sprite_base)
@property
def bgm(self):
return music[self.metadata.bg_music]
@property
def tileset(self):
return tilesets[self.metadata.tileset_base]
@property
def spriteset(self):
return spritesets[self.metadata.sprite_base]
@property
def preview_tileset(self):
return tilesets[self.metadata.preview_tileset_base]
filtered:remove_function_no_docstring: 6,938 | filtered:remove_class_no_docstring: 2,197 | filtered:remove_delete_markers: 184
hexsha: 02ab5ac38323d1a54c6ec402c2b0af7e9a378c0b | size: 12,767 | ext: py | lang: Python
max_stars: robot.py in recantha/RedBoard @ b47e3085d7550318473c73aaf2f089b950b6934a, licenses ["MIT"], count 21, events 2019-10-07T22:55:36.000Z to 2020-12-09T20:07:03.000Z
max_issues: robot.py in recantha/RedBoard @ b47e3085d7550318473c73aaf2f089b950b6934a, licenses ["MIT"], count 5, events 2019-09-05T14:15:36.000Z to 2020-03-17T20:21:42.000Z
max_forks: robot.py in recantha/RedBoard @ b47e3085d7550318473c73aaf2f089b950b6934a, licenses ["MIT"], count 5, events 2018-05-02T16:38:28.000Z to 2020-01-02T15:06:02.000Z
content (filtered):
# This is an example program showing different methods of controlling motors, servos, and Neopixels.
# It works with a Rock Candy or PiHut PS3 controller.
# The left stick controls the speed and direction of both motors - push up to go forwards, down for backwards and left or right to steer.
# The right stick directly controls two servo motors connected to GPIO pins 21 and 22.
# The R1 button starts or stops turbo mode (the robot goes faster!).
# The L1 and L2 buttons move a servo connected to GPIO 22 to two pre-set positions.
# The Square button starts or stops a servo connected to GPIO 20 slowly sweeping left to right. This uses multiprocessing to run at the same time as the main program loop.
# The Triangle, Circle, and X buttons start and stop different Neopixels sequences - also with multiprocessing.
# Author: Neil Lambeth. neil@redrobotics.co.uk @NeilRedRobotics
from __future__ import print_function # Make print work with python 2 & 3
from evdev import InputDevice, ecodes
import redboard
import multiprocessing
import time
try:
import neopixels # Neopixels need to be run with 'sudo', just a reminder!
except RuntimeError:
print ('')
print ("Remember to use 'sudo' if you're using neopixels!")
print ('')
exit()
dev = InputDevice('/dev/input/event0')
#print(dev)
device = str(dev).find('Rock Candy') # Look for a Rock Candy or PiHut controller
if device != -1:
print ('Controller: Rock Candy PS3 Gamepad')
controller = 1
else:
print ('Controller: PiHut PS3 Gamepad')
controller = 2
# Button mapping for different controllers
if controller == 1: # Rock Candy
triangle, x, square, circle = 307, 305, 304, 306
R1, R2, R3 = 309, 311, 315
L1, L2, L3 = 308, 310, 314
select, start, home = 312, 313, 316
if controller == 2: # PiHut
triangle, x, square, circle = 308, 304, 307, 305
R1, R2, R3 = 311, 313, 318
L1, L2, L3 = 310, 312, 317
select, start, home = 314, 315, 316
# Set up variables
RX = 0
LX = 0
RY = 0
LY = 0
LeftY = 0
LeftX = 0
LeftX_R = 0
LeftX_L = 0
Leftmotor = 0
Rightmotor = 0
LM_OLD = 0
RM_OLD = 0
turbo = False
invertX = False
triangleToggle = False
xToggle = False
circleToggle = False
squareToggle = False
# Function to use with multiprocessing to sweep a servo slowly left and right
# without interrupting the normal program flow
# Set up neopixel processes - neopixel code is in ~/RedBoard/neopixels.py
p1 = multiprocessing.Process(target = neopixels.knightRider)
p1.start() # Start the neopixel display when the program starts
triangleToggle = True
p2 = multiprocessing.Process(target = neopixels.headLights)
p3 = multiprocessing.Process(target = neopixels.demo)
p4 = multiprocessing.Process(target = servoSlowSweep)
# Read gamepad buttons-----------------------------------------------------------
for event in dev.read_loop():
#print(event) # Uncomment to show all button data
if event.type == ecodes.EV_KEY:
#print(event.code) # Uncomment to show each keycode
# Button pressed code
if event.value == 1:
if event.code == triangle and triangleToggle == False: # Toggles the button press - one press for on - one press for off.
triangleToggle = True
print ('triangle on')
# Start and stop the neopixel processes - it's important to only run one neopixel process at any one time. So check and stop other processes if they are running.
if p1.is_alive() == False: # Make sure the process isn't already running
if p2.is_alive() == True: # Kill the other process if it's running
p2.terminate()
if p3.is_alive() == True: # Kill the other process if it's running
p3.terminate()
p1 = multiprocessing.Process(target = neopixels.knightRider)
p1.start() # Start the process
elif event.code == triangle and triangleToggle == True:
triangleToggle = False
print ('triangle off')
p1.terminate()
neopixels.clear()
elif event.code == x and xToggle == False:
xToggle = True
print ('X on')
if p2.is_alive() == False: # Make sure the process isn't already running
if p1.is_alive() == True: # Kill the other process if it's running
p1.terminate()
if p3.is_alive() == True: # Kill the other process if it's running
p3.terminate()
p2 = multiprocessing.Process(target = neopixels.headLights)
p2.start() # Start the process
elif event.code == x and xToggle == True:
xToggle = False
print ('x off')
p2.terminate()
neopixels.clear()
elif event.code == circle and circleToggle == False:
circleToggle = True
print ('Circle on')
if p3.is_alive() == False: # Make sure the process isn't already running
if p1.is_alive() == True: # Kill the other process if it's running
p1.terminate()
if p2.is_alive() == True: # Kill the other process if it's running
p2.terminate()
p3 = multiprocessing.Process(target = neopixels.demo)
p3.start() # Start the process
elif event.code == circle and circleToggle == True:
circleToggle = False
print ('Circle off')
p3.terminate()
neopixels.clear()
elif event.code == square and squareToggle == False:
squareToggle = True
print ('Square on')
if p4.is_alive() == False: # Make sure the process isn't already running
p4 = multiprocessing.Process(target = servoSlowSweep)
p4.start() # Start the process
elif event.code == square and squareToggle == True:
squareToggle = False
print ('Square off')
p4.terminate()
elif event.code == R1:
print ('R1 - Turbo On')
turbo = True
elif event.code == R2:
print ('R2')
elif event.code == R3:
print ('R3')
elif event.code == L1:
print ('L1')
redboard.servo22(80) # Send the position to the servo
elif event.code == L2:
print ('L2')
redboard.servo22(-80) # Send the position to the servo
elif event.code == L3:
print ('L3')
elif event.code == select and invertX == False:
print ('Invert X')
invertX = True
elif event.code == select and invertX == True:
print ('Normal X')
invertX = False
elif event.code == start:
print ('Start')
elif event.code == home:
print ('Home')
# Button Release Code------------------------------------------------
if event.value == 0: # Button released
if event.code == R1: # Turbo Off
print ('R1 - Turbo Off')
turbo = False
elif event.code == R2:
print ('R2')
elif event.code == L1 or event.code == L2: # Servos Centre
print ('Servo Centre')
redboard.servo22(0)
# Analogue Sticks and Dpad---------------------------------------------
if event.type == ecodes.EV_ABS:
print('')
print('---------------------------------')
# Dpad
if event.code == 16:
if event.value == -1:
print ('Dpad LEFT')
if event.value == 1:
print ('Dpad RIGHT')
if event.code == 17:
if event.value == -1:
print ('Dpad UP')
if event.value == 1:
print ('Dpad DOWN')
# Right analogue stick servo controls
elif event.code == 5: # Right analogue Vertical stick
RY = event.value
#print (RY)
S21 = redboard.mapServo(RY) # Scale the value from the
# joystick to work with the servo
redboard.servo21_P(S21) # Send the position to the servo
elif event.code == 2: # Right analogue Horizontal stick
RX = event.value
#print (RX)
S22 = redboard.mapServo(RX) # Scale the value from the
# joystick to work with the servo
redboard.servo22_P(S22) # Send the position to the servo
# Left analogue stick motor controls
if event.code == 1: # Left analogue Vertical stick
# The analogue stick gives a value between 0-255
# Convert the value to 0-127 for forwards
# and 0- -127 for backwards
LY = event.value
if LY < 128: # Forwards
LeftY = 127 - LY
#print('LY =',LY)
#print('LeftY = ',LeftY)
elif LY >= 128: # Backwards
LeftY = LY - 128
LeftY = -LeftY # Make negative
#print('LY =',LY)
#print('LeftY = ',LeftY)
elif event.code == 0: # Left analogue Horizontal stick
# The analogue stick gives a value between 0-255
# Convert the value to 0-127 for left
# and 0-127 for right
LX = event.value
if LX < 128: # Left
LeftX_L = 127 - LX
#print('LX =',LX)
#print('LeftX_Left = ',LeftX_L)
if LX > 128: # Right
LeftX_R = LX - 128
#print('LX = ',LX)
#print('LeftX_Right = ',LeftX_R)
if LX == 128: # Make sure both values are zero if stick is in the centre
LeftX_L = 0
LeftX_R = 0
# Prepare the values to send to the motors
if LeftY == 0: #Turn on the spot if not going forwards or backwards
if LX <= 128: # Turn Left
Leftmotor = -LeftX_L # Reverse motor to turn on the spot
Rightmotor = LeftX_L
elif LX >= 127: # Turn Right
Leftmotor = LeftX_R
Rightmotor = -LeftX_R # Reverse motor to turn on the spot
elif LY <= 128: # Forwards
print ('Forwards')
Leftmotor = LeftY - LeftX_L # Mix steering values
if Leftmotor <1: # Stop motor going backwards
Leftmotor = 0;
Rightmotor = LeftY - LeftX_R # Mix steering values
if Rightmotor <1: # Stop motor going backwards
Rightmotor = 0;
elif LY >= 127: # Backwards
print('Backwards')
Leftmotor = LeftY + LeftX_L # Mix steering values
if Leftmotor >-1: # Stop motor going forwards
Leftmotor = 0;
Rightmotor = LeftY + LeftX_R # Mix steering values
if Rightmotor >-1: # Stop motor going forwards
Rightmotor = 0;
if turbo == True: # Double speed for turbo
LM = Leftmotor * 2
RM = Rightmotor * 2
else: # Normal speed
LM = Leftmotor
RM = Rightmotor
if LM != LM_OLD or RM != RM_OLD: # Only print motor speeds if they have changed
print ('Left motor =',LM)
print ('Right motor =',RM)
LM_OLD = LM
RM_OLD = RM
# Set motor speed and direction
if invertX == True: # Reverse steering controls
#print('Reverse steering')
redboard.M2_8bit(RM)
redboard.M1_8bit(LM)
else: # Normal steering controls
#print ('Normal steering')
redboard.M2_8bit(LM)
redboard.M1_8bit(RM)
avg_line_length: 31.9175 | max_line_length: 173 | alphanum_fraction: 0.524242
original_content:
# This is an example program showing different methods of controlling motors, servos, and Neopixels.
# It works with a Rock Candy or PiHut PS3 controller.
# The left stick controls the speed and direction of both motors - push up to go forwards, down for backwards and left or right to steer.
# The right stick directly controls two servo motors connected to GPIO pins 21 and 22.
# The R1 button starts or stops turbo mode (the robot goes faster!).
# The L1 and L2 buttons move a servo connected to GPIO 22 to two pre-set positions.
# The Square button starts or stops a servo connected to GPIO 20 slowly sweeping left to right. This uses multiprocessing to run at the same time as the main program loop.
# The Triangle, Circle, and X buttons start and stop different Neopixels sequences - also with multiprocessing.
# Author: Neil Lambeth. neil@redrobotics.co.uk @NeilRedRobotics
from __future__ import print_function # Make print work with python 2 & 3
from evdev import InputDevice, ecodes
import redboard
import multiprocessing
import time
try:
import neopixels # Neopixels need to be run with 'sudo', just a reminder!
except RuntimeError:
print ('')
print ("Remember to use 'sudo' if you're using neopixels!")
print ('')
exit()
dev = InputDevice('/dev/input/event0')
#print(dev)
device = str(dev).find('Rock Candy') # Look for a Rock Candy or PiHut controller
if device != -1:
print ('Controller: Rock Candy PS3 Gamepad')
controller = 1
else:
print ('Controller: PiHut PS3 Gamepad')
controller = 2
# Button mapping for different controllers
if controller == 1: # Rock Candy
triangle, x, square, circle = 307, 305, 304, 306
R1, R2, R3 = 309, 311, 315
L1, L2, L3 = 308, 310, 314
select, start, home = 312, 313, 316
if controller == 2: # PiHut
triangle, x, square, circle = 308, 304, 307, 305
R1, R2, R3 = 311, 313, 318
L1, L2, L3 = 310, 312, 317
select, start, home = 314, 315, 316
# Set up variables
RX = 0
LX = 0
RY = 0
LY = 0
LeftY = 0
LeftX = 0
LeftX_R = 0
LeftX_L = 0
Leftmotor = 0
Rightmotor = 0
LM_OLD = 0
RM_OLD = 0
turbo = False
invertX = False
triangleToggle = False
xToggle = False
circleToggle = False
squareToggle = False
# Function to use with multiprocessing to sweep a servo slowly left and right
# without interrupting the normal program flow
def servoSlowSweep():
#print ('Servo Slow')
while True:
for i in range(600,2400,5):
redboard.servo20_P(i)
time.sleep(0.05)
for i in range(2400,600,-5):
redboard.servo20_P(i)
time.sleep(0.05)
# Set up neopixel processes - neopixel code is in ~/RedBoard/neopixels.py
p1 = multiprocessing.Process(target = neopixels.knightRider)
p1.start() # Start the neopixel display when the program starts
triangleToggle = True
p2 = multiprocessing.Process(target = neopixels.headLights)
p3 = multiprocessing.Process(target = neopixels.demo)
p4 = multiprocessing.Process(target = servoSlowSweep)
# Read gamepad buttons-----------------------------------------------------------
for event in dev.read_loop():
#print(event) # Uncomment to show all button data
if event.type == ecodes.EV_KEY:
#print(event.code) # Uncomment to show each keycode
# Button pressed code
if event.value == 1:
if event.code == triangle and triangleToggle == False: # Toggles the button press - one press for on - one press for off.
triangleToggle = True
print ('triangle on')
# Start and stop the neopixel processes - it's important to only run one neopixel process at any one time. So check and stop other processes if they are running.
if p1.is_alive() == False: # Make sure the process isn't already running
if p2.is_alive() == True: # Kill the other process if it's running
p2.terminate()
if p3.is_alive() == True: # Kill the other process if it's running
p3.terminate()
p1 = multiprocessing.Process(target = neopixels.knightRider)
p1.start() # Start the process
elif event.code == triangle and triangleToggle == True:
triangleToggle = False
print ('triangle off')
p1.terminate()
neopixels.clear()
elif event.code == x and xToggle == False:
xToggle = True
print ('X on')
if p2.is_alive() == False: # Make sure the process isn't already running
if p1.is_alive() == True: # Kill the other process if it's running
p1.terminate()
if p3.is_alive() == True: # Kill the other process if it's running
p3.terminate()
p2 = multiprocessing.Process(target = neopixels.headLights)
p2.start() # Start the process
elif event.code == x and xToggle == True:
xToggle = False
print ('x off')
p2.terminate()
neopixels.clear()
elif event.code == circle and circleToggle == False:
circleToggle = True
print ('Circle on')
if p3.is_alive() == False: # Make sure the process isn't already running
if p1.is_alive() == True: # Kill the other process if it's running
p1.terminate()
if p2.is_alive() == True: # Kill the other process if it's running
p2.terminate()
p3 = multiprocessing.Process(target = neopixels.demo)
p3.start() # Start the process
elif event.code == circle and circleToggle == True:
circleToggle = False
print ('Circle off')
p3.terminate()
neopixels.clear()
elif event.code == square and squareToggle == False:
squareToggle = True
print ('Square on')
if p4.is_alive() == False: # Make sure the process isn't already running
p4 = multiprocessing.Process(target = servoSlowSweep)
p4.start() # Start the process
elif event.code == square and squareToggle == True:
squareToggle = False
print ('Square off')
p4.terminate()
elif event.code == R1:
print ('R1 - Turbo On')
turbo = True
elif event.code == R2:
print ('R2')
elif event.code == R3:
print ('R3')
elif event.code == L1:
print ('L1')
redboard.servo22(80) # Send the position to the servo
elif event.code == L2:
print ('L2')
redboard.servo22(-80) # Send the position to the servo
elif event.code == L3:
print ('L3')
elif event.code == select and invertX == False:
print ('Invert X')
invertX = True
elif event.code == select and invertX == True:
print ('Normal X')
invertX = False
elif event.code == start:
print ('Start')
elif event.code == home:
print ('Home')
# Button Release Code------------------------------------------------
if event.value == 0: # Button released
if event.code == R1: # Turbo Off
print ('R1 - Turbo Off')
turbo = False
elif event.code == R2:
print ('R2')
elif event.code == L1 or event.code == L2: # Servos Centre
print ('Servo Centre')
redboard.servo22(0)
# Analogue Sticks and Dpad---------------------------------------------
if event.type == ecodes.EV_ABS:
print('')
print('---------------------------------')
# Dpad
if event.code == 16:
if event.value == -1:
print ('Dpad LEFT')
if event.value == 1:
print ('Dpad RIGHT')
if event.code == 17:
if event.value == -1:
print ('Dpad UP')
if event.value == 1:
print ('Dpad DOWN')
# Right analogue stick servo controls
elif event.code == 5: # Right analogue Vertical stick
RY = event.value
#print (RY)
S21 = redboard.mapServo(RY) # Scale the value from the
# joystick to work with the servo
redboard.servo21_P(S21) # Send the position to the servo
elif event.code == 2: # Right analogue Horizontal stick
RX = event.value
#print (RX)
S22 = redboard.mapServo(RX) # Scale the value from the
# joystick to work with the servo
redboard.servo22_P(S22) # Send the position to the servo
# Left analogue stick motor controls
if event.code == 1: # Left analogue Vertical stick
# The analogue stick gives a value between 0-255
# Convert the value to 0-127 for forwards
# and 0- -127 for backwards
LY = event.value
if LY < 128: # Forwards
LeftY = 127 - LY
#print('LY =',LY)
#print('LeftY = ',LeftY)
elif LY >= 128: # Backwards
LeftY = LY - 128
LeftY = -LeftY # Make negative
#print('LY =',LY)
#print('LeftY = ',LeftY)
elif event.code == 0: # Left analogue Horizontal stick
# The analogue stick gives a value between 0-255
# Convert the value to 0-127 for left
# and 0-127 for right
LX = event.value
if LX < 128: # Left
LeftX_L = 127 - LX
#print('LX =',LX)
#print('LeftX_Left = ',LeftX_L)
if LX > 128: # Right
LeftX_R = LX - 128
#print('LX = ',LX)
#print('LeftX_Right = ',LeftX_R)
if LX == 128: # Make sure both values are zero if stick is in the centre
LeftX_L = 0
LeftX_R = 0
# Prepare the values to send to the motors
if LeftY == 0: #Turn on the spot if not going forwards or backwards
if LX <= 128: # Turn Left
Leftmotor = -LeftX_L # Reverse motor to turn on the spot
Rightmotor = LeftX_L
elif LX >= 127: # Turn Right
Leftmotor = LeftX_R
Rightmotor = -LeftX_R # Reverse motor to turn on the spot
elif LY <= 128: # Forwards
print ('Forwards')
Leftmotor = LeftY - LeftX_L # Mix steering values
if Leftmotor <1: # Stop motor going backwards
Leftmotor = 0;
Rightmotor = LeftY - LeftX_R # Mix steering values
if Rightmotor <1: # Stop motor going backwards
Rightmotor = 0;
elif LY >= 127: # Backwards
print('Backwards')
Leftmotor = LeftY + LeftX_L # Mix steering values
if Leftmotor >-1: # Stop motor going forwards
Leftmotor = 0;
Rightmotor = LeftY + LeftX_R # Mix steering values
if Rightmotor >-1: # Stop motor going forwards
Rightmotor = 0;
if turbo == True: # Double speed for turbo
LM = Leftmotor * 2
RM = Rightmotor * 2
else: # Normal speed
LM = Leftmotor
RM = Rightmotor
if LM != LM_OLD or RM != RM_OLD: # Only print motor speeds if they have changed
print ('Left motor =',LM)
print ('Right motor =',RM)
LM_OLD = LM
RM_OLD = RM
# Set motor speed and direction
if invertX == True: # Reverse steering controls
#print('Reverse steering')
redboard.M2_8bit(RM)
redboard.M1_8bit(LM)
else: # Normal steering controls
#print ('Normal steering')
redboard.M2_8bit(LM)
redboard.M1_8bit(RM)
filtered:remove_function_no_docstring: 252 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 22
hexsha: a8e53e3b4dbf4f5115d1ab434dfe78e4bddcfa5a | size: 3,429 | ext: py | lang: Python
max_stars: iceprod/server/globus.py in WIPACrepo/iceprod @ 83615da9b0e764bc2498ac588cc2e2b3f5277235, licenses ["MIT"], count 2, events 2017-01-23T17:12:41.000Z to 2019-01-14T13:38:17.000Z
max_issues: iceprod/server/globus.py in WIPACrepo/iceprod @ 83615da9b0e764bc2498ac588cc2e2b3f5277235, licenses ["MIT"], count 242, events 2016-05-09T18:46:51.000Z to 2022-03-31T22:02:29.000Z
max_forks: iceprod/server/globus.py in WIPACrepo/iceprod @ 83615da9b0e764bc2498ac588cc2e2b3f5277235, licenses ["MIT"], count 2, events 2017-03-27T09:13:40.000Z to 2019-01-27T10:55:30.000Z
content (filtered):
"""
Tools to help manage Globus proxies
"""
import os
import subprocess
import logging
from iceprod.server.config import IceProdConfig
logger = logging.getLogger('globus')
class SiteGlobusProxy(object):
"""
Manage site-wide globus proxy
:param cfgfile: cfgfile location (optional)
:param duration: proxy duration (optional, default 72 hours)
"""
def set_passphrase(self, p):
"""Set the passphrase"""
self.cfg['passphrase'] = p
def set_duration(self, d):
"""Set the duration"""
self.cfg['duration'] = d
def set_voms_vo(self, vo):
"""Set the voms VO"""
self.cfg['voms_vo'] = vo
def set_voms_role(self, r):
"""Set the voms role"""
self.cfg['voms_role'] = r
def update_proxy(self):
"""Update the proxy"""
if 'passphrase' not in self.cfg:
raise Exception('passphrase missing')
if 'duration' not in self.cfg:
raise Exception('duration missing')
logger.info('duration: %r',self.cfg['duration'])
if subprocess.call(['grid-proxy-info','-e',
'-valid','%d:0'%self.cfg['duration'],
], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL):
# proxy needs updating
if 'voms_vo' in self.cfg and self.cfg['voms_vo']:
cmd = ['voms-proxy-init']
if 'voms_role' in self.cfg and self.cfg['voms_role']:
vo = self.cfg['voms_vo']
role = self.cfg['voms_role']
cmd.extend(['-voms', '{0}:/{0}/Role={1}'.format(vo, role)])
else:
cmd.extend(['-voms', self.cfg['voms_vo']])
else:
cmd = ['grid-proxy-init']
cmd.extend(['-pwstdin','-valid','%d:0'%(self.cfg['duration']+1)])
if 'out' in self.cfg:
cmd.extend(['-out', self.cfg['out']])
inputbytes = (self.cfg['passphrase']+'\n').encode('utf-8')
p = subprocess.run(cmd, input=inputbytes, capture_output=True, timeout=60, check=False)
logger.info('proxy cmd: %r', p.args)
logger.info('stdout: %s', p.stdout)
logger.info('stderr: %s', p.stderr)
if 'voms_vo' in self.cfg and self.cfg['voms_vo']:
for line in p.stdout.decode('utf-8').split('\n'):
if line.startswith('Creating proxy') and line.endswith('Done'):
break # this is a good proxy
else:
raise Exception('voms-proxy-init failed')
elif p.returncode > 0:
raise Exception('grid-proxy-init failed')
def get_proxy(self):
"""Get the proxy location"""
if 'out' in self.cfg:
return self.cfg['out']
FNULL = open(os.devnull, 'w')
return subprocess.check_output(['grid-proxy-info','-path'],
stderr=FNULL).decode('utf-8').strip()
avg_line_length: 37.681319 | max_line_length: 99 | alphanum_fraction: 0.534267
original_content:
"""
Tools to help manage Globus proxies
"""
import os
import subprocess
import logging
from iceprod.server.config import IceProdConfig
logger = logging.getLogger('globus')
class SiteGlobusProxy(object):
"""
Manage site-wide globus proxy
:param cfgfile: cfgfile location (optional)
:param duration: proxy duration (optional, default 72 hours)
"""
def __init__(self, cfgfile=None, duration=None):
if not cfgfile:
cfgfile = os.path.join(os.getcwd(),'globus_proxy.json')
self.cfg = IceProdConfig(filename=cfgfile, defaults=False,
validate=False)
if duration:
self.cfg['duration'] = duration
elif 'duration' not in self.cfg:
self.cfg['duration'] = 72
def set_passphrase(self, p):
"""Set the passphrase"""
self.cfg['passphrase'] = p
def set_duration(self, d):
"""Set the duration"""
self.cfg['duration'] = d
def set_voms_vo(self, vo):
"""Set the voms VO"""
self.cfg['voms_vo'] = vo
def set_voms_role(self, r):
"""Set the voms role"""
self.cfg['voms_role'] = r
def update_proxy(self):
"""Update the proxy"""
if 'passphrase' not in self.cfg:
raise Exception('passphrase missing')
if 'duration' not in self.cfg:
raise Exception('duration missing')
logger.info('duration: %r',self.cfg['duration'])
if subprocess.call(['grid-proxy-info','-e',
'-valid','%d:0'%self.cfg['duration'],
], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL):
# proxy needs updating
if 'voms_vo' in self.cfg and self.cfg['voms_vo']:
cmd = ['voms-proxy-init']
if 'voms_role' in self.cfg and self.cfg['voms_role']:
vo = self.cfg['voms_vo']
role = self.cfg['voms_role']
cmd.extend(['-voms', '{0}:/{0}/Role={1}'.format(vo, role)])
else:
cmd.extend(['-voms', self.cfg['voms_vo']])
else:
cmd = ['grid-proxy-init']
cmd.extend(['-pwstdin','-valid','%d:0'%(self.cfg['duration']+1)])
if 'out' in self.cfg:
cmd.extend(['-out', self.cfg['out']])
inputbytes = (self.cfg['passphrase']+'\n').encode('utf-8')
p = subprocess.run(cmd, input=inputbytes, capture_output=True, timeout=60, check=False)
logger.info('proxy cmd: %r', p.args)
logger.info('stdout: %s', p.stdout)
logger.info('stderr: %s', p.stderr)
if 'voms_vo' in self.cfg and self.cfg['voms_vo']:
for line in p.stdout.decode('utf-8').split('\n'):
if line.startswith('Creating proxy') and line.endswith('Done'):
break # this is a good proxy
else:
raise Exception('voms-proxy-init failed')
elif p.returncode > 0:
raise Exception('grid-proxy-init failed')
def get_proxy(self):
"""Get the proxy location"""
if 'out' in self.cfg:
return self.cfg['out']
FNULL = open(os.devnull, 'w')
return subprocess.check_output(['grid-proxy-info','-path'],
stderr=FNULL).decode('utf-8').strip()
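# Usage sketch (not part of the original module), following the SiteGlobusProxy docstring above;
# the config path and passphrase are placeholders:
#   proxy = SiteGlobusProxy(cfgfile='/tmp/globus_proxy.json', duration=72)
#   proxy.set_passphrase('my-grid-passphrase')
#   proxy.update_proxy()
#   print(proxy.get_proxy())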
filtered:remove_function_no_docstring: 379 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 26
hexsha: 718f6a531ed9f39d127e9a33e63589d99008dec5 | size: 2,268 | ext: py | lang: Python
max_stars: percy/resource_loader.py in robopsi/python-percy-client @ c3a80ed567ad40b2f1eaaea76f0886aa6f0367eb, licenses ["MIT"], count 1, events 2017-10-31T11:29:24.000Z to 2017-10-31T11:29:24.000Z
max_issues: percy/resource_loader.py in robopsi/python-percy-client @ c3a80ed567ad40b2f1eaaea76f0886aa6f0367eb, licenses ["MIT"], count 1, events 2021-03-26T00:50:40.000Z to 2021-03-26T00:50:40.000Z
max_forks: percy/resource_loader.py in rob-opsi/python-percy-client @ c3a80ed567ad40b2f1eaaea76f0886aa6f0367eb, licenses ["MIT"], count 2, events 2018-06-05T02:33:05.000Z to 2021-03-02T11:17:47.000Z
content (filtered):
import os
import percy
from percy import utils
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
__all__ = ['ResourceLoader']
MAX_FILESIZE_BYTES = 15 * 1024**2 # 15 MiB.
avg_line_length: 32.869565 | max_line_length: 85 | alphanum_fraction: 0.593915
original_content:
import os
import percy
from percy import utils
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
__all__ = ['ResourceLoader']
MAX_FILESIZE_BYTES = 15 * 1024**2 # 15 MiB.
class BaseResourceLoader(object):
@property
def build_resources(self):
raise NotImplementedError('subclass must implement abstract method')
@property
def snapshot_resources(self):
raise NotImplementedError('subclass must implement abstract method')
class ResourceLoader(BaseResourceLoader):
def __init__(self, root_dir=None, base_url=None, webdriver=None):
self.root_dir = root_dir
self.base_url = base_url
if self.base_url and self.base_url.endswith(os.path.sep):
self.base_url = self.base_url[:-1]
# TODO: more separate loader subclasses and pull out Selenium-specific logic?
self.webdriver = webdriver
@property
def build_resources(self):
resources = []
if not self.root_dir:
return resources
for root, dirs, files in os.walk(self.root_dir, followlinks=True):
for file_name in files:
path = os.path.join(root, file_name)
if os.path.getsize(path) > MAX_FILESIZE_BYTES:
continue
with open(path, 'rb') as f:
content = f.read()
path_for_url = path.replace(self.root_dir, '', 1)
resource_url = "{0}{1}".format(self.base_url, path_for_url)
resource = percy.Resource(
resource_url=resource_url,
sha=utils.sha256hash(content),
local_path=os.path.abspath(path),
)
resources.append(resource)
return resources
@property
def snapshot_resources(self):
# Only one snapshot resource, the root page HTML.
return [
percy.Resource(
# Assumes a Selenium webdriver interface.
resource_url=urlparse(self.webdriver.current_url).path,
is_root=True,
mimetype='text/html',
content=self.webdriver.page_source,
)
]
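# Usage sketch (not part of the original module): building the two resource lists for a local
# static site. The directory, base URL, and webdriver below are placeholders.
#   loader = ResourceLoader(root_dir='static/', base_url='http://localhost:8000', webdriver=driver)
#   for resource in loader.build_resources:
#       print(resource.resource_url)
#   snapshot = loader.snapshot_resources  # needs a live Selenium-style webdriver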
filtered:remove_function_no_docstring: 1,778 | filtered:remove_class_no_docstring: 221 | filtered:remove_delete_markers: 46
hexsha: b963a238595dc05d6bc40e6f5888099b52a8fc14 | size: 20,515 | ext: py | lang: Python
max_stars: tests/testing_server.py in ImportTaste/WebRequest @ 0cc385622624de16ec980e0c12d9080d593cab74, licenses ["WTFPL"], count null, events null to null
max_issues: tests/testing_server.py in ImportTaste/WebRequest @ 0cc385622624de16ec980e0c12d9080d593cab74, licenses ["WTFPL"], count null, events null to null
max_forks: tests/testing_server.py in ImportTaste/WebRequest @ 0cc385622624de16ec980e0c12d9080d593cab74, licenses ["WTFPL"], count null, events null to null
content (filtered):
import traceback
import uuid
import socket
import logging
import os
import base64
import zlib
import gzip
import time
import datetime
from http import cookies
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
from threading import Thread
import WebRequest
if __name__ == '__main__':
wg = WebRequest.WebGetRobust()
srv = start_server(
assertion_class = None,
from_wg = wg,
skip_header_checks = True)
print("running server on port: ", srv)
while 1:
time.sleep(1)
avg_line_length: 32.929374 | max_line_length: 165 | alphanum_fraction: 0.640653
original_content:
import traceback
import uuid
import socket
import logging
import os
import base64
import zlib
import gzip
import time
import datetime
from http import cookies
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
from threading import Thread
import WebRequest
def capture_expected_headers(expected_headers, test_context, is_chromium=False, is_selenium_garbage_chromium=False, is_annoying_pjs=False, skip_header_checks=False):
# print("Capturing expected headers:")
# print(expected_headers)
assert isinstance(expected_headers, dict), "expected_headers must be a dict. Passed a %s" % type(expected_headers)
for key, val in expected_headers.items():
assert isinstance(key, str)
assert isinstance(val, str)
cookie_key = uuid.uuid4().hex
log = logging.getLogger("Main.TestServer")
sucuri_reqs_1 = 0
sucuri_reqs_2 = 0
sucuri_reqs_3 = 0
class MockServerRequestHandler(BaseHTTPRequestHandler):
def log_message(self, format, *args):
return
def validate_headers(self):
for key, value in expected_headers.items():
if (is_annoying_pjs or is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept-Encoding':
# So PhantomJS monkeys with accept-encoding headers
# Just ignore that particular header, I guess.
pass
# Selenium is fucking retarded, and I can't override the user-agent
# and other assorted parameters via their API at all.
elif (is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept-Language':
pass
elif (is_annoying_pjs or is_chromium or is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept':
pass
elif not skip_header_checks:
v1 = value.replace(" ", "")
v2 = self.headers[key]
if v2 is None:
v2 = ""
v2 = v2.replace(" ", "")
test_context.assertEqual(v1, v2, msg="Mismatch in header parameter '{}' : '{}' -> '{}' ({})".format(
key,
value,
self.headers[key],
{
'is_annoying_pjs' : is_annoying_pjs,
'is_chromium' : is_chromium,
'is_selenium_garbage_chromium' : is_selenium_garbage_chromium,
'skip_header_checks' : skip_header_checks,
},
)
)
def _get_handler(self):
# Process an HTTP GET request and return a response with an HTTP 200 status.
# print("Path: ", self.path)
# print("Headers: ", self.headers)
# print("Cookie(s): ", self.headers.get_all('Cookie', failobj=[]))
try:
self.validate_headers()
except Exception:
self.send_response(500)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Headers failed validation!")
raise
if self.path == "/":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/favicon.ico":
self.send_response(404)
self.end_headers()
elif self.path == "/raw-txt":
self.send_response(200)
self.send_header('Content-type', "text/plain")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/html-decode":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/html/real":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Root OK?</body></html>")
elif self.path == "/compressed/deflate":
self.send_response(200)
self.send_header('Content-Encoding', 'deflate')
self.send_header('Content-type', "text/html")
self.end_headers()
inb = b"Root OK?"
cobj = zlib.compressobj(wbits=-zlib.MAX_WBITS)
t1 = cobj.compress(inb) + cobj.flush()
self.wfile.write(t1)
elif self.path == "/compressed/gzip":
self.send_response(200)
self.send_header('Content-Encoding', 'gzip')
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(gzip.compress(b"Root OK?"))
elif self.path == "/json/invalid":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"LOLWAT")
elif self.path == "/json/valid":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b'{"oh" : "hai"}')
elif self.path == "/json/no-coding":
self.send_response(200)
self.end_headers()
self.wfile.write(b'{"oh" : "hai"}')
elif self.path == "/filename/path-only.txt":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename/path-only-trailing-slash/":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename/content-disposition":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.txt")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/path-only.txt":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.txt")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-html-suffix":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.html")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-1":
self.send_response(200)
self.send_header('Content-Disposition', "filename='lolercoaster.html'")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-2":
self.send_response(200)
self.send_header('Content-Disposition', "filename=\'lolercoaster.html\'")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-spaces-1":
self.send_response(200)
self.send_header('Content-Disposition', "filename='loler coaster.html'")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-spaces-2":
self.send_response(200)
self.send_header('Content-Disposition', "filename=\"loler coaster.html\"")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/explicit-html-mime":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.html")
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/redirect/bad-1":
self.send_response(302)
self.end_headers()
elif self.path == "/redirect/bad-2":
self.send_response(302)
self.send_header('location', "bad-2")
self.end_headers()
elif self.path == "/redirect/bad-3":
self.send_response(302)
self.send_header('location', "gopher://www.google.com")
self.end_headers()
elif self.path == "/redirect/from-1":
self.send_response(302)
self.send_header('location', "to-1")
self.end_headers()
elif self.path == "/redirect/to-1":
self.send_response(200)
self.end_headers()
self.wfile.write(b"Redirect-To-1")
elif self.path == "/redirect/from-2":
self.send_response(302)
self.send_header('uri', "to-2")
self.end_headers()
elif self.path == "/redirect/to-2":
self.send_response(200)
self.end_headers()
self.wfile.write(b"Redirect-To-2")
elif self.path == "/redirect/from-3":
self.send_response(302)
newurl = "http://{}:{}".format(self.server.server_address[0], self.server.server_address[1])
self.send_header('uri', newurl)
self.end_headers()
elif self.path == "/password/expect":
# print("Password")
# print(self.headers)
self.send_response(200)
self.end_headers()
if not 'Authorization' in self.headers:
self.wfile.write(b"Password not sent!!")
return
val = self.headers['Authorization']
passval = val.split(" ")[-1]
passstr = base64.b64decode(passval)
if passstr == b'lol:wat':
self.wfile.write(b"Password Ok?")
else:
self.wfile.write(b"Password Bad!")
elif self.path == "/content/have-title":
self.send_response(200)
self.end_headers()
self.wfile.write(b"<html><head><title>I can haz title?</title></head><body>This page has a title!</body></html>")
elif self.path == "/content/no-title":
self.send_response(200)
self.end_headers()
self.wfile.write(b"<html><head></head><body>This page has no title. Sadface.jpg</body></html>")
elif self.path == "/binary_ctnt":
self.send_response(200)
self.send_header('Content-type', "image/jpeg")
self.end_headers()
self.wfile.write(b"Binary!\x00\x01\x02\x03")
elif self.path == "/binary_ctnt":
self.send_response(200)
self.send_header('Content-type', "image/jpeg")
self.end_headers()
self.wfile.write(b"Binary!\x00\x01\x02\x03")
##################################################################################################################################
# Cookie stuff
##################################################################################################################################
elif self.path == '/cookie_test':
cook = cookies.SimpleCookie()
cook['cookie_test_key'] = cookie_key
cook['cookie_test_key']['path'] = "/"
cook['cookie_test_key']['domain'] = ""
expiration = datetime.datetime.now() + datetime.timedelta(days=30)
cook['cookie_test_key']["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST")
self.send_response(200)
self.send_header('Content-type', "text/html")
self.send_header('Set-Cookie', cook['cookie_test_key'].OutputString())
self.end_headers()
self.wfile.write(b"<html><body>CF Cookie Test</body></html>")
elif self.path == '/cookie_require':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cookie_test_key' and cook_value == cookie_key:
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Cookie forwarded properly!</body></html>")
return
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Cookie is missing</body></html>")
##################################################################################################################################
# Sucuri validation
##################################################################################################################################
elif self.path == '/sucuri_shit_3':
# I'd like to get this down to just 2 requests (cookie bounce, and fetch).
# Doing that requires pulling html content out of chromium, though.
# Annoying.
nonlocal sucuri_reqs_3
sucuri_reqs_3 += 1
if sucuri_reqs_3 > 3:
raise RuntimeError("Too many requests to sucuri_shit_3 (%s)!" % sucuri_reqs_3)
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target preemptive Sucuri page!</title></head><body>Preemptive waf circumvented OK (p3)?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/sucuri_shit_2':
# This particular path is the one we should already have a cookie for.
# As such, we expect one request only
nonlocal sucuri_reqs_2
sucuri_reqs_2 += 1
if sucuri_reqs_2 > 1:
raise RuntimeError("Too many requests to sucuri_shit_2 (%s)!" % sucuri_reqs_2)
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target preemptive Sucuri page!</title></head><body>Preemptive waf circumvented OK (p2)?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/sucuri_shit':
nonlocal sucuri_reqs_1
sucuri_reqs_1 += 1
if sucuri_reqs_1 > 4:
raise RuntimeError("Too many requests to sucuri_shit (%s)!" % sucuri_reqs_1)
# print("Fetch for ", self.path)
# print("Cookies:", self.headers.get_all('Cookie', failobj=[]))
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target Sucuri page!</title></head><body>Sucuri Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
##################################################################################################################################
# Cloudflare validation
##################################################################################################################################
elif self.path == '/cloudflare_under_attack_shit_2':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cloudflare_validate_key' and cook_value == cookie_key:
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target CF page!</title></head><body>CF Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'cf_js_challenge_03_12_2018.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.server_version = "cloudflare is garbage"
self.send_response(503)
self.send_header('Server', "cloudflare is garbage")
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/cloudflare_under_attack_shit':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cloudflare_validate_key' and cook_value == cookie_key:
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target CF page!</title></head><body>CF Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'cf_js_challenge_03_12_2018.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.server_version = "cloudflare is garbage"
self.send_response(503)
self.send_header('Server', "cloudflare is garbage")
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/cdn-cgi/l/chk_jschl?jschl_vc=427c2b1cd4fba29608ee81b200e94bfa&pass=1543827239.915-44n9IE20mS&jschl_answer=9.66734594':
cook = cookies.SimpleCookie()
cook['cloudflare_validate_key'] = cookie_key
cook['cloudflare_validate_key']['path'] = "/"
cook['cloudflare_validate_key']['domain'] = ""
expiration = datetime.datetime.now() + datetime.timedelta(days=30)
cook['cloudflare_validate_key']["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST")
self.send_response(200)
self.send_header('Content-type', "text/html")
self.send_header('Set-Cookie', cook['cloudflare_validate_key'].OutputString())
self.end_headers()
body = "<html><body>Setting cookies.<script>window.location.href='/cloudflare_under_attack_shit'</script></body></html>"
self.wfile.write(body.encode("utf-8"))
##################################################################################################################################
# Handle requests for an unknown path
##################################################################################################################################
else:
test_context.assertEqual(self.path, "This shouldn't happen!")
def do_GET(self):
# Process an HTTP GET request and return a response with an HTTP 200 status.
log.info("Request for URL path: '%s'", self.path)
# print("Headers: ", self.headers)
# print("Cookie(s): ", self.headers.get_all('Cookie', failobj=[]))
try:
return self._get_handler()
except Exception as e:
log.error("Exception in handler!")
for line in traceback.format_exc().split("\n"):
log.error(line)
raise e
return MockServerRequestHandler
def get_free_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
return port
def start_server(assertion_class,
from_wg,
port_override = None,
is_chromium = None,
is_selenium_garbage_chromium = False,
is_annoying_pjs = False,
skip_header_checks = False
):
# Configure mock server.
if port_override:
mock_server_port = port_override
else:
mock_server_port = get_free_port()
expected_headers = dict(from_wg.browserHeaders)
print(from_wg)
print(expected_headers)
assert isinstance(expected_headers, dict)
captured_server = capture_expected_headers(
expected_headers = expected_headers,
test_context = assertion_class,
is_chromium = is_chromium,
is_selenium_garbage_chromium = is_selenium_garbage_chromium,
is_annoying_pjs = is_annoying_pjs,
skip_header_checks = skip_header_checks
)
retries = 4
for x in range(retries + 1):
try:
mock_server = HTTPServer(('0.0.0.0', mock_server_port), captured_server)
break
except OSError:
time.sleep(0.2)
if x >= retries:
raise
# Start running mock server in a separate thread.
# Daemon threads automatically shut down when the main process exits.
mock_server_thread = Thread(target=mock_server.serve_forever)
mock_server_thread.setDaemon(True)
mock_server_thread.start()
return mock_server_port, mock_server, mock_server_thread
if __name__ == '__main__':
wg = WebRequest.WebGetRobust()
srv = start_server(
assertion_class = None,
from_wg = wg,
skip_header_checks = True)
print("running server on port: ", srv)
while 1:
time.sleep(1)
| 19,918
| 0
| 69
|
67270613f64a8a2bf43b95c90d08c32285cdbd1c
| 20,081
|
py
|
Python
|
modules/music/music.py
|
naschorr/hawking
|
cdc98b7bc90c72d634f1fe877c34e7f9908ec4a8
|
[
"MIT"
] | 21
|
2017-08-06T02:47:05.000Z
|
2022-03-13T17:39:00.000Z
|
modules/music/music.py
|
naschorr/hawking
|
cdc98b7bc90c72d634f1fe877c34e7f9908ec4a8
|
[
"MIT"
] | 87
|
2017-12-26T17:07:59.000Z
|
2022-03-11T01:31:48.000Z
|
modules/music/music.py
|
naschorr/hawking
|
cdc98b7bc90c72d634f1fe877c34e7f9908ec4a8
|
[
"MIT"
] | 7
|
2019-10-23T17:30:34.000Z
|
2022-03-31T05:56:43.000Z
|
import re
import math
import random
import logging
from collections import OrderedDict
from discord.ext import commands
from common import utilities
from common import dynamo_manager
from common.module.discoverable_module import DiscoverableCog
from common.module.module_initialization_container import ModuleInitializationContainer
## Config
CONFIG_OPTIONS = utilities.load_config()
## Logging
logger = utilities.initialize_logging(logging.getLogger(__name__))
class Music(DiscoverableCog):
'''
Note that there's something wrong with the note parsing logic. It still works, but it takes waaay too long now. I'll
look into it later.
'''
## Keys
BPM_KEY = "bpm"
OCTAVE_KEY = "octave"
TONE_KEY = "tone"
BAD_KEY = "bad"
BAD_PERCENT_KEY = "bad_percent"
## Defaults
BPM = CONFIG_OPTIONS.get(BPM_KEY, 100)
OCTAVE = CONFIG_OPTIONS.get(OCTAVE_KEY, 2)
TONE = CONFIG_OPTIONS.get(TONE_KEY, False)
BAD = CONFIG_OPTIONS.get(BAD_KEY, False)
BAD_PERCENT = CONFIG_OPTIONS.get(BAD_PERCENT_KEY, 10)
## Config
## todo: fix this
NOTES = ["c", "c#", "d", "d#", "e", "f", "f#", "g", "g#", "a", "a#", "b"]
HALF_STEPS = len(NOTES)
OCTAVES = 10
NOTE_REPLACEMENT = "[laa<{},{}>]"
REST = "r"
REST_REPLACEMENT = "[_<{},{}>]"
TONE_REPLACEMENT = "[:t <{},{}>]"
SHARP = "#"
## Properties
@property
## Methods
## Calculates the frequency of a note at a given number of half steps from the reference frequency
## Builds a dictionary of notes and their pitches at a given octave
## Pulls any TTS config options (ex. [:dv hs 10]) from the message string
## Pulls any music config options (ex. \bpm=N) from the message string
## Turns a list of Note objects into a string of TTS friendly phonemes
## Commands
@commands.command(no_pm=True, brief="Sings the given notes aloud!")
async def music(self, ctx, notes, ignore_char_limit=False):
"""
Sings the given notes aloud to your voice channel.
A note (or notes) can look like any of these:
'a' - Just the 'a' quarter note in the default second octave.
'2d' - A 'd' quarter note held for two beats, again in the second octave.
'c#4' - A 'c#' quarter note in the fourth octave.
'2b#3' - A 'b#' quarter note held for two beats, in the third octave.
'r' - A quarter rest.
'4r' - A quarter rest held for four beats.
'b/b' - Two 'b' eighth notes.
'2c#/d#/a3/f' - A 'c#' sixteenth note held for two beats, a 'd#' sixteenth note,
an 'a' sixteenth note in the third octave, and a 'f' sixteenth note.
Formatting:
Notes (at the moment) have four distinct parts (Duration?)(Note)(Sharp?)(Octave?).
			Only the base note is required; everything else can be omitted
			(see examples). A single space NEEDS to be inserted between notes.
You can chain notes together by inserting a '/' between notes, this lets you create
multiple shorter beats.
This lets you approximate eighth notes, sixteenth notes, thirty-second notes, and
really any other division of notes. (Twelfth, Twentieth, etc)
You can also use the | character to help with formatting your bars
(ex. 'c d e f | r g a b')
Inline Configuration:
BPM:
The '\\bpm=N' line can be inserted anywhere to adjust the bpm of notes in that
line. N can be any positive integer. (ex. '\\bpm=120' or '\\bpm=60')
Octave:
The '\\octave=N' line can be inserted anywhere to adjust the default octave of
notes in that line. N can be any integer between 0 and 9 (inclusive)
(ex. '\\octave=1' or '\\octave=3'), however 0 through 4 give the best results.
Tones:
The '\\tone=N' line can be inserted anywhere to set whether or not to use tones
instead of phonemes on that line. N can be either 0 or 1, where 0 disables tones,
and 1 enables them.
Bad:
The '\\bad=N' line can be inserted anywhere to set whether or not to make the notes
on that line sound worse (See: https://www.youtube.com/watch?v=KolfEhV-KiA). N can
be either 0 or 1, where 0 disables the badness, and 1 enables it.
Bad_Percent:
The '\\bad_percent=N' line can be inserted anywhere to set the level of badness,
when using the \\bad config. N can be any positive integer. It works as a
percentage where if N = 0, then it's not at all worse, and N = 100 would be 100%
worse. Needs \\bad to be set to have any effect.
Examples:
My Heart Will Go On (first 7 bars):
'\music \\bpm=100 f f f f | e 2f f | e 2f g | 2a 2g | f f f f | e 2f f | 2d 2r'
Sandstorm (kinda):
'\music \\bpm=136 \\octave=3 \\tone=1 b/b/b/b/b b/b/b/b/b/b/b e/e/e/e/e/e/e
d/d/d/d/d/d/d a b/b/b/b/b/b b/b/b/b/b/b c# b/b/b/b/b/a'
Defaults:
bpm = 100
octave = 2
tone = 0
bad = 0
bad_percent = 10
"""
## Todo: preserve the position of tts_configs in the message
tts_configs, message = self._extract_tts_configs(notes)
		music_configs, message = self._extract_music_configs(message)
bpm = music_configs.get(self.BPM_KEY, self.bpm)
beat_length = 60 / bpm # for a quarter note
octave = music_configs.get(self.OCTAVE_KEY, self.octave)
notes = MusicParser(message, beat_length, octave).notes
tts_notes = self._build_tts_note_string(notes, **music_configs)
await self.speech_cog._say(ctx, " ".join(tts_configs) + tts_notes, ignore_char_limit=ignore_char_limit)
| 38.617308
| 120
| 0.598377
|
import re
import math
import random
import logging
from collections import OrderedDict
from discord.ext import commands
from common import utilities
from common import dynamo_manager
from common.module.discoverable_module import DiscoverableCog
from common.module.module_initialization_container import ModuleInitializationContainer
## Config
CONFIG_OPTIONS = utilities.load_config()
## Logging
logger = utilities.initialize_logging(logging.getLogger(__name__))
class Note:
def __init__(self, beat_length, duration, note, sharp=False, octave=4, sub_notes=[]):
self.beat_length = beat_length
self.duration = duration
self.note = note
self.sharp = sharp
self.octave = octave
self.sub_notes = sub_notes
self.dynamo_db = dynamo_manager.DynamoManager()
def __str__(self):
return "{}{}{} {}*{} [{}]".format(
self.note,
self.sharp or "",
self.octave,
self.beat_length,
self.duration,
", ".join(self.sub_notes)
)
class MusicParser:
## Config
INVALID_CHARS = ["|", ","]
CHAR_REGEX = r"([a-z])"
INT_REGEX = r"(\d)"
SHARP_REGEX = r"(#)"
CATCHALL_REGEX = r"(.?)"
FRACTIONAL_REGEX = r"(\/)"
## Start State Machine Classes
## Base state that all other states inherit from
class BaseState: # Todo: make virtual
def __init__(self, exit_dict={}, error_handler=None):
self.exit_dict = exit_dict
self.error_handler = error_handler
## Try to get the first character of a string
def emit_char(self, string):
try:
return string[0]
except:
return ""
## Try to get the first character of a string, and return the string that it was emitted from
def emit_consume_char(self, string):
try:
return string[0], string[1:]
except:
return "", ""
## Unimplemented state enter handler
def enter(self):
raise NotImplementedError("State.enter() isn't implemented")
## State exit handler
def exit(self, char, string, **kwargs):
for regex_string, handler in self.exit_dict.items():
match = re.match(regex_string, char)
## logger.debug("MATCHING {}, {}, {}".format(regex_string, char, handler.__self__.__class__.__name__))
if(match):
return handler(string, **kwargs)
if(self.error_handler):
self.error_handler(char, string)
return None
## Initial state
class StartState(BaseState):
def enter(self, string, **kwargs):
char = self.emit_char(string)
## logger.debug("Enter {} '{}', {}".format(self.__class__.__name__, string, kwargs))
return self.exit(char, string, **kwargs)
## Duration parsing state
class DurationState(BaseState):
def enter(self, string, **kwargs):
char, consumed_str = self.emit_consume_char(string)
## logger.debug("Enter {} '{}' '{}', {}".format(self.__class__.__name__, consumed_str, char, kwargs))
return self.exit(self.emit_char(consumed_str), consumed_str, duration=int(char), **kwargs)
## Note parsing state
class NoteState(BaseState):
def enter(self, string, **kwargs):
char, consumed_str = self.emit_consume_char(string)
## logger.debug("Enter {} '{}' '{}', {}".format(self.__class__.__name__, consumed_str, char, kwargs))
return self.exit(self.emit_char(consumed_str), consumed_str, note=char, **kwargs)
## Sharp parsing state
class SharpState(BaseState):
def enter(self, string, **kwargs):
char, consumed_str = self.emit_consume_char(string)
## logger.debug("Enter {} '{}' '{}', {}".format(self.__class__.__name__, consumed_str, char, kwargs))
return self.exit(self.emit_char(consumed_str), consumed_str, sharp=char, **kwargs)
## Octave parsing state
class OctaveState(BaseState):
def enter(self, string, **kwargs):
char, consumed_str = self.emit_consume_char(string)
## logger.debug("Enter {} '{}' '{}', {}".format(self.__class__.__name__, consumed_str, char, kwargs))
return self.exit(self.emit_char(consumed_str), consumed_str, octave=int(char), **kwargs)
## NoteObj creation state
class NoteObjState(BaseState):
def enter(self, string, **kwargs):
char, consumed_str = self.emit_consume_char(string)
## logger.debug("Enter {} '{}' '{}', {}".format(self.__class__.__name__, consumed_str, char, kwargs))
beat_length = kwargs.get("beat_length", 0.25)
duration = kwargs.get("duration", 1)
note = kwargs.get("note")
assert note is not None
sharp = kwargs.get("sharp", False)
octave = kwargs.get("octave", kwargs.get("default_octave", 2))
note_obj = Note(beat_length, duration, note, sharp, octave, [])
## Clean up kwargs for next pass
kwargs.pop("note_obj", None)
kwargs.pop("duration", None)
kwargs.pop("note", None)
kwargs.pop("sharp", None)
kwargs.pop("octave", None)
## Next state
return self.exit(char, string, note_obj=note_obj, **kwargs)
## SubNote creation state
class SubNoteState(BaseState):
def enter(self, string, **kwargs):
char, consumed_str = self.emit_consume_char(string)
## logger.debug("Enter {} '{}' '{}', {}".format(self.__class__.__name__, consumed_str, char, kwargs))
note_obj = kwargs.get("note_obj")
assert note_obj is not None
sub_notes = kwargs.get("sub_notes", [])
sub_notes.append(note_obj)
## Clean up kwargs for next pass
kwargs.pop("note_obj", None)
kwargs.pop("sub_notes", None)
return self.exit(self.emit_char(consumed_str), consumed_str, sub_notes=sub_notes, **kwargs)
## Final output state
class FinalState(BaseState):
def enter(self, string, **kwargs):
## logger.debug("Enter {} '{}', {}".format(self.__class__.__name__, string, kwargs))
note_obj = kwargs.get("note_obj")
sub_notes = kwargs.get("sub_notes", [])
beat_length = kwargs.get("beat_length", 0.25) / (len(sub_notes) + 1)
if(note_obj):
for note in sub_notes:
note.beat_length = beat_length
note_obj.beat_length = beat_length
note_obj.sub_notes = sub_notes
return note_obj
## Error handling state
class ErrorState(BaseState):
def enter(self, char, string):
logger.debug("Error in music state machine", char, string)
return None
## End State Machine Classes
def __init__(self, notes, beat_length=0.25, octave=4):
self.beat_length = beat_length
self.octave = octave
self.notes_preparsed = self._notes_preparser(notes)
self.parse_note = self._init_state_machine()
self.notes = []
for note in self.notes_preparsed:
parsed = self.parse_note(note.lower(), beat_length=self.beat_length, default_octave=self.octave)
if(parsed):
self.notes.append(parsed)
## Methods
## Initialize the note parsing state machine, returning a callable entry point (start.enter())
def _init_state_machine(self):
## Init error handler state
error_state_dict = {}
error_handler = self.ErrorState(error_state_dict, None).enter
## Init states
start_state = self.StartState({}, error_handler)
duration_state = self.DurationState({}, error_handler)
note_state = self.NoteState({}, error_handler)
sharp_state = self.SharpState({}, error_handler)
octave_state = self.OctaveState({}, error_handler)
note_obj_state = self.NoteObjState({}, error_handler)
sub_note_state = self.SubNoteState({}, error_handler)
final_state = self.FinalState({}, error_handler)
## Populate state exit_dicts
start_state.exit_dict = OrderedDict([(self.INT_REGEX, duration_state.enter),
(self.CHAR_REGEX, note_state.enter),
(self.CATCHALL_REGEX, final_state.enter)])
duration_state.exit_dict = OrderedDict([(self.CHAR_REGEX, note_state.enter)])
note_state.exit_dict = OrderedDict([(self.SHARP_REGEX, sharp_state.enter),
(self.INT_REGEX, octave_state.enter),
(self.CATCHALL_REGEX, note_obj_state.enter)])
sharp_state.exit_dict = OrderedDict([(self.INT_REGEX, octave_state.enter),
(self.CATCHALL_REGEX, note_obj_state.enter)])
octave_state.exit_dict = OrderedDict([(self.CATCHALL_REGEX, note_obj_state.enter)])
note_obj_state.exit_dict = OrderedDict([(self.FRACTIONAL_REGEX, sub_note_state.enter),
(self.CATCHALL_REGEX, final_state.enter)])
sub_note_state.exit_dict = OrderedDict([(self.CATCHALL_REGEX, start_state.enter)])
## Return an entry point into the fsm
return start_state.enter
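		## Example walk-through: parsing "2c#3" visits StartState -> DurationState (2)
		## -> NoteState ("c") -> SharpState ("#") -> OctaveState (3) -> NoteObjState
		## -> FinalState, which returns a Note with duration=2, note="c", sharp="#",
		## octave=3 and an empty sub_notes list.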
def _notes_preparser(self, notes):
## Remove any invalid characters (usually used for formatting)
for char in self.INVALID_CHARS:
notes = notes.replace(char, "")
## Convert to a list of notes sans whitespace
notes_list = " ".join(notes.split()).split()
return notes_list
class Music(DiscoverableCog):
'''
Note that there's something wrong with the note parsing logic. It still works, but it takes waaay too long now. I'll
look into it later.
'''
## Keys
BPM_KEY = "bpm"
OCTAVE_KEY = "octave"
TONE_KEY = "tone"
BAD_KEY = "bad"
BAD_PERCENT_KEY = "bad_percent"
## Defaults
BPM = CONFIG_OPTIONS.get(BPM_KEY, 100)
OCTAVE = CONFIG_OPTIONS.get(OCTAVE_KEY, 2)
TONE = CONFIG_OPTIONS.get(TONE_KEY, False)
BAD = CONFIG_OPTIONS.get(BAD_KEY, False)
BAD_PERCENT = CONFIG_OPTIONS.get(BAD_PERCENT_KEY, 10)
## Config
## todo: fix this
NOTES = ["c", "c#", "d", "d#", "e", "f", "f#", "g", "g#", "a", "a#", "b"]
HALF_STEPS = len(NOTES)
OCTAVES = 10
NOTE_REPLACEMENT = "[laa<{},{}>]"
REST = "r"
REST_REPLACEMENT = "[_<{},{}>]"
TONE_REPLACEMENT = "[:t <{},{}>]"
SHARP = "#"
def __init__(self, hawking, bot, **kwargs):
		super().__init__(**kwargs)
self.hawking = hawking
self.bot = bot
self.bpm = int(kwargs.get(self.BPM_KEY, self.BPM))
self.octave = int(kwargs.get(self.OCTAVE_KEY, self.OCTAVE))
self.tone = kwargs.get(self.TONE_KEY, self.TONE)
self.bad = kwargs.get(self.BAD_KEY, self.BAD)
self.bad_percent = int(kwargs.get(self.BAD_PERCENT_KEY, self.BAD_PERCENT))
self.pitches = []
for octave in range(self.OCTAVES):
self.pitches.append(self._build_pitch_dict(octave))
## Properties
@property
def audio_player_cog(self):
return self.hawking.get_audio_player_cog()
## Methods
## Calculates the frequency of a note at a given number of half steps from the reference frequency
def _get_frequency(self, half_steps, reference=440):
a = 1.059463
frequency = int(reference * pow(a, half_steps))
return frequency
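		## For example, with the default 440 Hz reference (A4), 12 half steps is one
		## octave up: self._get_frequency(12) gives 879 (just shy of 880, since a only
		## approximates 2**(1/12) and the result is truncated to an int).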
## Builds a dictionary of notes and their pitches at a given octave
def _build_pitch_dict(self, octave):
reference_frequency = 440 # A
reference_octave = 4
refence_steps_from_c4 = 9
reference_steps_from_c0 = self.HALF_STEPS * reference_octave + refence_steps_from_c4
pitch_dict = {}
for index, note in enumerate(self.NOTES):
half_steps = octave * self.HALF_STEPS + index
pitch_dict[note] = self._get_frequency(half_steps - reference_steps_from_c0,
reference_frequency)
return pitch_dict
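		## The result maps note names to integer frequencies for the given octave,
		## e.g. _build_pitch_dict(4)["a"] == 440 and ["c"] is roughly middle C
		## (~261 Hz); these values are looked up later via self.pitches[octave][note].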
## Pulls any TTS config options (ex. [:dv hs 10]) from the message string
def _extract_tts_configs(self, string):
tts_config_regex = r"(\[:.+?\])"
tts_configs = []
tts_config = re.search(tts_config_regex, string)
while(tts_config):
tts_configs.append(tts_config.group(1))
string = string[:tts_config.start()] + string[tts_config.end():]
tts_config = re.search(tts_config_regex, string)
return tts_configs, string
## Pulls any music config options (ex. \bpm=N) from the message string
def _extract_music_configs(self, string):
music_config_regex = r"\\([a-z_]+)\s?=\s?(\d+)"
music_configs = {}
music_config = re.search(music_config_regex, string)
while(music_config):
key = music_config.group(1)
value = int(music_config.group(2))
music_configs[key] = value
string = string[:music_config.start()] + string[music_config.end():]
music_config = re.search(music_config_regex, string)
return music_configs, string
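		## For example, "\bpm=120 \octave=3 c d e" yields the dict
		## {"bpm": 120, "octave": 3} and the remaining text " c d e" (the matched
		## config tokens are cut out, leaving only the notes plus leftover whitespace).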
## Turns a list of Note objects into a string of TTS friendly phonemes
def _build_tts_note_string(self, notes, **configs):
use_tones = configs.get(self.TONE_KEY, self.tone)
use_bad = configs.get(self.BAD_KEY, self.bad)
bad_percent = configs.get(self.BAD_PERCENT_KEY, self.bad_percent)
string = ""
note_index = 0
for note in notes:
## Push any sub_notes into their appropriate position in the notes list
sub_note_index = 0
for sub_note in note.sub_notes:
notes.insert(note_index + 1 + sub_note_index, sub_note)
sub_note_index += 1
## Create a textual representation of the note
note_str = note.note
if(note.sharp):
note_str += self.SHARP
## Select a format string for the type of note
if(note_str in self.NOTES):
replacement_str = self.NOTE_REPLACEMENT
try:
pitch = self.pitches[note.octave][note_str]
except IndexError:
continue
elif(note_str == self.REST):
replacement_str = self.REST_REPLACEMENT
pitch = 10 # Arbitrary low pitch
else:
continue
## Assign a duration of time to hold the note for
duration = note.duration
## Randomize the note's pitch and duration if use_bad is True
if(use_bad):
pitch_offset_max = pitch * (bad_percent / 100)
pitch += random.uniform(-pitch_offset_max, pitch_offset_max)
duration_offset_max = duration * (bad_percent / 100)
duration += random.uniform(-duration_offset_max, duration_offset_max)
## Create the TTS friendly string for the note, and use the tone format string if necessary
if(use_tones):
string += self.TONE_REPLACEMENT.format(int(pitch), int(note.beat_length * duration * 1000))
else:
string += replacement_str.format(int(note.beat_length * duration * 1000), int(pitch))
note_index += 1
return string
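		## For example, at the default bpm of 100 (beat_length 0.6 s) a plain "a" in
		## octave 4 becomes "[laa<600,440>]" (hold time in ms, then pitch in Hz); with
		## \tone=1 enabled the same note is emitted as "[:t <440,600>]" instead.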
## Commands
@commands.command(no_pm=True, brief="Sings the given notes aloud!")
async def music(self, ctx, notes, ignore_char_limit=False):
"""
Sings the given notes aloud to your voice channel.
A note (or notes) can look like any of these:
'a' - Just the 'a' quarter note in the default second octave.
'2d' - A 'd' quarter note held for two beats, again in the second octave.
'c#4' - A 'c#' quarter note in the fourth octave.
'2b#3' - A 'b#' quarter note held for two beats, in the third octave.
'r' - A quarter rest.
'4r' - A quarter rest held for four beats.
'b/b' - Two 'b' eighth notes.
'2c#/d#/a3/f' - A 'c#' sixteenth note held for two beats, a 'd#' sixteenth note,
an 'a' sixteenth note in the third octave, and a 'f' sixteenth note.
Formatting:
Notes (at the moment) have four distinct parts (Duration?)(Note)(Sharp?)(Octave?).
			Only the base note is required; everything else can be omitted
			(see examples). A single space NEEDS to be inserted between notes.
You can chain notes together by inserting a '/' between notes, this lets you create
multiple shorter beats.
This lets you approximate eighth notes, sixteenth notes, thirty-second notes, and
really any other division of notes. (Twelfth, Twentieth, etc)
You can also use the | character to help with formatting your bars
(ex. 'c d e f | r g a b')
Inline Configuration:
BPM:
The '\\bpm=N' line can be inserted anywhere to adjust the bpm of notes in that
line. N can be any positive integer. (ex. '\\bpm=120' or '\\bpm=60')
Octave:
The '\\octave=N' line can be inserted anywhere to adjust the default octave of
notes in that line. N can be any integer between 0 and 9 (inclusive)
(ex. '\\octave=1' or '\\octave=3'), however 0 through 4 give the best results.
Tones:
The '\\tone=N' line can be inserted anywhere to set whether or not to use tones
instead of phonemes on that line. N can be either 0 or 1, where 0 disables tones,
and 1 enables them.
Bad:
The '\\bad=N' line can be inserted anywhere to set whether or not to make the notes
on that line sound worse (See: https://www.youtube.com/watch?v=KolfEhV-KiA). N can
be either 0 or 1, where 0 disables the badness, and 1 enables it.
Bad_Percent:
The '\\bad_percent=N' line can be inserted anywhere to set the level of badness,
when using the \\bad config. N can be any positive integer. It works as a
percentage where if N = 0, then it's not at all worse, and N = 100 would be 100%
worse. Needs \\bad to be set to have any effect.
Examples:
My Heart Will Go On (first 7 bars):
'\music \\bpm=100 f f f f | e 2f f | e 2f g | 2a 2g | f f f f | e 2f f | 2d 2r'
Sandstorm (kinda):
'\music \\bpm=136 \\octave=3 \\tone=1 b/b/b/b/b b/b/b/b/b/b/b e/e/e/e/e/e/e
d/d/d/d/d/d/d a b/b/b/b/b/b b/b/b/b/b/b c# b/b/b/b/b/a'
Defaults:
bpm = 100
octave = 2
tone = 0
bad = 0
bad_percent = 10
"""
## Todo: preserve the position of tts_configs in the message
tts_configs, message = self._extract_tts_configs(notes)
		music_configs, message = self._extract_music_configs(message)
bpm = music_configs.get(self.BPM_KEY, self.bpm)
beat_length = 60 / bpm # for a quarter note
octave = music_configs.get(self.OCTAVE_KEY, self.octave)
notes = MusicParser(message, beat_length, octave).notes
tts_notes = self._build_tts_note_string(notes, **music_configs)
await self.speech_cog._say(ctx, " ".join(tts_configs) + tts_notes, ignore_char_limit=ignore_char_limit)
def main() -> ModuleInitializationContainer:
## return ModuleInitializationContainer(Music)
return False
| 11,993
| 1,762
| 305
|
16d91b413c5b0ff989f0583278802423f8821a13
| 7,359
|
py
|
Python
|
tests/keras/preprocessing/test_image.py
|
cnvrg/keras
|
7f9bea44d5d4512fe21d0263d00fd39a9fb5c671
|
[
"MIT"
] | 1
|
2020-12-08T14:29:08.000Z
|
2020-12-08T14:29:08.000Z
|
tests/keras/preprocessing/test_image.py
|
cnvrg/keras
|
7f9bea44d5d4512fe21d0263d00fd39a9fb5c671
|
[
"MIT"
] | null | null | null |
tests/keras/preprocessing/test_image.py
|
cnvrg/keras
|
7f9bea44d5d4512fe21d0263d00fd39a9fb5c671
|
[
"MIT"
] | null | null | null |
import pytest
from keras.preprocessing import image
from PIL import Image
import numpy as np
import os
import shutil
import tempfile
if __name__ == '__main__':
pytest.main([__file__])
| 37.738462
| 107
| 0.555646
|
import pytest
from keras.preprocessing import image
from PIL import Image
import numpy as np
import os
import shutil
import tempfile
class TestImage:
def setup_class(cls):
img_w = img_h = 20
rgb_images = []
gray_images = []
for n in range(8):
bias = np.random.rand(img_w, img_h, 1) * 64
variance = np.random.rand(img_w, img_h, 1) * (255 - 64)
imarray = np.random.rand(img_w, img_h, 3) * variance + bias
im = Image.fromarray(imarray.astype('uint8')).convert('RGB')
rgb_images.append(im)
imarray = np.random.rand(img_w, img_h, 1) * variance + bias
im = Image.fromarray(imarray.astype('uint8').squeeze()).convert('L')
gray_images.append(im)
cls.all_test_images = [rgb_images, gray_images]
def teardown_class(cls):
del cls.all_test_images
def test_image_data_generator(self):
for test_images in self.all_test_images:
img_list = []
for im in test_images:
img_list.append(image.img_to_array(im)[None, ...])
images = np.vstack(img_list)
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.5,
zoom_range=0.2,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.5,
horizontal_flip=True,
vertical_flip=True)
generator.fit(images, augment=True)
tmp_folder = tempfile.mkdtemp(prefix='test_images')
for x, y in generator.flow(images, np.arange(images.shape[0]),
shuffle=True, save_to_dir=tmp_folder):
assert x.shape[1:] == images.shape[1:]
break
shutil.rmtree(tmp_folder)
def test_image_data_generator_invalid_data(self):
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
dim_ordering='tf')
# Test fit with invalid data
with pytest.raises(ValueError):
x = np.random.random((3, 10, 10))
generator.fit(x)
with pytest.raises(ValueError):
x = np.random.random((32, 3, 10, 10))
generator.fit(x)
with pytest.raises(ValueError):
x = np.random.random((32, 10, 10, 5))
generator.fit(x)
# Test flow with invalid data
with pytest.raises(ValueError):
x = np.random.random((32, 10, 10, 5))
generator.flow(np.arange(x.shape[0]))
with pytest.raises(ValueError):
x = np.random.random((32, 10, 10))
generator.flow(np.arange(x.shape[0]))
with pytest.raises(ValueError):
x = np.random.random((32, 3, 10, 10))
generator.flow(np.arange(x.shape[0]))
def test_image_data_generator_fit(self):
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
dim_ordering='tf')
# Test grayscale
x = np.random.random((32, 10, 10, 1))
generator.fit(x)
        # Test RGB
x = np.random.random((32, 10, 10, 3))
generator.fit(x)
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
dim_ordering='th')
# Test grayscale
x = np.random.random((32, 1, 10, 10))
generator.fit(x)
        # Test RGB
x = np.random.random((32, 3, 10, 10))
generator.fit(x)
def test_directory_iterator(self):
num_classes = 2
tmp_folder = tempfile.mkdtemp(prefix='test_images')
# create folders and subfolders
paths = []
for cl in range(num_classes):
class_directory = 'class-{}'.format(cl)
classpaths = [
class_directory,
os.path.join(class_directory, 'subfolder-1'),
os.path.join(class_directory, 'subfolder-2'),
os.path.join(class_directory, 'subfolder-1', 'sub-subfolder')
]
for path in classpaths:
os.mkdir(os.path.join(tmp_folder, path))
paths.append(classpaths)
# save the images in the paths
count = 0
filenames = []
for test_images in self.all_test_images:
for im in test_images:
# rotate image class
im_class = count % num_classes
# rotate subfolders
classpaths = paths[im_class]
filename = os.path.join(classpaths[count % len(classpaths)], 'image-{}.jpg'.format(count))
filenames.append(filename)
im.save(os.path.join(tmp_folder, filename))
count += 1
# create iterator
generator = image.ImageDataGenerator()
dir_iterator = generator.flow_from_directory(tmp_folder)
# check number of classes and images
assert(len(dir_iterator.class_indices) == num_classes)
assert(len(dir_iterator.classes) == count)
assert(sorted(dir_iterator.filenames) == sorted(filenames))
shutil.rmtree(tmp_folder)
def test_img_utils(self):
height, width = 10, 8
# Test th dim ordering
x = np.random.random((3, height, width))
img = image.array_to_img(x, dim_ordering='th')
assert img.size == (width, height)
x = image.img_to_array(img, dim_ordering='th')
assert x.shape == (3, height, width)
# Test 2D
x = np.random.random((1, height, width))
img = image.array_to_img(x, dim_ordering='th')
assert img.size == (width, height)
x = image.img_to_array(img, dim_ordering='th')
assert x.shape == (1, height, width)
# Test tf dim ordering
x = np.random.random((height, width, 3))
img = image.array_to_img(x, dim_ordering='tf')
assert img.size == (width, height)
x = image.img_to_array(img, dim_ordering='tf')
assert x.shape == (height, width, 3)
# Test 2D
x = np.random.random((height, width, 1))
img = image.array_to_img(x, dim_ordering='tf')
assert img.size == (width, height)
x = image.img_to_array(img, dim_ordering='tf')
assert x.shape == (height, width, 1)
if __name__ == '__main__':
pytest.main([__file__])
| 6,933
| -5
| 228
|
d6583013d9b22bea192d0a72428b5f4e35977e91
| 1,132
|
py
|
Python
|
setup.py
|
zuiwan/CodingHub-CLI
|
9ced732de351412f1fd32b3a5eb67117e42779f6
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
zuiwan/CodingHub-CLI
|
9ced732de351412f1fd32b3a5eb67117e42779f6
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
zuiwan/CodingHub-CLI
|
9ced732de351412f1fd32b3a5eb67117e42779f6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from setuptools import find_packages, setup
project = "ch-cli"
version = "0.1.0"
setup(
name=project,
version=version,
description="Command line tool for ch",
author="Zuiwan",
author_email="danceiny@gmail.com",
url="https://github.com/zuiwan/CodingHub-CLI.git",
packages=find_packages(exclude=("*.tests", "*.tests.*", "tests.*", "tests")),
    include_package_data=True,
zip_safe=False,
keywords="ch",
install_requires=[
"click>=6.7",
"requests>=2.12.4",
"marshmallow>=2.11.1",
"pytz>=2016.10",
"shortuuid>=0.4.3",
"tabulate>=0.7.7",
"kafka-python>=1.3.3",
"pathlib2>=2.3.0",
"tzlocal>=1.4",
"progressbar33>=2.4",
"websocket-client>=0.44.0",
],
setup_requires=[
"nose>=1.0",
],
dependency_links=[
],
entry_points={
"console_scripts": [
"codehub = ch.main:cli",
"ch-dev = ch.development.dev:cli",
"ch-local = ch.development.local:cli",
],
},
tests_require=[
"mock>=1.0.1",
],
)
| 24.608696
| 81
| 0.539753
|
#!/usr/bin/env python
from setuptools import find_packages, setup
project = "ch-cli"
version = "0.1.0"
setup(
name=project,
version=version,
description="Command line tool for ch",
author="Zuiwan",
author_email="danceiny@gmail.com",
url="https://github.com/zuiwan/CodingHub-CLI.git",
packages=find_packages(exclude=("*.tests", "*.tests.*", "tests.*", "tests")),
    include_package_data=True,
zip_safe=False,
keywords="ch",
install_requires=[
"click>=6.7",
"requests>=2.12.4",
"marshmallow>=2.11.1",
"pytz>=2016.10",
"shortuuid>=0.4.3",
"tabulate>=0.7.7",
"kafka-python>=1.3.3",
"pathlib2>=2.3.0",
"tzlocal>=1.4",
"progressbar33>=2.4",
"websocket-client>=0.44.0",
],
setup_requires=[
"nose>=1.0",
],
dependency_links=[
],
entry_points={
"console_scripts": [
"codehub = ch.main:cli",
"ch-dev = ch.development.dev:cli",
"ch-local = ch.development.local:cli",
],
},
tests_require=[
"mock>=1.0.1",
],
)
| 0
| 0
| 0
|
c6781e3b5409730674626896503725c3c53ad9cf
| 4,135
|
py
|
Python
|
tronx/helpers/filters.py
|
JayPatel1314/Tron
|
d8f2d799eea344c0d76f0fe758ce385c7ceceea7
|
[
"MIT"
] | null | null | null |
tronx/helpers/filters.py
|
JayPatel1314/Tron
|
d8f2d799eea344c0d76f0fe758ce385c7ceceea7
|
[
"MIT"
] | null | null | null |
tronx/helpers/filters.py
|
JayPatel1314/Tron
|
d8f2d799eea344c0d76f0fe758ce385c7ceceea7
|
[
"MIT"
] | null | null | null |
import os
import re
from typing import (
Union,
List,
Dict,
Pattern
)
from pyrogram.filters import create
from pyrogram import filters, Client
from pyrogram.types import (
Message,
CallbackQuery,
InlineQuery,
InlineKeyboardMarkup,
ReplyKeyboardMarkup,
Update
)
from config import Config
from tronx.database.postgres.dv_sql import DVSQL
dv = DVSQL()
# custom regex filter
def MyPrefix():
"""Multiple prefix support function"""
return dv.getdv("PREFIX").split() or Config.PREFIX.split() or "."
# custom command filter
| 23.494318
| 106
| 0.685127
|
import os
import re
from typing import (
Union,
List,
Dict,
Pattern
)
from pyrogram.filters import create
from pyrogram import filters, Client
from pyrogram.types import (
Message,
CallbackQuery,
InlineQuery,
InlineKeyboardMarkup,
ReplyKeyboardMarkup,
Update
)
from config import Config
from tronx.database.postgres.dv_sql import DVSQL
dv = DVSQL()
# custom regex filter
def regex(
pattern: Union[str, Pattern],
flags: int = 0,
allow: list = []
):
async def func(flt, client: Client, update: Update):
# work for -> sudo & bot owner if sudo
if "sudo" in allow:
if update.from_user and not (update.from_user.is_self or update.from_user.id in client.SudoUsers()):
return False
# work only for -> bot owner if not sudo
elif not "sudo" in allow:
if update.from_user and not update.from_user.is_self:
return False
# work for -> forwarded message
if not "forward" in allow:
if update.forward_date:
return False
# work for -> messages in channel
if not "channel" in allow:
if update.chat.type == "channel":
return False
# work for -> edited message
if not "edited" in allow:
if update.edit_date:
return False
if isinstance(update, Message):
value = update.text or update.caption
elif isinstance(update, CallbackQuery):
value = update.data
elif isinstance(update, InlineQuery):
value = update.query
else:
raise ValueError(f"Regex filter doesn't work with {type(update)}")
if value:
update.matches = list(flt.p.finditer(value)) or None
return bool(update.matches)
return create(
func,
"RegexCommandFilter",
p=pattern if isinstance(pattern, Pattern) else re.compile(pattern, flags)
)
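# Illustrative use of the regex filter above ("app" is assumed to be a pyrogram
# Client defined elsewhere in this project; the pattern is made up):
#   @app.on_message(regex(r"^\.afk\b", allow=["sudo", "edited"]))
#   async def afk_watcher(client, message):
#       ...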
def MyPrefix():
"""Multiple prefix support function"""
return dv.getdv("PREFIX").split() or Config.PREFIX.split() or "."
# custom command filter
def gen(
commands: Union[str, List[str]],
prefixes: Union[str, List[str]] = MyPrefix(),
case_sensitive: bool = True,
allow: list = []
):
# update the commands and information of commands.
# modified func of pyrogram.filters.command
command_re = re.compile(r"([\"'])(.*?)(?<!\\)\1|(\S+)")
async def func(flt, client: Client, message: Message):
# Username shared among all commands; used for mention commands, e.g.: /start@username
global username
username = ""
text = message.text or message.caption
message.command = None
if not text:
return False
# work for -> sudo & bot owner if sudo
if "sudo" in allow:
if message.from_user and not (message.from_user.is_self or message.from_user.id in client.SudoUsers()):
return False
# work only for -> bot owner if not sudo
elif not "sudo" in allow:
if message.from_user and not message.from_user.is_self:
return False
# work for -> forwarded message
if not "forward" in allow:
if message.forward_date:
return False
# work for -> messages in channel
if not "channel" in allow:
if message.chat.type == "channel":
return False
# work for -> edited message
if not "edited" in allow:
if message.edit_date:
return False
for prefix in flt.prefixes:
if not text.startswith(prefix):
continue
without_prefix = text[len(prefix):]
username = None
for cmd in flt.commands:
if not re.match(rf"^(?:{cmd}(?:@?{username})?)(?:\s|$)", without_prefix,
flags=re.IGNORECASE if not flt.case_sensitive else 0):
continue
without_command = re.sub(rf"{cmd}(?:@?{username})?\s?", "", without_prefix, count=1)
message.command = [cmd] + [
re.sub(r"\\([\"'])", r"\1", m.group(2) or m.group(3) or "")
for m in command_re.finditer(without_command)
]
return True
return False
commands = commands if isinstance(commands, list) else [commands]
commands = {c if case_sensitive else c.lower() for c in commands}
prefixes = [] if prefixes is None else prefixes
prefixes = prefixes if isinstance(prefixes, list) else [prefixes]
prefixes = set(prefixes) if prefixes else {""}
return create(
func,
"MessageCommandFilter",
commands=commands,
prefixes=prefixes,
case_sensitive=case_sensitive
)
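# Illustrative use of the command filter above ("app" is again an assumed pyrogram
# Client; the command name is made up):
#   @app.on_message(gen(["ping"], allow=["sudo"]))
#   async def ping_handler(client, message):
#       await message.reply("pong")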
| 3,541
| 0
| 44
|
897bbc983e66081213620110e136f15fdd0ac12c
| 1,071
|
py
|
Python
|
train_vqvae.py
|
bipashasen/How2Sign-Blob
|
6e2af881d96d477fdb93104b8e53d943765c64ff
|
[
"MIT"
] | 6
|
2021-09-14T07:04:54.000Z
|
2022-03-24T16:07:41.000Z
|
train_vqvae.py
|
bipashasen/How2Sign-Blob
|
6e2af881d96d477fdb93104b8e53d943765c64ff
|
[
"MIT"
] | 4
|
2021-10-14T22:18:47.000Z
|
2022-03-30T13:03:07.000Z
|
train_vqvae.py
|
bipashasen/How2Sign-Blob
|
6e2af881d96d477fdb93104b8e53d943765c64ff
|
[
"MIT"
] | 3
|
2022-01-13T20:22:39.000Z
|
2022-03-03T12:52:44.000Z
|
import torch
from dataset import DrivingDataset
from vidvqvae import VQVAE
from torch.utils.data import DataLoader
from torch.utils.data import random_split
import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import ModelCheckpoint
dataset = DrivingDataset("./generative", frames=16, skip=16)
print(len(dataset))
train_set, val_set = random_split(dataset, [10000, 3009], generator=torch.Generator().manual_seed(42))
train_loader = DataLoader(train_set, batch_size=16, num_workers=12)
val_loader = DataLoader(val_set, batch_size=8, num_workers=12)
model = VQVAE(
in_channel=3,
channel=128,
n_res_block=2,
n_res_channel=32,
embed_dim=64,
n_embed=512,
decay=0.99
)
# wandb_logger = WandbLogger(project="VidVQVAE", log_model="all")
# wandb_logger.watch(model)
# checkpoint_callback = ModelCheckpoint(monitor="val_loss")
# trainer = pl.Trainer(gpus=1, logger=wandb_logger, callbacks=[checkpoint_callback])
trainer = pl.Trainer(gpus=1)
trainer.fit(model, train_loader, val_loader)
| 31.5
| 102
| 0.782446
|
import torch
from dataset import DrivingDataset
from vidvqvae import VQVAE
from torch.utils.data import DataLoader
from torch.utils.data import random_split
import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import ModelCheckpoint
dataset = DrivingDataset("./generative", frames=16, skip=16)
print(len(dataset))
train_set, val_set = random_split(dataset, [10000, 3009], generator=torch.Generator().manual_seed(42))
train_loader = DataLoader(train_set, batch_size=16, num_workers=12)
val_loader = DataLoader(val_set, batch_size=8, num_workers=12)
model = VQVAE(
in_channel=3,
channel=128,
n_res_block=2,
n_res_channel=32,
embed_dim=64,
n_embed=512,
decay=0.99
)
# wandb_logger = WandbLogger(project="VidVQVAE", log_model="all")
# wandb_logger.watch(model)
# checkpoint_callback = ModelCheckpoint(monitor="val_loss")
# trainer = pl.Trainer(gpus=1, logger=wandb_logger, callbacks=[checkpoint_callback])
trainer = pl.Trainer(gpus=1)
trainer.fit(model, train_loader, val_loader)
| 0
| 0
| 0
|
16460dd8cef3c4ca6e999f16951e9835cac32996
| 3,369
|
py
|
Python
|
euca2ools/commands/autoscaling/terminateinstanceinautoscalinggroup.py
|
salewski/euca2ools
|
6b3f62f2cb1c54f14d3bfa5fd92dab3c0ecafecb
|
[
"BSD-2-Clause"
] | 30
|
2015-02-10T05:47:38.000Z
|
2022-01-20T08:48:43.000Z
|
euca2ools/commands/autoscaling/terminateinstanceinautoscalinggroup.py
|
salewski/euca2ools
|
6b3f62f2cb1c54f14d3bfa5fd92dab3c0ecafecb
|
[
"BSD-2-Clause"
] | 16
|
2015-01-08T23:24:34.000Z
|
2018-07-18T07:15:40.000Z
|
euca2ools/commands/autoscaling/terminateinstanceinautoscalinggroup.py
|
salewski/euca2ools
|
6b3f62f2cb1c54f14d3bfa5fd92dab3c0ecafecb
|
[
"BSD-2-Clause"
] | 19
|
2015-05-07T05:34:42.000Z
|
2020-12-13T10:50:14.000Z
|
# Copyright 2013 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
from requestbuilder import Arg, MutuallyExclusiveArgList
from requestbuilder.mixins import TabifyingMixin
from euca2ools.commands.autoscaling import AutoScalingRequest
| 48.826087
| 79
| 0.666667
|
# Copyright 2013 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
from requestbuilder import Arg, MutuallyExclusiveArgList
from requestbuilder.mixins import TabifyingMixin
from euca2ools.commands.autoscaling import AutoScalingRequest
class TerminateInstanceInAutoScalingGroup(AutoScalingRequest,
TabifyingMixin):
DESCRIPTION = "Manually terminate an auto-scaling instance"
ARGS = [Arg('InstanceId', metavar='INSTANCE',
help='ID of the instance to terminate (required)'),
MutuallyExclusiveArgList(
Arg('-d', '--decrement-desired-capacity', action='store_const',
dest='ShouldDecrementDesiredCapacity', const='true',
help='''also reduce the desired capacity of the
auto-scaling group by 1'''),
Arg('-D', '--no-decrement-desired-capacity',
dest='ShouldDecrementDesiredCapacity',
action='store_const', const='false',
help='''leave the auto-scaling group's desired capacity
as-is. A new instance may be launched to compensate for
the one being terminated.'''))
.required(),
Arg('--show-long', action='store_true', route_to=None,
help='show extra info about the instance being terminated'),
Arg('-f', '--force', action='store_true', route_to=None,
help=argparse.SUPPRESS)] # for compatibility
def print_result(self, result):
activity = result['Activity']
bits = ['INSTANCE',
activity.get('ActivityId'),
activity.get('EndTime'),
activity.get('StatusCode'),
activity.get('Cause')]
if self.args['show_long']:
bits.append(activity.get('StatusMessage'))
bits.append(activity.get('Progress'))
bits.append(activity.get('Description'))
bits.append(activity.get('StartTime'))
        print(self.tabify(bits))
| 520
| 1,297
| 23
|
c055b680671015ae685487c0a21864dbeeb5d7bc
| 100
|
py
|
Python
|
Test.py
|
JonatasMSS/PythonProjects
|
795218987a4b50e7b4f62aa910d6647b8f91593b
|
[
"MIT"
] | null | null | null |
Test.py
|
JonatasMSS/PythonProjects
|
795218987a4b50e7b4f62aa910d6647b8f91593b
|
[
"MIT"
] | null | null | null |
Test.py
|
JonatasMSS/PythonProjects
|
795218987a4b50e7b4f62aa910d6647b8f91593b
|
[
"MIT"
] | null | null | null |
from ColorText import ColorText
Texto = ColorText.mudaCor('Olá Mundo!','blue','lo')
print(Texto)
| 25
| 52
| 0.73
|
from ColorText import ColorText
Texto = ColorText.mudaCor('Olá Mundo!','blue','lo')
print(Texto)
| 0
| 0
| 0
|
3b94585921a2a79178522ecf37a1d9d48c8968b5
| 5,484
|
py
|
Python
|
test/test_colors.py
|
braniii/prettypyplot
|
39d7d133fe0dc6699fafd57e00a0ec07672fd344
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_colors.py
|
braniii/prettypyplot
|
39d7d133fe0dc6699fafd57e00a0ec07672fd344
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_colors.py
|
braniii/prettypyplot
|
39d7d133fe0dc6699fafd57e00a0ec07672fd344
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Tests for the color module.
BSD 3-Clause License
Copyright (c) 2020-2021, Daniel Nagel
All rights reserved.
"""
import numpy as np
import pytest
from matplotlib import colors as clr
import prettypyplot
@pytest.mark.parametrize('num, kwargs, error', [
(1, {}, None),
(2, {'high': 2}, None),
(2, {}, ValueError),
('a', {}, TypeError),
((1, 2), {}, TypeError),
])
def test__is_number_in_range(num, kwargs, error):
"""Test if number is in range."""
if error is None:
prettypyplot.colors._is_number_in_range(num, **kwargs)
else:
with pytest.raises(error):
prettypyplot.colors._is_number_in_range(num, **kwargs)
@pytest.mark.parametrize('L1, L2, refcontrast', [
(1, 0, 21), (0.5, 0.5, 1),
])
def test__contrast(L1, L2, refcontrast):
"""Test contrast."""
for l1, l2 in ((L1, L2), (L2, L1)):
contrast = prettypyplot.colors._contrast(l1, l2)
assert contrast == refcontrast
@pytest.mark.parametrize('rgb, refluminace', [
((1, 1, 1), 1),
((1, 0, 0), 0.2126),
((0, 1, 0), 0.7152),
((0, 0, 0), 0),
])
def test__relative_luminance(rgb, refluminace):
"""Test luminance."""
luminance = prettypyplot.colors._relative_luminance(rgb)
assert luminance == refluminace
@pytest.mark.parametrize('bgcolor, kwargs, refcolor, error', [
('w', {}, '#000000', None),
('b', {}, '#ffffff', None),
('w', {'colors': ('r', 'w', 'k')}, clr.to_rgb('k'), None),
('w', {'colors': ('r', 'w')}, clr.to_rgb('r'), None),
('#505050', {}, '#ffffff', None),
('#a0a0a0', {}, '#000000', None),
('notAColorCode', {}, None, ValueError),
('w', {'colors': ('notAColorCode')}, None, ValueError),
])
def test_text_color(bgcolor, kwargs, refcolor, error):
"""Test estimate text color."""
if error is None:
color = prettypyplot.colors.text_color(bgcolor, **kwargs)
assert clr.to_rgb(color) == clr.to_rgb(refcolor)
else:
with pytest.raises(error):
prettypyplot.colors.text_color(bgcolor, **kwargs)
@pytest.mark.parametrize('color, refbool, error', [
('k', True, None),
('w', True, None),
('r', False, None),
('#212121', True, None),
('#212122', False, None),
('NoColorCode', None, ValueError),
])
def test_is_grayshade(color, refbool, error):
"""Test if color is gray shade."""
if error is None:
assert refbool == prettypyplot.colors.is_greyshade(color)
else:
with pytest.raises(error):
prettypyplot.colors.is_greyshade(color)
@pytest.mark.parametrize('nsc, color, kwargs, refcolors, error', [
(2, 'k', {}, [[0, 0, 0], [0.75, 0.75, 0.75]], None),
(2, 'k', {'return_hex': False}, ['#000000', '#bfbfbf'], None),
(2, 'k', {'return_hex': True}, ['#000000', '#bfbfbf'], None),
(3, 'r', {}, ['#ff0000', '#ff6060', '#ffbfbf'], None),
(3, 'NoColorCoder', {}, None, ValueError),
(1.2, 'k', {}, None, TypeError),
('s', 'k', {}, None, TypeError),
(0, 'k', {}, None, ValueError),
(-5, 'k', {}, None, ValueError),
])
def test_categorical_color(nsc, color, kwargs, refcolors, error):
"""Test categorical color."""
if error is None:
colors = prettypyplot.colors.categorical_color(nsc, color, **kwargs)
# convert colors to hex
if 'return_hex' not in kwargs or not kwargs['return_hex']:
colors = [clr.to_hex(c) for c in colors]
assert all(
c == clr.to_hex(rc) for c, rc in zip(colors, refcolors)
)
else:
with pytest.raises(error):
prettypyplot.colors.categorical_color(nsc, color, **kwargs)
@pytest.mark.parametrize('nc, nsc, kwargs, ref, error', [
(
2,
2,
{'cmap': 'tab10'},
[
[0.12, 0.47, 0.71],
[0.75, 0.9, 1.0],
[1.0, 0.5, 0.06],
[1.0, 0.87, 0.75],
],
None,
),
(
2,
2,
{},
[
[0.12, 0.47, 0.71],
[0.75, 0.9, 1.0],
[1.0, 0.5, 0.06],
[1.0, 0.87, 0.75],
],
None,
),
(
2,
2,
{'return_colors': True},
[
[0.12, 0.47, 0.71],
[0.75, 0.9, 1.0],
[1.0, 0.5, 0.06],
[1.0, 0.87, 0.75],
],
None,
),
(
1,
2,
{'cmap': 'jet'},
[[0.0, 0.0, 0.5], [0.75, 0.75, 1.0]],
None,
),
(2, 2, {'cmap': 'NoColorMap'}, None, ValueError),
(20, 2, {'cmap': 'tab10'}, None, ValueError),
(-2, 2, {}, None, ValueError),
(2, -2, {}, None, ValueError),
(2, -2, {}, None, ValueError),
])
def test_categorical_cmap(nc, nsc, kwargs, ref, error):
"""Test categorical cmap."""
if error is None:
colors = prettypyplot.colors.categorical_cmap(nc, nsc, **kwargs)
# convert colors to hex
if 'return_colors' in kwargs and kwargs['return_colors']:
colors = colors.reshape(-1, 3)
else:
colors = colors.colors
np.testing.assert_array_almost_equal(colors, ref, decimal=2)
else:
with pytest.raises(error):
prettypyplot.colors.categorical_cmap(nc, nsc, **kwargs)
# dummy coverage tests
def test_load_colors():
"""Check that no error get raised."""
prettypyplot.colors.load_colors()
def test_load_cmaps():
"""Check that no error get raised."""
prettypyplot.colors.load_cmaps()
| 28.712042
| 76
| 0.539752
|
# -*- coding: utf-8 -*-
"""Tests for the color module.
BSD 3-Clause License
Copyright (c) 2020-2021, Daniel Nagel
All rights reserved.
"""
import numpy as np
import pytest
from matplotlib import colors as clr
import prettypyplot
@pytest.mark.parametrize('num, kwargs, error', [
(1, {}, None),
(2, {'high': 2}, None),
(2, {}, ValueError),
('a', {}, TypeError),
((1, 2), {}, TypeError),
])
def test__is_number_in_range(num, kwargs, error):
"""Test if number is in range."""
if error is None:
prettypyplot.colors._is_number_in_range(num, **kwargs)
else:
with pytest.raises(error):
prettypyplot.colors._is_number_in_range(num, **kwargs)
@pytest.mark.parametrize('L1, L2, refcontrast', [
(1, 0, 21), (0.5, 0.5, 1),
])
def test__contrast(L1, L2, refcontrast):
"""Test contrast."""
for l1, l2 in ((L1, L2), (L2, L1)):
contrast = prettypyplot.colors._contrast(l1, l2)
assert contrast == refcontrast
@pytest.mark.parametrize('rgb, refluminace', [
((1, 1, 1), 1),
((1, 0, 0), 0.2126),
((0, 1, 0), 0.7152),
((0, 0, 0), 0),
])
def test__relative_luminance(rgb, refluminace):
"""Test luminance."""
luminance = prettypyplot.colors._relative_luminance(rgb)
assert luminance == refluminace
@pytest.mark.parametrize('bgcolor, kwargs, refcolor, error', [
('w', {}, '#000000', None),
('b', {}, '#ffffff', None),
('w', {'colors': ('r', 'w', 'k')}, clr.to_rgb('k'), None),
('w', {'colors': ('r', 'w')}, clr.to_rgb('r'), None),
('#505050', {}, '#ffffff', None),
('#a0a0a0', {}, '#000000', None),
('notAColorCode', {}, None, ValueError),
('w', {'colors': ('notAColorCode')}, None, ValueError),
])
def test_text_color(bgcolor, kwargs, refcolor, error):
"""Test estimate text color."""
if error is None:
color = prettypyplot.colors.text_color(bgcolor, **kwargs)
assert clr.to_rgb(color) == clr.to_rgb(refcolor)
else:
with pytest.raises(error):
prettypyplot.colors.text_color(bgcolor, **kwargs)
@pytest.mark.parametrize('color, refbool, error', [
('k', True, None),
('w', True, None),
('r', False, None),
('#212121', True, None),
('#212122', False, None),
('NoColorCode', None, ValueError),
])
def test_is_grayshade(color, refbool, error):
"""Test if color is gray shade."""
if error is None:
assert refbool == prettypyplot.colors.is_greyshade(color)
else:
with pytest.raises(error):
prettypyplot.colors.is_greyshade(color)
@pytest.mark.parametrize('nsc, color, kwargs, refcolors, error', [
(2, 'k', {}, [[0, 0, 0], [0.75, 0.75, 0.75]], None),
(2, 'k', {'return_hex': False}, ['#000000', '#bfbfbf'], None),
(2, 'k', {'return_hex': True}, ['#000000', '#bfbfbf'], None),
(3, 'r', {}, ['#ff0000', '#ff6060', '#ffbfbf'], None),
(3, 'NoColorCoder', {}, None, ValueError),
(1.2, 'k', {}, None, TypeError),
('s', 'k', {}, None, TypeError),
(0, 'k', {}, None, ValueError),
(-5, 'k', {}, None, ValueError),
])
def test_categorical_color(nsc, color, kwargs, refcolors, error):
"""Test categorical color."""
if error is None:
colors = prettypyplot.colors.categorical_color(nsc, color, **kwargs)
# convert colors to hex
if 'return_hex' not in kwargs or not kwargs['return_hex']:
colors = [clr.to_hex(c) for c in colors]
assert all(
c == clr.to_hex(rc) for c, rc in zip(colors, refcolors)
)
else:
with pytest.raises(error):
prettypyplot.colors.categorical_color(nsc, color, **kwargs)
@pytest.mark.parametrize('nc, nsc, kwargs, ref, error', [
(
2,
2,
{'cmap': 'tab10'},
[
[0.12, 0.47, 0.71],
[0.75, 0.9, 1.0],
[1.0, 0.5, 0.06],
[1.0, 0.87, 0.75],
],
None,
),
(
2,
2,
{},
[
[0.12, 0.47, 0.71],
[0.75, 0.9, 1.0],
[1.0, 0.5, 0.06],
[1.0, 0.87, 0.75],
],
None,
),
(
2,
2,
{'return_colors': True},
[
[0.12, 0.47, 0.71],
[0.75, 0.9, 1.0],
[1.0, 0.5, 0.06],
[1.0, 0.87, 0.75],
],
None,
),
(
1,
2,
{'cmap': 'jet'},
[[0.0, 0.0, 0.5], [0.75, 0.75, 1.0]],
None,
),
(2, 2, {'cmap': 'NoColorMap'}, None, ValueError),
(20, 2, {'cmap': 'tab10'}, None, ValueError),
(-2, 2, {}, None, ValueError),
(2, -2, {}, None, ValueError),
(2, -2, {}, None, ValueError),
])
def test_categorical_cmap(nc, nsc, kwargs, ref, error):
"""Test categorical cmap."""
if error is None:
colors = prettypyplot.colors.categorical_cmap(nc, nsc, **kwargs)
# convert colors to hex
if 'return_colors' in kwargs and kwargs['return_colors']:
colors = colors.reshape(-1, 3)
else:
colors = colors.colors
np.testing.assert_array_almost_equal(colors, ref, decimal=2)
else:
with pytest.raises(error):
prettypyplot.colors.categorical_cmap(nc, nsc, **kwargs)
# dummy coverage tests
def test_load_colors():
"""Check that no error get raised."""
prettypyplot.colors.load_colors()
def test_load_cmaps():
"""Check that no error get raised."""
prettypyplot.colors.load_cmaps()
| 0
| 0
| 0
|
83c0d62cdfe8ec13e3ea83047998446d9035f49a
| 927
|
py
|
Python
|
tests/conftest.py
|
RasaHQ/rasa_stack
|
f2a5637fee5bda0faa9fc691c453a40cffc36666
|
[
"Apache-2.0"
] | 11
|
2018-11-03T20:20:36.000Z
|
2020-07-22T08:28:40.000Z
|
tests/conftest.py
|
RasaHQ/rasa_stack
|
f2a5637fee5bda0faa9fc691c453a40cffc36666
|
[
"Apache-2.0"
] | 10
|
2019-01-24T13:25:11.000Z
|
2019-03-13T16:26:16.000Z
|
tests/conftest.py
|
RasaHQ/rasa_stack
|
f2a5637fee5bda0faa9fc691c453a40cffc36666
|
[
"Apache-2.0"
] | 4
|
2019-02-10T10:48:38.000Z
|
2021-07-22T07:01:29.000Z
|
from typing import Text
import os
import pytest
from rasa.constants import (DEFAULT_DOMAIN_PATH, DEFAULT_CONFIG_PATH,
DEFAULT_MODELS_PATH, DEFAULT_DATA_PATH)
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
| 25.75
| 69
| 0.734628
|
from typing import Text
import os
import pytest
from rasa.constants import (DEFAULT_DOMAIN_PATH, DEFAULT_CONFIG_PATH,
DEFAULT_MODELS_PATH, DEFAULT_DATA_PATH)
@pytest.fixture(scope="session")
def project() -> Text:
import tempfile
from rasa.cli.scaffold import _create_initial_project
directory = tempfile.mkdtemp()
_create_initial_project(directory)
return directory
def train_model(project: Text, filename: Text = "test.tar.gz"):
import rasa.train
output = os.path.join(project, DEFAULT_MODELS_PATH, filename)
domain = os.path.join(project, DEFAULT_DOMAIN_PATH)
config = os.path.join(project, DEFAULT_CONFIG_PATH)
training_files = os.path.join(project, DEFAULT_DATA_PATH)
rasa.train(domain, config, training_files, output)
return output
@pytest.fixture(scope="session")
def trained_model(project) -> Text:
return train_model(project)
| 602
| 0
| 67
|
3d3b585878eec40a93008097771d36002f81b5a6
| 1,157
|
py
|
Python
|
test/test_01_decode_encode.py
|
hashberg-io/bases
|
9306573ee3794947efb91b70087d62f98607cadc
|
[
"MIT"
] | 1
|
2021-12-31T19:29:55.000Z
|
2021-12-31T19:29:55.000Z
|
test/test_01_decode_encode.py
|
hashberg-io/bases
|
9306573ee3794947efb91b70087d62f98607cadc
|
[
"MIT"
] | null | null | null |
test/test_01_decode_encode.py
|
hashberg-io/bases
|
9306573ee3794947efb91b70087d62f98607cadc
|
[
"MIT"
] | null | null | null |
# pylint: disable = missing-docstring
import pytest
from bases import encoding
from bases.encoding import BaseEncoding
from bases import random
from bases.random import rand_str
random.set_options(min_bytes=0, max_bytes=16)
nsamples = 1024
@pytest.mark.parametrize("enc_name,enc", list(encoding.table()))
| 34.029412
| 82
| 0.666379
|
# pylint: disable = missing-docstring
import pytest
from bases import encoding
from bases.encoding import BaseEncoding
from bases import random
from bases.random import rand_str
random.set_options(min_bytes=0, max_bytes=16)
nsamples = 1024
def _test_decode_encode(i: int, s: str, enc_name: str, enc: BaseEncoding) -> None:
try:
error_msg = f"encoding {repr(enc_name)} failed at #{i} = {repr(s)}"
b = enc.decode(s)
error_msg += f" with b = {list(b)}"
s_enc = enc.encode(b)
error_msg += f" and s_enc = {repr(s_enc)}"
s_canonical = enc.canonical_string(s)
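        # Compare against the canonical spelling rather than s itself: an encoding
        # may accept non-canonical input (e.g. case or padding variants), while
        # re-encoding the decoded bytes always yields the canonical form.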
if s_canonical != s:
error_msg += f" where s_canonical = {repr(s_canonical)}"
assert s_enc == s_canonical, error_msg
except Exception as e:
if not isinstance(e, AssertionError):
raise Exception(error_msg) from e
raise e
@pytest.mark.parametrize("enc_name,enc", list(encoding.table()))
def test_decode_encode(enc_name: str, enc: BaseEncoding) -> None:
test_data = rand_str(nsamples, encoding=enc)
for i, s in enumerate(test_data):
_test_decode_encode(i, s, enc_name, enc)
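For context, a minimal round-trip sketch of the API the test above exercises; it assumes, as the parametrisation suggests, that encoding.table() yields (name, BaseEncoding) pairs and that decode() returns the original bytes:
from bases import encoding
name, enc = next(iter(encoding.table()))   # first registered encoding
data = bytes([0, 1, 2, 255])
text = enc.encode(data)                    # bytes -> base-encoded string
assert enc.decode(text) == data            # decoding recovers the input
print(name, text)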
| 803
| 0
| 45
|
63c538aa0950d31879a3e1554c0b16512302bf07
| 136
|
py
|
Python
|
schemas/error_response.py
|
victorgrubio/flask-mongo-template-api
|
b82f444eb7daa2f7ee7c1ceb46046b7f2322b991
|
[
"Apache-2.0"
] | null | null | null |
schemas/error_response.py
|
victorgrubio/flask-mongo-template-api
|
b82f444eb7daa2f7ee7c1ceb46046b7f2322b991
|
[
"Apache-2.0"
] | null | null | null |
schemas/error_response.py
|
victorgrubio/flask-mongo-template-api
|
b82f444eb7daa2f7ee7c1ceb46046b7f2322b991
|
[
"Apache-2.0"
] | null | null | null |
from marshmallow import Schema, fields
| 27.2
| 66
| 0.779412
|
from marshmallow import Schema, fields
class ErrorResponse(Schema):
error = fields.String(description="Error returned by the API")
| 0
| 74
| 23
|
f184523890bcf9b76f0ab06d67dc6de838d0acf5
| 710
|
py
|
Python
|
pystratis/api/connectionmanager/requestmodels/addnoderequest.py
|
TjadenFroyda/pyStratis
|
9cc7620d7506637f8a2b84003d931eceb36ac5f2
|
[
"MIT"
] | 8
|
2021-06-30T20:44:22.000Z
|
2021-12-07T14:42:22.000Z
|
pystratis/api/connectionmanager/requestmodels/addnoderequest.py
|
TjadenFroyda/pyStratis
|
9cc7620d7506637f8a2b84003d931eceb36ac5f2
|
[
"MIT"
] | 2
|
2021-07-01T11:50:18.000Z
|
2022-01-25T18:39:49.000Z
|
pystratis/api/connectionmanager/requestmodels/addnoderequest.py
|
TjadenFroyda/pyStratis
|
9cc7620d7506637f8a2b84003d931eceb36ac5f2
|
[
"MIT"
] | 4
|
2021-07-01T04:36:42.000Z
|
2021-09-17T10:54:19.000Z
|
from pydantic import validator, Field
from pystratis.api import Model
# noinspection PyUnresolvedReferences
class AddNodeRequest(Model):
"""A request model for the connectionmanager/addnode endpoint.
Args:
ipaddr (str): The endpoint.
command (str): Allowed commands [add, remove, onetry]
"""
ipaddr: str = Field(alias='endpoint')
command: str
# noinspection PyMethodParameters,PyUnusedLocal
@validator('command')
| 26.296296
| 68
| 0.625352
|
from pydantic import validator, Field
from pystratis.api import Model
# noinspection PyUnresolvedReferences
class AddNodeRequest(Model):
"""A request model for the connectionmanager/addnode endpoint.
Args:
ipaddr (str): The endpoint.
command (str): Allowed commands [add, remove, onetry]
"""
ipaddr: str = Field(alias='endpoint')
command: str
# noinspection PyMethodParameters,PyUnusedLocal
@validator('command')
def validate_command(cls, v, values):
allowed = [
'add',
'remove',
'onetry'
]
if v not in allowed:
raise ValueError(f'Invalid command. Must be: {allowed}')
return v
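A hypothetical usage sketch of the validator above; populating the field through the endpoint alias and the exact exception wrapping depend on the shared pystratis Model base and the pydantic version, which are assumptions here:
# The endpoint value is a placeholder.
node = AddNodeRequest(endpoint='127.0.0.1:17105', command='add')
try:
    AddNodeRequest(endpoint='127.0.0.1:17105', command='bogus')
except ValueError as err:  # pydantic's ValidationError subclasses ValueError
    print(err)             # reports: Invalid command. Must be: ['add', 'remove', 'onetry']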
| 223
| 0
| 26
|
3f510b14969b96c8ab32bc2699f596314fa605f0
| 61
|
py
|
Python
|
project.py
|
paraizofelipe/kong-wrapper
|
798917292cb089f98548af0098387c3a5d00f3ba
|
[
"BSD-3-Clause"
] | 1
|
2017-10-18T03:21:40.000Z
|
2017-10-18T03:21:40.000Z
|
project.py
|
paraizofelipe/kong-wrapper
|
798917292cb089f98548af0098387c3a5d00f3ba
|
[
"BSD-3-Clause"
] | null | null | null |
project.py
|
paraizofelipe/kong-wrapper
|
798917292cb089f98548af0098387c3a5d00f3ba
|
[
"BSD-3-Clause"
] | null | null | null |
import os
PATH = os.path.abspath(os.path.dirname(__file__))
| 15.25
| 49
| 0.754098
|
import os
PATH = os.path.abspath(os.path.dirname(__file__))
| 0
| 0
| 0
|
c79c2009cad0bd80495b4043b43b3b05c18be1dd
| 1,465
|
py
|
Python
|
django_popup_view_field/fields.py
|
g1sky/django-popup-view-field
|
69eec162e336ebc82deaa2910c870bb53b216ab4
|
[
"MIT"
] | null | null | null |
django_popup_view_field/fields.py
|
g1sky/django-popup-view-field
|
69eec162e336ebc82deaa2910c870bb53b216ab4
|
[
"MIT"
] | null | null | null |
django_popup_view_field/fields.py
|
g1sky/django-popup-view-field
|
69eec162e336ebc82deaa2910c870bb53b216ab4
|
[
"MIT"
] | null | null | null |
import urllib
from django.forms.fields import CharField
from django.utils.translation import ugettext_lazy as _
from django.views.generic import View
from .exceptions import PopupViewIsNotSubclassView
from .widgets import PopupViewWidget
| 33.295455
| 94
| 0.662116
|
import urllib
from django.forms.fields import CharField
from django.utils.translation import ugettext_lazy as _
from django.views.generic import View
from .exceptions import PopupViewIsNotSubclassView
from .widgets import PopupViewWidget
class PopupViewField(CharField):
def __init__(self, view_class, attrs=None, *args, **kwargs):
"""
view_class : View Class used to render content popup dialog
view_class must be subclass of django.views.generic.View
"""
# Check view_class inherit from django View
if not issubclass(view_class, View):
raise PopupViewIsNotSubclassView()
view_class_name = view_class.__name__
popup_dialog_title = kwargs.pop("popup_dialog_title", _("Popup Dialog: Select value"))
callback_data = kwargs.pop("callback_data", {})
if not isinstance(callback_data, dict):
raise AttributeError("callback_data argument must be a dictionary")
try:
callback_data = urllib.urlencode(callback_data)
except AttributeError:
callback_data = urllib.parse.urlencode(callback_data)
super(PopupViewField, self).__init__(
widget=PopupViewWidget(
view_class_name=view_class_name,
popup_dialog_title=popup_dialog_title,
callback_data=callback_data,
attrs=attrs
),
*args,
**kwargs
)
| 0
| 1,201
| 23
|
213f2cd6824b25ae045c553e6540cce858ad3289
| 3,084
|
py
|
Python
|
gen_page.py
|
scristall/poe_gen_gwennen
|
6579424a382ecfb332c4f5aa48ba3b789e1d8079
|
[
"MIT"
] | null | null | null |
gen_page.py
|
scristall/poe_gen_gwennen
|
6579424a382ecfb332c4f5aa48ba3b789e1d8079
|
[
"MIT"
] | null | null | null |
gen_page.py
|
scristall/poe_gen_gwennen
|
6579424a382ecfb332c4f5aa48ba3b789e1d8079
|
[
"MIT"
] | null | null | null |
from browser import document as doc
from browser.html import TABLE, TR, TH, TD, INPUT, SELECT, OPTION, DIV, BUTTON, SPAN, LI, H2, H3, IMG, COLGROUP, COL, P, SECTION, BR
from json import load
from last_update import time
# Create the static elements of the home page
init_page()
doc['loading'] <= DIV(Id='prerendered')
| 68.533333
| 345
| 0.690013
|
from browser import document as doc
from browser.html import TABLE, TR, TH, TD, INPUT, SELECT, OPTION, DIV, BUTTON, SPAN, LI, H2, H3, IMG, COLGROUP, COL, P, SECTION, BR
from json import load
from last_update import time
# Create the static elements of the home page
def init_page():
doc['time'].text = f"poe.ninja data last updated at {time} PST"
# selected
cst = SELECT(Id=f"hide_low_value", Class=f"save onehundred")
for s in ['hide', 'show']:
cst <= OPTION(s.capitalize(), value=s)
always_show = SELECT(Id=f"always_show", Class=f"save onehundred")
for s in ['show', 'hide']:
always_show <= OPTION(s.capitalize(), value=s)
min_val = INPUT(Type='number', min='0', step="1", value='20', Id="chaos_filter", Class='save')
t = TABLE(TR(TH() + TH('Selection')))
t <= TR(TD("Always show selected rows:", Class="right_text") + TD(always_show))
t <= TR(TD("Show low value items in row:", Class="right_text") + TD(cst))
t <= TR(TD("Minimum Chaos value to show:", Class="right_text") + TD(min_val))
t <= TR(TD("Keyword(s) Search:", Class="right_text") + TD(INPUT(Type='text', Id="keywords", Class='save') + BUTTON('x', Id='clear_keywords')))
doc['show_hide'] <= t + P("Hit enter or click outside the inputs to update the page. Items that are selected are only changed by using either button at the top of the list or clicking the box in the list.")
doc['show_hide'] <= P("Note that the keyword search overrides all other filter settings. Clear keyword search to use them. Search can display an empty row if it's the combination of two or more uniques on a base that matches the search terms. EG gold rim will show Viridian Jewel base because 2 separate uniques partially match the search.")
doc['show_hide'] <= DIV(BUTTON("Generate String", Id='generate') + " Will generate search strings based on all selected rows. This will cause many calculations and may take a bit to return a result")
doc['show_hide'] <= DIV("No strings generated yet.", Id="generated_strings", Class='sec_div grind')
doc['show_hide'] <= DIV(BUTTON("Select All Visible Only", Id='select_visible') + " This will deselect all hidden rows and select all visible rows.")
doc['show_hide'] <= BUTTON("Clear Selected", Id='clear_selected')
# Load and display league specific unique data
t = TABLE(TR(TH("Selected", Class='col_1') + TH("Base", Class='col_2') + TH("Item(s)")), Class="borders onehundred")
with open(f'{doc.query.getvalue("league", "sc")}_unique.json') as f:
data = load(f)
for base in data:
base_l = base.lower()
v = (DIV(IMG(src=x[2], alt=x[0], title=x[0], Class='item_icon', loading="lazy") + DIV(x[1], Class='bottom-right'), Class='container', data_value=x[1], data_search=f"{base_l}, {x[0].lower()}") for x in data[base])
searchstring = ', '.join([base_l] + [x[0].lower() for x in data[base]])
t <= TR(TD(INPUT(Id=f"check-{base_l.replace(' ', '_')}", type='checkbox', data_id=base_l, Class='save')) + TD(base) + TD(v), data_id=base_l, data_value=data[base][0][1], data_search=searchstring)
doc['items'] <= t
init_page()
doc['loading'] <= DIV(Id='prerendered')
| 2,740
| 0
| 22
|
5acacf353c21991d44914adbe24f3d385bb6a103
| 2,944
|
py
|
Python
|
pyoptflow/utils.py
|
juhi24/fmio-server
|
3add2a2faab06637b6cf0a4ed337ef62b8188e0f
|
[
"MIT"
] | null | null | null |
pyoptflow/utils.py
|
juhi24/fmio-server
|
3add2a2faab06637b6cf0a4ed337ef62b8188e0f
|
[
"MIT"
] | null | null | null |
pyoptflow/utils.py
|
juhi24/fmio-server
|
3add2a2faab06637b6cf0a4ed337ef62b8188e0f
|
[
"MIT"
] | 2
|
2017-10-28T18:41:40.000Z
|
2020-05-12T12:50:52.000Z
|
"""Conversion functions for weather radar and rainfall data."""
from numpy import isfinite, log, ubyte
from scipy.ndimage import gaussian_filter
from skimage.exposure import equalize_hist, rescale_intensity
def dBZ_to_ubyte(I, dBZ_min=-10.0, dBZ_max=50.0, filter_stddev=3.0):
"""Convert a dBZ field into a 8-bit image, as required by Optflow. Optionally,
apply a Gaussian smoothing filter.
Parameters
----------
I : array-like
The dBZ field.
dBZ_min : float
Minimum dBZ. Values smaller than dBZ_min are set to dBZ_min. If None,
dBZ_min is computed from I.
dBZ_max : float
Maximum dBZ. Values greater than dBZ_max are set to dBZ_max. If None,
dBZ_max is computed from I.
filter_stddev : float
Standard deviation of the Gaussian filter (0=no filtering)
Returns
-------
out : ndarray(dtype=ubyte)
The processed dBZ field.
"""
I = I.copy()
MASK = isfinite(I)
if dBZ_min == None:
dBZ_min = min(I[MASK])
if dBZ_max == None:
dBZ_max = max(I[MASK])
I[~MASK] = dBZ_min
I[I < dBZ_min] = dBZ_min
I[I > dBZ_max] = dBZ_max
if filter_stddev > 0.0:
I = gaussian_filter(I, filter_stddev, mode="reflect")
I = ((I - dBZ_min) / (dBZ_max - dBZ_min)) * 255.0
return I.astype(ubyte)
def rainfall_to_ubyte(I, R_min=0.1, R_max=40.0, filter_stddev=3.0, logtrans=False):
"""Convert a rainfall intensity field into a 8-bit image, as required by
Optflow. Optionally, apply a Gaussian smoothing filter.
Parameters
----------
I : array-like
The input rainfall field.
R_min : float
Minimum rainfall intensity. Values smaller than R_min are set to R_min.
If None, R_min is computed from I.
R_max : float
Maximum rainfall intensity. Values greater than R_max are set to R_max.
If None, R_max is computed from I.
filter_stddev : float
Standard deviation of the Gaussian filter (0=no filtering)
logtrans : bool
If True, apply a log-transform to the input rainfall field. In this case,
R_min must be nonzero.
Returns
-------
out : ndarray(dtype=ubyte)
The processed rainfall field.
"""
I = I.copy()
MASK = isfinite(I)
if R_min == None:
R_min = min(I[MASK])
if R_max == None:
R_max = max(I[MASK])
I[~MASK] = R_min
I[I < R_min] = R_min
I[I > R_max] = R_max
if logtrans == True:
if R_min == 0.0:
raise ValueError("R_min must be nonzero if log-transform is used")
I = log(I)
R_min = log(R_min)
R_max = log(R_max)
# TESTING
#I = rescale_intensity(I, (R_min, R_max), (0.0, 1.0))
#I = equalize_hist(I)
#I = ((I - min(I)) / (max(I) - min(I))) * 255.0
MASK = I > R_min
# TODO: Make the threshold 128 configurable.
I[MASK] = 128.0 + ((I[MASK] - R_min) / (R_max - R_min)) * (255.0 - 128.0)
I[~MASK] = 0.0
I = I.astype(ubyte)
if filter_stddev > 0.0:
I = gaussian_filter(I, filter_stddev, mode="reflect")
return I
| 27.259259
| 83
| 0.647079
|
"""Conversion functions for weather radar and rainfall data."""
from numpy import isfinite, log, ubyte
from scipy.ndimage import gaussian_filter
from skimage.exposure import equalize_hist, rescale_intensity
def dBZ_to_ubyte(I, dBZ_min=-10.0, dBZ_max=50.0, filter_stddev=3.0):
"""Convert a dBZ field into a 8-bit image, as required by Optflow. Optionally,
apply a Gaussian smoothing filter.
Parameters
----------
I : array-like
The dBZ field.
dBZ_min : float
Minimum dBZ. Values smaller than dBZ_min are set to dBZ_min. If None,
dBZ_min is computed from I.
dBZ_max : float
Maximum dBZ. Values greater than dBZ_max are set to dBZ_max. If None,
dBZ_max is computed from I.
filter_stddev : float
Standard deviation of the Gaussian filter (0=no filtering)
Returns
-------
out : ndarray(dtype=ubyte)
The processed dBZ field.
"""
I = I.copy()
MASK = isfinite(I)
if dBZ_min == None:
dBZ_min = min(I[MASK])
if dBZ_max == None:
dBZ_max = max(I[MASK])
I[~MASK] = dBZ_min
I[I < dBZ_min] = dBZ_min
I[I > dBZ_max] = dBZ_max
if filter_stddev > 0.0:
I = gaussian_filter(I, filter_stddev, mode="reflect")
I = ((I - dBZ_min) / (dBZ_max - dBZ_min)) * 255.0
return I.astype(ubyte)
def rainfall_to_ubyte(I, R_min=0.1, R_max=40.0, filter_stddev=3.0, logtrans=False):
"""Convert a rainfall intensity field into a 8-bit image, as required by
Optflow. Optionally, apply a Gaussian smoothing filter.
Parameters
----------
I : array-like
The input rainfall field.
R_min : float
Minimum rainfall intensity. Values smaller than R_min are set to R_min.
If None, R_min is computed from I.
R_max : float
Maximum rainfall intensity. Values greater than R_max are set to R_max.
If None, R_max is computed from I.
filter_stddev : float
Standard deviation of the Gaussian filter (0=no filtering)
logtrans : bool
If True, apply a log-transform to the input rainfall field. In this case,
R_min must be nonzero.
Returns
-------
out : ndarray(dtype=ubyte)
The processed rainfall field.
"""
I = I.copy()
MASK = isfinite(I)
if R_min == None:
R_min = min(I[MASK])
if R_max == None:
R_max = max(I[MASK])
I[~MASK] = R_min
I[I < R_min] = R_min
I[I > R_max] = R_max
if logtrans == True:
if R_min == 0.0:
raise ValueError("R_min must be nonzero if log-transform is used")
I = log(I)
R_min = log(R_min)
R_max = log(R_max)
# TESTING
#I = rescale_intensity(I, (R_min, R_max), (0.0, 1.0))
#I = equalize_hist(I)
#I = ((I - min(I)) / (max(I) - min(I))) * 255.0
MASK = I > R_min
# TODO: Make the threshold 128 configurable.
I[MASK] = 128.0 + ((I[MASK] - R_min) / (R_max - R_min)) * (255.0 - 128.0)
I[~MASK] = 0.0
I = I.astype(ubyte)
if filter_stddev > 0.0:
I = gaussian_filter(I, filter_stddev, mode="reflect")
return I
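A small self-contained sketch of dBZ_to_ubyte on a toy field; the values are placeholders chosen to show how non-finite and out-of-range samples are clipped before scaling:
import numpy as np
field = np.array([[np.nan, -20.0, 0.0],
                  [25.0, 50.0, 80.0]])
img = dBZ_to_ubyte(field, filter_stddev=0.0)  # smoothing disabled for clarity
print(img.dtype, img.min(), img.max())        # uint8, 0 and 255 after clipping and scaling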
| 0
| 0
| 0
|
39ad90663cac888e7ef286d5a052d4d945b5475e
| 1,152
|
py
|
Python
|
scripts/fbx_importer/fbx_helper.py
|
tm8r/MayaFBXImporter
|
591bff828021e4ba03d05e9afc9016eaf2641967
|
[
"MIT"
] | 2
|
2019-10-25T17:11:33.000Z
|
2021-05-21T06:45:45.000Z
|
scripts/fbx_importer/fbx_helper.py
|
tm8r/MayaFBXImporter
|
591bff828021e4ba03d05e9afc9016eaf2641967
|
[
"MIT"
] | null | null | null |
scripts/fbx_importer/fbx_helper.py
|
tm8r/MayaFBXImporter
|
591bff828021e4ba03d05e9afc9016eaf2641967
|
[
"MIT"
] | 2
|
2020-05-13T18:07:02.000Z
|
2020-05-13T19:54:37.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from .vendor.Qt import QtWidgets
from .libs.maya import fbx
from .libs.maya import namespace
from . import history_helper
def import_fbx(path, import_mode, parent):
"""import fbx
Args:
path (unicode): path
import_mode (.libs.maya.fbx.FBXImportMode): import mode
parent (QtWidgets.QWidget): parent
"""
namespaces = namespace.get_namespaces(return_separator=True, return_root=True)
if len(namespaces) == 1:
fbx.import_fbx(path, import_mode, namespaces[0])
history_helper.add_recent_file(path)
return
ns, confirmed = QtWidgets.QInputDialog.getItem(parent,
"Select Namespace",
"Namespace",
namespaces,
0,
False)
if not confirmed:
return
fbx.import_fbx(path, import_mode, ns)
history_helper.add_recent_file(path)
| 31.135135
| 82
| 0.547743
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from .vendor.Qt import QtWidgets
from .libs.maya import fbx
from .libs.maya import namespace
from . import history_helper
def import_fbx(path, import_mode, parent):
"""import fbx
Args:
path (unicode): path
import_mode (.libs.maya.fbx.FBXImportMode): import mode
parent (QtWidgets.QWidget): parent
"""
namespaces = namespace.get_namespaces(return_separator=True, return_root=True)
if len(namespaces) == 1:
fbx.import_fbx(path, import_mode, namespaces[0])
history_helper.add_recent_file(path)
return
ns, confirmed = QtWidgets.QInputDialog.getItem(parent,
"Select Namespace",
"Namespace",
namespaces,
0,
False)
if not confirmed:
return
fbx.import_fbx(path, import_mode, ns)
history_helper.add_recent_file(path)
| 0
| 0
| 0
|
85bd09bc7858e6df3f0144f0d023684434a7438d
| 1,065
|
py
|
Python
|
app/routers/auth.py
|
JuanDM93/fcc-fastapi-demo
|
7d20f91fa96989d22426632c1ab2550f62898789
|
[
"MIT"
] | null | null | null |
app/routers/auth.py
|
JuanDM93/fcc-fastapi-demo
|
7d20f91fa96989d22426632c1ab2550f62898789
|
[
"MIT"
] | null | null | null |
app/routers/auth.py
|
JuanDM93/fcc-fastapi-demo
|
7d20f91fa96989d22426632c1ab2550f62898789
|
[
"MIT"
] | null | null | null |
from sqlalchemy import schema
from sqlalchemy.orm import Session
from fastapi import Depends, APIRouter, HTTPException, status
from fastapi.security.oauth2 import OAuth2PasswordRequestForm
from .. import db, models, schemas, utils, oauth2
router = APIRouter(
tags=["Authentication"],
)
@router.post("/login", response_model=schemas.Token)
| 32.272727
| 101
| 0.709859
|
from sqlalchemy import schema
from sqlalchemy.orm import Session
from fastapi import Depends, APIRouter, HTTPException, status
from fastapi.security.oauth2 import OAuth2PasswordRequestForm
from .. import db, models, schemas, utils, oauth2
router = APIRouter(
tags=["Authentication"],
)
@router.post("/login", response_model=schemas.Token)
def login(user_credentials: OAuth2PasswordRequestForm = Depends(), db: Session = Depends(db.get_db)):
user = db.query(models.User).filter(
models.User.email == user_credentials.username).first()
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect email or password",
)
if not utils.verify(user_credentials.password, user.password):
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect email or password",
)
    access_token = oauth2.create_access_token(data={"user_id": user.id})
    return {"access_token": access_token, "token_type": "bearer"}
| 695
| 0
| 22
|
714353cdd4eee160fe40085bfeaee7eb35bfec53
| 2,739
|
py
|
Python
|
agent/sn_agent/api/__init__.py
|
akolonin/singnet
|
3be30d40a2394325dc14bb55ea2871fe463b9405
|
[
"MIT"
] | null | null | null |
agent/sn_agent/api/__init__.py
|
akolonin/singnet
|
3be30d40a2394325dc14bb55ea2871fe463b9405
|
[
"MIT"
] | null | null | null |
agent/sn_agent/api/__init__.py
|
akolonin/singnet
|
3be30d40a2394325dc14bb55ea2871fe463b9405
|
[
"MIT"
] | 1
|
2020-10-27T01:32:15.000Z
|
2020-10-27T01:32:15.000Z
|
import logging
import os
from aiohttp import web, WSMsgType
from aiohttp.web_response import Response
from jsonrpcserver.aio import methods
from sn_agent import ontology
from sn_agent.api.job import can_perform_service, perform_job
from sn_agent.job.job_descriptor import JobDescriptor
from sn_agent.ontology.service_descriptor import ServiceDescriptor
logger = logging.getLogger(__name__)
WS_FILE = os.path.join(os.path.dirname(__file__), 'websocket.html')
@methods.add
@methods.add
| 26.852941
| 89
| 0.686017
|
import logging
import os
from aiohttp import web, WSMsgType
from aiohttp.web_response import Response
from jsonrpcserver.aio import methods
from sn_agent import ontology
from sn_agent.api.job import can_perform_service, perform_job
from sn_agent.job.job_descriptor import JobDescriptor
from sn_agent.ontology.service_descriptor import ServiceDescriptor
logger = logging.getLogger(__name__)
WS_FILE = os.path.join(os.path.dirname(__file__), 'websocket.html')
@methods.add
async def can_perform(service_node_id=None, context=None):
# figure out what we are being asked to perform and answer
service = ServiceDescriptor(service_node_id)
app = context
return await can_perform_service(app, service)
@methods.add
async def perform(service_node_id=None, job_params=None, context=None):
service_descriptor = ServiceDescriptor(service_node_id)
job = JobDescriptor(service_descriptor, job_params)
app = context
result = await perform_job(app, job)
logging.debug('Result of perform was %s', result)
return result
async def http_handler(request):
app = request.app
request_text = await request.text()
response = await methods.dispatch(request_text, app)
if response.is_notification:
return web.Response()
else:
return web.json_response(response, status=response.http_status)
async def ws_handler(request):
logger.debug('WebSocket Handler started')
app = request.app
resp = web.WebSocketResponse()
ok, protocol = resp.can_prepare(request)
if not ok:
with open(WS_FILE, 'rb') as fp:
return Response(body=fp.read(), content_type='text/html')
await resp.prepare(request)
logger.debug('WebSocket data received')
try:
request.app['sockets'].append(resp)
async for msg in resp:
logger.debug('Processing WebSocket message: %s', msg.type)
if msg.type == WSMsgType.TEXT:
response = await methods.dispatch(msg.data, app)
if not response.is_notification:
await resp.send_str(str(response))
elif msg.type == WSMsgType.ERROR:
logger.debug('ws connection closed with exception %s' % resp.exception())
else:
logger.debug("Unhandled message type")
return resp
return resp
finally:
request.app['sockets'].remove(resp)
logger.debug('Someone disconnected.')
async def on_shutdown(app):
for ws in app['sockets']:
await ws.close()
def setup_api(app):
app['sockets'] = []
app.router.add_post('/api', http_handler)
app.router.add_get('/api/ws', ws_handler)
app.on_shutdown.append(on_shutdown)
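A hypothetical JSON-RPC 2.0 request against the /api route registered above; the agent's address and the service_node_id value are placeholders:
import json
import requests
payload = {
    'jsonrpc': '2.0',
    'method': 'can_perform',
    'params': {'service_node_id': 'deadbeef'},
    'id': 1,
}
resp = requests.post('http://localhost:8000/api', data=json.dumps(payload))
print(resp.json())  # JSON-RPC result produced by can_perform_service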
| 2,107
| 0
| 136
|
f67cc3a36214f247ea7458d7048481056a241c09
| 384
|
py
|
Python
|
Python_Ex_vazio/ex055.py
|
matheusmiguelsa/Exerc-cios-de-Python
|
53387266b747f79e67964356993b38c2267ac04a
|
[
"MIT"
] | null | null | null |
Python_Ex_vazio/ex055.py
|
matheusmiguelsa/Exerc-cios-de-Python
|
53387266b747f79e67964356993b38c2267ac04a
|
[
"MIT"
] | null | null | null |
Python_Ex_vazio/ex055.py
|
matheusmiguelsa/Exerc-cios-de-Python
|
53387266b747f79e67964356993b38c2267ac04a
|
[
"MIT"
] | null | null | null |
maior = 0
menor = 0
for p in range(1, 6):
    peso = float(input(f'Enter the weight of person {p} in kilograms: '))
if p == 1:
maior = peso
menor = peso
else:
if peso > maior:
maior = peso
if peso < menor:
menor = peso
print('The highest weight read was {}Kg'.format(maior))
print('The lowest weight read was {}Kg'.format(menor))
| 27.428571
| 69
| 0.546875
|
maior = 0
menor = 0
for p in range(1, 6):
    peso = float(input(f'Enter the weight of person {p} in kilograms: '))
if p == 1:
maior = peso
menor = peso
else:
if peso > maior:
maior = peso
if peso < menor:
menor = peso
print('The highest weight read was {}Kg'.format(maior))
print('The lowest weight read was {}Kg'.format(menor))
| 0
| 0
| 0
|
ef94fa3cdab88cb3c5513b937bac0106e56972d2
| 1,803
|
py
|
Python
|
userlixo/utils/patches.py
|
annihilatorrrr/UserLixo
|
d9d9bdcdab6c7489c41f2658b288e1f59674d3b3
|
[
"MIT"
] | 1
|
2022-03-28T15:38:27.000Z
|
2022-03-28T15:38:27.000Z
|
userlixo/utils/patches.py
|
Smartgirl2/UserLixo
|
73c900e0488239ff9330efb1cf9e939e7f43c496
|
[
"MIT"
] | null | null | null |
userlixo/utils/patches.py
|
Smartgirl2/UserLixo
|
73c900e0488239ff9330efb1cf9e939e7f43c496
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: MIT
# Copyright (c) 2018-2022 Amano Team
from pyrogram import types
from pyrogram.helpers import bki, ikb
from userlixo.database import Message
| 30.05
| 86
| 0.693844
|
# SPDX-License-Identifier: MIT
# Copyright (c) 2018-2022 Amano Team
from pyrogram import types
from pyrogram.helpers import bki, ikb
from userlixo.database import Message
async def query_edit(
self, text: str, reply_markup=None, answer_kwargs={}, *args, **kwargs
):
try:
await self.answer(**answer_kwargs)
except BaseException:
pass
edit = await self.edit_message_text(
text=text, reply_markup=reply_markup, *args, **kwargs
)
return edit
def remove_keyboard(self, message_id=None, *args, **kwargs):
return self._client.edit_message_reply_markup(
self.chat.id, message_id or self.message_id, {}
)
async def edit_text(self, text: str, reply_markup=None, *args, **kwargs):
if type(reply_markup) == list:
reply_markup = ikb(reply_markup)
return await self._client.edit_message_text(
self.chat.id, self.message_id, text, reply_markup=reply_markup, **kwargs
)
async def reply_text(self, text: str, reply_markup=None, *args, **kwargs):
if not reply_markup or self._client.session_name == "bot":
return await self.reply_text(text, reply_markup=reply_markup, *args, **kwargs)
if type(reply_markup) == types.InlineKeyboardMarkup:
reply_markup = bki(reply_markup)
message = await Message.create(text=text, keyboard=reply_markup)
bot = self._client.assistant
inline_results = await self._client.get_inline_bot_results(
bot.me.username or bot.me.id, str(message.key)
)
result = inline_results.results[0]
reply_to = None
if kwargs.get("quote"):
reply_to = self.message_id
return await self._client.send_inline_bot_result(
self.chat.id,
inline_results.query_id,
result.id,
reply_to_message_id=reply_to,
)
| 1,534
| 0
| 92
|
beffdc1038d5ff506f4d888b134dc83884effb8f
| 1,245
|
py
|
Python
|
src/methodComparison/plotResultsGraph.py
|
UMCUGenetics/svMIL
|
b17f9b34702aac976dd5e233cb4e1ce051d19bbf
|
[
"MIT"
] | null | null | null |
src/methodComparison/plotResultsGraph.py
|
UMCUGenetics/svMIL
|
b17f9b34702aac976dd5e233cb4e1ce051d19bbf
|
[
"MIT"
] | null | null | null |
src/methodComparison/plotResultsGraph.py
|
UMCUGenetics/svMIL
|
b17f9b34702aac976dd5e233cb4e1ce051d19bbf
|
[
"MIT"
] | 1
|
2021-01-19T09:25:47.000Z
|
2021-01-19T09:25:47.000Z
|
import matplotlib.pyplot as plt
import sys
import os
outDir = sys.argv[1]
finalOutDir = outDir + '/figure3d/'
if not os.path.exists(finalOutDir):
os.makedirs(finalOutDir)
#make a plot showing the true positive and false positive rates of each method.
#each sv type will get its own icon, and methods can be labeled by color
methods = ['chrCV MIL', 'chrCV simple RF', 'VEP', 'SVScore']
methodColors = ['#0055d4ff', '#c83737ff', 'orange', '#808080ff']
#These are obtained from running the individual scripts for each method (see workflow.sh)
tprsDEL = [0.53, 0.56, 0.02, 0.009]
fprsDEL = [0.20, 0.56, 0.2, 0.09]
tprsDUP = [0.58, 0.45, 0.08, 0.03]
fprsDUP = [0.30, 0.46, 0.46, 0.08]
tprsINV = [0.60, 0.38, 0, 0.007]
fprsINV = [0.25, 0.37, 0, 0.03]
tprsITX = [0.62, 0.47, 0, 0]
fprsITX = [0.30, 0.43, 0, 0.02]
#make the scatter plot
plt.scatter(fprsDEL, tprsDEL, marker='.', facecolor=methodColors, edgecolor=methodColors)
plt.scatter(fprsDUP, tprsDUP, marker='s', facecolor=methodColors, edgecolor=methodColors)
plt.scatter(fprsINV, tprsINV, marker='^', facecolor=methodColors, edgecolor=methodColors)
plt.scatter(fprsITX, tprsITX, marker='*', facecolor=methodColors, edgecolor=methodColors)
plt.savefig(finalOutDir + '/tpr_fpr.svg')
| 31.125
| 89
| 0.711647
|
import matplotlib.pyplot as plt
import sys
import os
outDir = sys.argv[1]
finalOutDir = outDir + '/figure3d/'
if not os.path.exists(finalOutDir):
os.makedirs(finalOutDir)
#make a plot showing the true positive and false positive rates of each method.
#each sv type will get its own icon, and methods can be labeled by color
methods = ['chrCV MIL', 'chrCV simple RF', 'VEP', 'SVScore']
methodColors = ['#0055d4ff', '#c83737ff', 'orange', '#808080ff']
#These are obtained from running the individual scripts for each method (see workflow.sh)
tprsDEL = [0.53, 0.56, 0.02, 0.009]
fprsDEL = [0.20, 0.56, 0.2, 0.09]
tprsDUP = [0.58, 0.45, 0.08, 0.03]
fprsDUP = [0.30, 0.46, 0.46, 0.08]
tprsINV = [0.60, 0.38, 0, 0.007]
fprsINV = [0.25, 0.37, 0, 0.03]
tprsITX = [0.62, 0.47, 0, 0]
fprsITX = [0.30, 0.43, 0, 0.02]
#make the scatter plot
plt.scatter(fprsDEL, tprsDEL, marker='.', facecolor=methodColors, edgecolor=methodColors)
plt.scatter(fprsDUP, tprsDUP, marker='s', facecolor=methodColors, edgecolor=methodColors)
plt.scatter(fprsINV, tprsINV, marker='^', facecolor=methodColors, edgecolor=methodColors)
plt.scatter(fprsITX, tprsITX, marker='*', facecolor=methodColors, edgecolor=methodColors)
plt.savefig(finalOutDir + '/tpr_fpr.svg')
| 0
| 0
| 0
|
f9f3f26ea4b675f6359e66f744047dae89d21734
| 2,847
|
py
|
Python
|
madness/bracket.py
|
turtlebayjai/madness
|
19268ffe3fc20f048018656e5dd990ace8f5855a
|
[
"MIT"
] | null | null | null |
madness/bracket.py
|
turtlebayjai/madness
|
19268ffe3fc20f048018656e5dd990ace8f5855a
|
[
"MIT"
] | null | null | null |
madness/bracket.py
|
turtlebayjai/madness
|
19268ffe3fc20f048018656e5dd990ace8f5855a
|
[
"MIT"
] | null | null | null |
from collections import deque
from math import ceil, log
from division import Division
from picker import Picker
| 29.05102
| 68
| 0.527573
|
from collections import deque
from math import ceil, log
from division import Division
from picker import Picker
class BracketNode:
def __init__(self, team=None, left=None, right=None):
self.team = team
self.left = left
self.right = right
def play(self, picker):
if not self.left and not self.right:
return self.team
leftWinner = rightWinner = None
if self.left:
leftWinner = self.left.play(picker)
if self.right:
rightWinner = self.right.play(picker)
self.team = picker.pickWinner(leftWinner, rightWinner)
return self.team
def __str__(self):
string = ""
queue = deque([self])
while queue:
roundResult = "\n"
for i in range(len(queue)):
node = queue.popleft()
roundResult += str(node.team) + " | "
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
string += roundResult + "\n" + ("--" * 40)
return string
class Bracket:
def __init__(self, orderedDivisions, picker=None):
"""
orderedDivisions = [
Division (object),
Division (object),
...
],
picker: Picker (object)
"""
self.orderedDivisions = orderedDivisions
if not picker:
picker = Picker("simpleSeed")
self.picker = picker
def build(self, division):
leaves = deque(division.orderedTeams)
levels = int(ceil(log(len(leaves), 2)))
root = BracketNode()
queue = deque([root])
for i in range(levels - 1):
for j in range(len(queue)):
node = queue.popleft()
node.left, node.right = BracketNode(), BracketNode()
queue.append(node.left)
queue.append(node.right)
while queue:
node = queue.popleft()
node.left = BracketNode(team=leaves.popleft())
node.right = BracketNode(team=leaves.popleft())
return root
def simulate(self, quiet=False):
finalTeams = []
for division in self.orderedDivisions:
root = self.build(division)
finalTeams.append(root.play(self.picker))
if not quiet:
print(f"\n* {division.name} *")
print(root)
if not finalTeams:
return None
winner = finalTeams[0]
if len(finalTeams) > 1:
finals = Division(finalTeams, "Finals")
root = self.build(finals)
winner = root.play(self.picker)
if not quiet:
print(f"\n* {finals.name} *")
print(root)
return winner
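A hypothetical simulation using the classes above; Division(ordered_teams, name) mirrors how Bracket builds its Finals division, while the plain string team entries and the behaviour of the default 'simpleSeed' Picker are assumptions:
# Team entries are placeholders; the real Picker may expect richer team objects.
east = Division(['1 Duke', '8 Iowa', '4 Kansas', '5 Ohio St'], 'East')
west = Division(['1 Gonzaga', '8 UNC', '4 UCLA', '5 Creighton'], 'West')
bracket = Bracket([east, west])          # falls back to Picker('simpleSeed')
champion = bracket.simulate(quiet=True)  # plays each division, then the Finals
print(champion)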
| 2,189
| 416
| 126
|
48f5c233085c88dc7df004bb3107e6ba9e591625
| 3,203
|
py
|
Python
|
kicad_bom_seeedstudio.py
|
ahmetcumhurarslan/kicad-bom-seeedstudio
|
cdd97327dbdce0f143a272f82b4a8b05e0aabbfe
|
[
"Apache-2.0"
] | 19
|
2017-07-31T09:33:34.000Z
|
2021-05-19T03:18:11.000Z
|
kicad_bom_seeedstudio.py
|
ahmetcumhurarslan/kicad-bom-seeedstudio
|
cdd97327dbdce0f143a272f82b4a8b05e0aabbfe
|
[
"Apache-2.0"
] | 2
|
2017-10-19T22:03:33.000Z
|
2019-05-11T19:56:51.000Z
|
kicad_bom_seeedstudio.py
|
imrehg/kicad-bom-seeedstudio
|
cdd97327dbdce0f143a272f82b4a8b05e0aabbfe
|
[
"Apache-2.0"
] | 7
|
2017-10-19T20:38:24.000Z
|
2021-02-26T00:55:06.000Z
|
#!/usr/bin/env python3
import csv
import sys
import xml.etree.ElementTree as ET
### Natural key sorting for orders like : C1, C5, C10, C12 ... (instead of C1, C10, C12, C5...)
# http://stackoverflow.com/a/5967539
import re
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [ atoi(c) for c in re.split('(\d+)', text) ]
###
def parse_kicad_xml(input_file):
"""Parse the KiCad XML file and look for the part designators
as done in the case of the official KiCad Open Parts Library:
* OPL parts are designated with "SKU" (preferred)
* other parts are designated with "MPN"
"""
components = {}
parts = {}
missing = []
tree = ET.parse(input_file)
root = tree.getroot()
for f in root.findall('./components/'):
name = f.attrib['ref']
info = {}
fields = f.find('fields')
opl, mpn = None, None
if fields is not None:
for x in fields:
if x.attrib['name'].upper() == 'SKU':
opl = x.text
elif x.attrib['name'].upper() == 'MPN':
mpn = x.text
if opl:
components[name] = opl
elif mpn:
components[name] = mpn
else:
missing += [name]
continue
if components[name] not in parts:
parts[components[name]] = []
parts[components[name]] += [name]
return components, missing
def write_bom_seeed(output_file_slug, components):
"""Write the BOM according to the Seeed Studio Fusion PCBA template available at:
https://statics3.seeedstudio.com/assets/file/fusion/bom_template_2016-08-18.csv
```
Part/Designator,Manufacture Part Number/Seeed SKU,Quantity
C1,RHA,1
"D1,D2",CC0603KRX7R9BB102,2
```
The output is a CSV file at the `output_file_slug`.csv location.
"""
parts = {}
for c in components:
if components[c] not in parts:
parts[components[c]] = []
parts[components[c]] += [c]
field_names = ['Part/Designator', 'Manufacture Part Number/Seeed SKU', 'Quantity']
with open("{}.csv".format(output_file_slug), 'w') as csvfile:
bomwriter = csv.DictWriter(csvfile, fieldnames=field_names, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
bomwriter.writeheader()
for p in sorted(parts.keys()):
pieces = sorted(parts[p], key=natural_keys)
designators = ",".join(pieces)
bomwriter.writerow({'Part/Designator': designators,
'Manufacture Part Number/Seeed SKU': p,
'Quantity': len(pieces)})
if __name__ == "__main__":
input_file = sys.argv[1]
output_file = sys.argv[2]
components, missing = parse_kicad_xml(input_file)
write_bom_seeed(output_file, components)
if len(missing) > 0:
print("** Warning **: there were parts with missing SKU/MFP")
print(missing)
| 33.020619
| 95
| 0.597877
|
#!/usr/bin/env python3
import csv
import sys
import xml.etree.ElementTree as ET
### Natural key sorting for orders like : C1, C5, C10, C12 ... (instead of C1, C10, C12, C5...)
# http://stackoverflow.com/a/5967539
import re
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [ atoi(c) for c in re.split('(\d+)', text) ]
###
def parse_kicad_xml(input_file):
"""Parse the KiCad XML file and look for the part designators
as done in the case of the official KiCad Open Parts Library:
* OPL parts are designated with "SKU" (preferred)
* other parts are designated with "MPN"
"""
components = {}
parts = {}
missing = []
tree = ET.parse(input_file)
root = tree.getroot()
for f in root.findall('./components/'):
name = f.attrib['ref']
info = {}
fields = f.find('fields')
opl, mpn = None, None
if fields is not None:
for x in fields:
if x.attrib['name'].upper() == 'SKU':
opl = x.text
elif x.attrib['name'].upper() == 'MPN':
mpn = x.text
if opl:
components[name] = opl
elif mpn:
components[name] = mpn
else:
missing += [name]
continue
if components[name] not in parts:
parts[components[name]] = []
parts[components[name]] += [name]
return components, missing
def write_bom_seeed(output_file_slug, components):
"""Write the BOM according to the Seeed Studio Fusion PCBA template available at:
https://statics3.seeedstudio.com/assets/file/fusion/bom_template_2016-08-18.csv
```
Part/Designator,Manufacture Part Number/Seeed SKU,Quantity
C1,RHA,1
"D1,D2",CC0603KRX7R9BB102,2
```
The output is a CSV file at the `output_file_slug`.csv location.
"""
parts = {}
for c in components:
if components[c] not in parts:
parts[components[c]] = []
parts[components[c]] += [c]
field_names = ['Part/Designator', 'Manufacture Part Number/Seeed SKU', 'Quantity']
with open("{}.csv".format(output_file_slug), 'w') as csvfile:
bomwriter = csv.DictWriter(csvfile, fieldnames=field_names, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
bomwriter.writeheader()
for p in sorted(parts.keys()):
pieces = sorted(parts[p], key=natural_keys)
designators = ",".join(pieces)
bomwriter.writerow({'Part/Designator': designators,
'Manufacture Part Number/Seeed SKU': p,
'Quantity': len(pieces)})
if __name__ == "__main__":
input_file = sys.argv[1]
output_file = sys.argv[2]
components, missing = parse_kicad_xml(input_file)
write_bom_seeed(output_file, components)
if len(missing) > 0:
print("** Warning **: there were parts with missing SKU/MFP")
print(missing)
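A hypothetical direct use of the two helpers above, bypassing the command-line entry point; the file names are placeholders:
components, missing = parse_kicad_xml('my_board.xml')
write_bom_seeed('my_board_bom', components)  # writes my_board_bom.csv
if missing:
    print('Parts without SKU/MPN:', missing)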
| 43
| 0
| 23
|
c6ff96beb7b31da28f265ad6877c8d563618606d
| 6,957
|
py
|
Python
|
miner/crawlers/event_crawler.py
|
HeavenDuke/GithubMiner
|
3d14c40c9cbdee6f22e7ade3493888aff708ad5b
|
[
"MIT"
] | 2
|
2017-05-22T10:31:15.000Z
|
2017-05-23T06:52:58.000Z
|
miner/crawlers/event_crawler.py
|
HeavenDuke/GithubMiner
|
3d14c40c9cbdee6f22e7ade3493888aff708ad5b
|
[
"MIT"
] | null | null | null |
miner/crawlers/event_crawler.py
|
HeavenDuke/GithubMiner
|
3d14c40c9cbdee6f22e7ade3493888aff708ad5b
|
[
"MIT"
] | null | null | null |
import wget
import gzip
import time as t
import json
import fileinput
import os
| 35.136364
| 108
| 0.531838
|
import wget
import gzip
import time as t
import json
import fileinput
import os
class Meta(object):
base_url = "http://data.githubarchive.org/{year}-{month}-{day}-{hour}.json.gz"
base_output = "./tmp/{year}-{month}-{day}-{hour}.json"
@classmethod
def construct(cls, year, month, day, hour):
url = cls.base_url
out = cls.base_output
url = url.replace("{year}", str(year))
url = url.replace("{month}", str(month))
url = url.replace("{day}", str(day))
url = url.replace("{hour}", str(hour))
out = out.replace("{year}", str(year))
out = out.replace("{month}", str(month))
out = out.replace("{day}", str(day))
out = out.replace("{hour}", str(hour))
return {"url": url, "out": out, "compressed": out + ".gz"}
class PackageRequester(object):
@classmethod
def fetch_package(cls, url, output = None):
wget.download(url = url, out = output)
class Unzip(object):
@classmethod
def unzip(cls, path, output):
g = gzip.GzipFile(filename = path, mode = "rb")
f = open(output, "wb")
f.write(g.read())
g.close()
f.close()
class Transformer(object):
def __init__(self):
pass
@classmethod
def transform(cls, graph, data, ar = ()):
for item in data.data:
if item["type"] == "MemberEvent":
if cls.is_legal_event(item, ar):
cls.parse_membership(data, item)
else:
pass
elif item["type"] == "WatchEvent":
if cls.is_legal_event(item, ar):
cls.parse_star(data, item)
else:
pass
elif item["type"] == "ForkEvent":
if cls.is_legal_event(item, ar):
cls.parse_fork(data, item)
else:
pass
elif item["type"] in ["PullRequestEvent", "PushEvent"]:
if cls.is_legal_event(item, ar):
cls.parse_contribute(data, item)
else:
pass
elif item["type"] in ["IssuesEvent", "IssueCommentEvent"]:
if cls.is_legal_event(item, ar):
cls.parse_issue(data, item)
else:
pass
cls.flush(graph, data)
@classmethod
def is_legal_event(cls, event, ar = ()):
return event["repo"]["id"] in ar
@classmethod
def parse_membership(cls, data, item):
cls.upsert_user(data, item["actor"])
cls.upsert_relationship(data, item["actor"], item["repo"], "Membership", item["created_at"])
return item["repo"], item["actor"]
@classmethod
def parse_fork(cls, data, item):
cls.upsert_user(data, item["actor"])
cls.upsert_relationship(data, item["actor"], item["repo"], "Fork", item["created_at"])
return item["repo"], item["actor"]
@classmethod
def parse_star(cls, data, item):
cls.upsert_user(data, item["actor"])
cls.upsert_relationship(data, item["actor"], item["repo"], "Star", item["created_at"])
return item["repo"], item["actor"]
@classmethod
def parse_issue(cls, data, item):
cls.upsert_user(data, item["actor"])
cls.upsert_relationship(data, item["actor"], item["repo"], "Issue", item["created_at"])
return item["repo"], item["actor"]
@classmethod
def parse_contribute(cls, data, item):
cls.upsert_user(data, item["actor"])
cls.upsert_relationship(data, item["actor"], item["repo"], "Contribute", item["created_at"])
return item["repo"], item["actor"]
@classmethod
def upsert_user(cls, data, user):
if user["id"] not in data.related_data:
data.related_data[user["id"]] = {
"value": {
"login": user["login"],
"user_id": user["id"],
"avatar_url": user["avatar_url"]
},
"repositories": {}
}
@classmethod
def upsert_relationship(cls, data, user, repository, label, time):
_time = t.mktime(t.strptime(time, "%Y-%m-%dT%H:%M:%SZ"))
_repo = data.related_data[user["id"]]["repositories"]
if repository["id"] not in _repo:
_repo[repository["id"]] = {}
if label not in _repo[repository["id"]]:
_repo[repository["id"]][label] = {}
if _time not in _repo[repository["id"]][label]:
_repo[repository["id"]][label][_time] = True
@classmethod
def flush(cls, graph, data):
def flush_events(graph, user, repository, events):
if len(events) == 0:
return
query = "MATCH (r:Repository {repository_id: %d})" % repository
query += " MERGE (u:User {user_id: %d})" % user["user_id"]
query += " SET u.login='%s', u.avatar_url='%s'" % (user["login"], user["avatar_url"])
query += " CREATE UNIQUE "
first = True
for label in events:
times = events[label]
for time in times:
if not first:
query += ","
query += "(u)-[:%s {created_at: %d, type: '%s'}]->(r)" % (label, time, label)
first = False
graph.run(query)
for uid in data.related_data:
repositories = data.related_data[uid]["repositories"]
uvalue = data.related_data[uid]["value"]
for rid in repositories:
flush_events(graph = graph, user = uvalue, repository = rid, events = repositories[rid])
class EventData(object):
def __init__(self, path):
self.data = []
self.related_data = {}
if path is not None:
for line in fileinput.input(path):
if line != "":
d = json.loads(line)
self.data.append(d)
else:
raise IOError("file not exist!")
class EventCrawler(object):
@classmethod
def crawl(cls, time, graph, ar = ()):
meta = Meta.construct(year = time.year, month = time.month, day = time.day, hour = time.hour)
try:
if not os.path.exists(meta["compressed"]):
PackageRequester.fetch_package(url = meta["url"], output = meta["compressed"])
if not os.path.exists(meta["out"]):
Unzip.unzip(path = meta["compressed"], output = meta["out"])
data = EventData(path = meta["out"])
Transformer.transform(graph = graph, data = data, ar = ar)
except:
print "Unable to crawl event data at %s-%s-%s %s" % (time.year, time.month, time.day, time.hour)
if os.path.exists(meta["compressed"]):
os.remove(meta["compressed"])
if os.path.exists(meta["out"]):
os.remove(meta["out"])
| 5,906
| 802
| 164
|
2a3ed8a00c44639bcf90af5b5d1068dde31685b4
| 61
|
py
|
Python
|
ace/samples/__init__.py
|
partofthething/ace
|
689d0caac3ba0708444be6ebf62627137b08ae46
|
[
"MIT"
] | 47
|
2015-04-29T06:52:03.000Z
|
2022-03-15T11:05:01.000Z
|
ace/samples/__init__.py
|
Jimmy-INL/ace
|
689d0caac3ba0708444be6ebf62627137b08ae46
|
[
"MIT"
] | 12
|
2015-05-29T15:21:25.000Z
|
2020-10-08T15:03:41.000Z
|
ace/samples/__init__.py
|
Jimmy-INL/ace
|
689d0caac3ba0708444be6ebf62627137b08ae46
|
[
"MIT"
] | 22
|
2015-06-02T17:30:35.000Z
|
2022-02-16T20:46:24.000Z
|
"""Sample ace and supersmoother problems from literature."""
| 30.5
| 60
| 0.770492
|
"""Sample ace and supersmoother problems from literature."""
| 0
| 0
| 0
|
c983fcf6ecba02fec1316fc2ea22d85c6f139bb0
| 662
|
py
|
Python
|
geoip.py
|
vlzx/outlier
|
4a8a30d7661472e18c9809c0e6f68f3e67389d93
|
[
"MIT"
] | null | null | null |
geoip.py
|
vlzx/outlier
|
4a8a30d7661472e18c9809c0e6f68f3e67389d93
|
[
"MIT"
] | null | null | null |
geoip.py
|
vlzx/outlier
|
4a8a30d7661472e18c9809c0e6f68f3e67389d93
|
[
"MIT"
] | null | null | null |
from threading import Lock
import geoip2.database
from util import resource_path
| 25.461538
| 85
| 0.63142
|
from threading import Lock
import geoip2.database
from util import resource_path
class SingletonMeta(type):
_instance = None
_lock = Lock()
def __call__(cls, *args, **kwargs):
with cls._lock:
if cls._instance is None:
cls._instance = super(SingletonMeta, cls).__call__(*args, **kwargs)
return cls._instance
class GeoIP(metaclass=SingletonMeta):
def __init__(self):
self.reader = geoip2.database.Reader(resource_path('GeoLite2-Country.mmdb'))
def country(self, ip: str) -> str:
data = self.reader.country(ip)
return data.country.iso_code
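A short usage sketch of the singleton above; it assumes GeoLite2-Country.mmdb is resolvable through resource_path(), and the IP address is a placeholder:
geo = GeoIP()
print(geo.country('8.8.8.8'))  # ISO country code, e.g. 'US'
assert geo is GeoIP()          # SingletonMeta hands back the same instance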
| 372
| 93
| 105
|
720bd2b1acb5d635f24a79aa2ddb890b5d2d825e
| 368
|
py
|
Python
|
stable_baselines/td3/__init__.py
|
iDurugkar/adversarial-intrinsic-motivation
|
e0ece991fe9b8278596c0ad9c68ccfc98a71e1e2
|
[
"MIT"
] | 2
|
2022-03-11T15:26:00.000Z
|
2022-03-15T12:20:57.000Z
|
stable_baselines/td3/__init__.py
|
iDurugkar/adversarial-intrinsic-motivation
|
e0ece991fe9b8278596c0ad9c68ccfc98a71e1e2
|
[
"MIT"
] | null | null | null |
stable_baselines/td3/__init__.py
|
iDurugkar/adversarial-intrinsic-motivation
|
e0ece991fe9b8278596c0ad9c68ccfc98a71e1e2
|
[
"MIT"
] | null | null | null |
from stable_baselines.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise
from stable_baselines.td3.rnd import RND
from stable_baselines.td3.td3 import TD3
from stable_baselines.td3.dist_predictor import Predictor
from stable_baselines.td3.ddl_td3 import DDLTD3
from stable_baselines.td3.policies import MlpPolicy, CnnPolicy, LnMlpPolicy, LnCnnPolicy
| 46
| 89
| 0.877717
|
from stable_baselines.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise
from stable_baselines.td3.rnd import RND
from stable_baselines.td3.td3 import TD3
from stable_baselines.td3.dist_predictor import Predictor
from stable_baselines.td3.ddl_td3 import DDLTD3
from stable_baselines.td3.policies import MlpPolicy, CnnPolicy, LnMlpPolicy, LnCnnPolicy
| 0
| 0
| 0
|
1b8c0435205adcc1c2622b72a1f7168216808432
| 691
|
py
|
Python
|
passwordGenerator/generator/views.py
|
zahrakoohestani/passwordGenerator
|
c8a912ce159f04b488dabbbddcd8446672075be2
|
[
"MIT"
] | 1
|
2020-06-19T18:14:26.000Z
|
2020-06-19T18:14:26.000Z
|
passwordGenerator/generator/views.py
|
zahrakoohestani/passwordGenerator
|
c8a912ce159f04b488dabbbddcd8446672075be2
|
[
"MIT"
] | null | null | null |
passwordGenerator/generator/views.py
|
zahrakoohestani/passwordGenerator
|
c8a912ce159f04b488dabbbddcd8446672075be2
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
import random
| 32.904762
| 79
| 0.688857
|
from django.shortcuts import render
import random
def home(request):
return render(request, 'generator/home.html')
def password(request):
characters=list('asdfghjklqwertyuiopzxcvbnm')
if request.GET.get('uppercase'):
characters.extend(list('ASDFGHJKLQWERTYUIOPZXCVBNM'))
if request.GET.get('symbols'):
characters.extend(list('!@#$%^&*()_+'))
if request.GET.get('numbers'):
characters.extend(list('1234567890'))
length=request.GET.get('length')
length=int(length)
thePassword=''
for i in range(length):
thePassword+=random.choice(characters)
return render(request, 'generator/password.html', {'password':thePassword})
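A hypothetical request exercising the view above via Django's test client; the /password URL path is an assumption since the URLconf is not shown:
from django.test import Client
client = Client()
resp = client.get('/password', {'length': 12, 'uppercase': 'on', 'numbers': 'on'})
print(resp.status_code)  # 200, with generator/password.html rendered around the generated password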
| 595
| 0
| 46
|
e716e82f1750b35f58a08d3424c1fe6702b9bd50
| 8,630
|
py
|
Python
|
restApp/tests.py
|
ibamacsr/painelmma_api
|
a11a6cd63e312f09f445b139fcff8c11ab383764
|
[
"MIT"
] | null | null | null |
restApp/tests.py
|
ibamacsr/painelmma_api
|
a11a6cd63e312f09f445b139fcff8c11ab383764
|
[
"MIT"
] | null | null | null |
restApp/tests.py
|
ibamacsr/painelmma_api
|
a11a6cd63e312f09f445b139fcff8c11ab383764
|
[
"MIT"
] | null | null | null |
#from django.test import TestCase
from datetime import date
from decimal import *
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from rest_framework.test import APITestCase
from rest_framework.authtoken.models import Token
from .models import *
from .mommy_recipes import *
# def test_response(self):
# response = get_response(self.client, self.url, None)
# self.assertEqual(response.status_code, 200)
# def test_public_deter_response(self):
# public_deter_1.make()
# public_deter_2.make()
# params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
# 'tipo': 'DETER', 'estagio': 'Corte Raso'}
# response = get_response(self.client, self.url, params)
# def test_daily_deter_qualif_response(self):
# daily_deter_qualif_1.make()
# daily_deter_qualif_2.make()
# params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
# 'tipo': 'DETER', 'estagio': 'Corte Raso'}
# response = get_response(self.client, self.url, params)
# self.assertEqual(response.status_code, 200)
# self.assertEqual(response.status_code, 200)
# def test_public_deter_qualif_response(self):
# public_deter_qualif_1.make()
# public_deter_qualif_2.make()
# params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
# 'tipo': 'DETER', 'estagio': 'Corte Raso'}
# response = get_response(self.client, self.url, params)
# self.assertEqual(response.status_code, 200)
# def test_deter_awifs_response(self):
# deter_awifs_1.make()
# deter_awifs_2.make()
# params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
# 'tipo': 'DETER', 'estagio': 'Corte Raso'}
# response = get_response(self.client, self.url, params)
# self.assertEqual(response.status_code, 200)
| 37.359307
| 79
| 0.626188
|
#from django.test import TestCase
from datetime import date
from decimal import *
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from rest_framework.test import APITestCase
from rest_framework.authtoken.models import Token
from .models import *
from .mommy_recipes import *
def get_response(client, url, params):
return client.get(
url,
params,
format='json'
)
class TestDiarioAwifs(APITestCase):
def setUp(self):
self.url = reverse('api:estatisticas-diario')
self.params = {'uf': 'MT', 'ano': 2015, 'mes': 10, 'tipo': 'AWIFS'}
deter_awifs_1.make(data_imagem=date(2015, 10, 10))
def test_response(self):
response = get_response(self.client, self.url, self.params)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
def test_response_diario(self):
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 1)
self.assertEqual(data_received[0]['dia'], 10)
self.assertEqual(data_received[0]['total'], Decimal('0.13'))
deter_awifs_1.make(data_imagem=date(2015, 10, 12), area_km2=0.29)
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 2)
self.assertEqual(data_received[1]['dia'], 12)
self.assertEqual(data_received[1]['total'], Decimal('0.29'))
deter_awifs_1.make(data_imagem=date(2015, 10, 12), area_km2=0.31)
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 2)
self.assertEqual(data_received[1]['dia'], 12)
self.assertEqual(data_received[1]['total'], Decimal('0.60'))
deter_awifs_1.make(data_imagem=date(2015, 10, 12), area_km2=1)
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 2)
self.assertEqual(data_received[1]['dia'], 12)
self.assertEqual(data_received[1]['total'], Decimal('1.60'))
deter_awifs_2.make(data_imagem=date(2015, 11, 1))
deter_awifs_2.make(data_imagem=date(2015, 11, 1))
deter_awifs_2.make(data_imagem=date(2015, 11, 2))
deter_awifs_2.make(data_imagem=date(2015, 11, 3), area_km2=1.2)
self.params = {'uf': 'MT', 'ano': 2015, 'mes': 11, 'tipo': 'AWIFS'}
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 3)
self.assertEqual(response.data[0]['data'][0]['dia'], 1)
self.assertEqual(response.data[0]['data'][0]['total'], Decimal('1.64'))
self.assertEqual(response.data[0]['data'][1]['dia'], 2)
self.assertEqual(response.data[0]['data'][1]['total'], Decimal('0.82'))
self.assertEqual(response.data[0]['data'][2]['dia'], 3)
self.assertEqual(response.data[0]['data'][2]['total'], Decimal('1.2'))
class TestDiarioDeter(APITestCase):
def setUp(self):
self.url = reverse('api:estatisticas-diario')
self.params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
'tipo': 'DETER', 'estagio': 'Corte Raso'}
daily_deter_1.make(data_imagem=date(2015, 8, 1))
def test_response(self):
response = get_response(self.client, self.url, self.params)
self.assertEqual(response.status_code, 200)
def test_response_diario(self):
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
day = data_received[0]['dia']
area = data_received[0]['total']
self.assertEqual(len(data_received), 1)
self.assertEqual(day, 1)
self.assertEqual(area, Decimal('0.23'))
daily_deter_1.make(data_imagem=date(2015, 8, 1), area_km2=1)
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
day = data_received[0]['dia']
area = data_received[0]['total']
self.assertEqual(len(data_received), 1)
self.assertEqual(day, 1)
self.assertEqual(area, Decimal('1.23'))
daily_deter_1.make(data_imagem=date(2015, 8, 9), area_km2=1.89)
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
day = data_received[1]['dia']
area = data_received[1]['total']
self.assertEqual(len(data_received), 2)
self.assertEqual(day, 9)
self.assertEqual(area, Decimal('1.89'))
daily_deter_1.make(data_imagem=date(2015, 8, 10), area_km2=1)
daily_deter_1.make(data_imagem=date(2015, 8, 11), area_km2=1)
daily_deter_1.make(data_imagem=date(2015, 8, 10), area_km2=2)
daily_deter_1.make(data_imagem=date(2015, 8, 30), area_km2=2)
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 5)
self.assertEqual(data_received[0]['dia'], 1)
self.assertEqual(data_received[1]['dia'], 9)
self.assertEqual(data_received[2]['dia'], 10)
self.assertEqual(data_received[3]['dia'], 11)
self.assertEqual(data_received[4]['dia'], 30)
self.assertEqual(data_received[0]['total'], Decimal('1.23'))
self.assertEqual(data_received[1]['total'], Decimal('1.89'))
self.assertEqual(data_received[2]['total'], Decimal('3'))
self.assertEqual(data_received[3]['total'], Decimal('1'))
self.assertEqual(data_received[4]['total'], Decimal('2'))
class TestDiarioQualif(APITestCase):
def setUp(self):
self.url = reverse('api:estatisticas-diario')
self.params = {'uf': 'BA', 'ano': 2013, 'mes': 9,
'tipo': 'DETER', 'estagio': 'Corte Raso'}
def test_response(self):
response = get_response(self.client, self.url, self.params)
self.assertEqual(response.status_code, 200)
class TestMontly(APITestCase):
def setUp(self):
self.url = reverse('api:estatisticas-mensal')
# self.user = User.objects.create_user(
# 'test', 'test@test.com', 'password'
# )
# self.token = Token.objects.get(user=self.user)
# def test_response(self):
# response = get_response(self.client, self.url, None)
# self.assertEqual(response.status_code, 200)
def test_daily_deter_response(self):
daily_deter_1.make()
daily_deter_2.make()
response = self.client.post(
revese("api:login"),
{'username': 'test', 'password': 'password'},
format='json'
)
params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
'tipo': 'DETER'}
response = get_response(self.client, self.url, params)
self.assertEqual(response.status_code, 200)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 1)
# def test_public_deter_response(self):
# public_deter_1.make()
# public_deter_2.make()
# params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
# 'tipo': 'DETER', 'estagio': 'Corte Raso'}
# response = get_response(self.client, self.url, params)
# def test_daily_deter_qualif_response(self):
# daily_deter_qualif_1.make()
# daily_deter_qualif_2.make()
# params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
# 'tipo': 'DETER', 'estagio': 'Corte Raso'}
# response = get_response(self.client, self.url, params)
# self.assertEqual(response.status_code, 200)
# self.assertEqual(response.status_code, 200)
# def test_public_deter_qualif_response(self):
# public_deter_qualif_1.make()
# public_deter_qualif_2.make()
# params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
# 'tipo': 'DETER', 'estagio': 'Corte Raso'}
# response = get_response(self.client, self.url, params)
# self.assertEqual(response.status_code, 200)
# def test_deter_awifs_response(self):
# deter_awifs_1.make()
# deter_awifs_2.make()
# params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
# 'tipo': 'DETER', 'estagio': 'Corte Raso'}
# response = get_response(self.client, self.url, params)
# self.assertEqual(response.status_code, 200)
| 6,320
| 52
| 385
|
c6f50421da170da4e7c1f28fd95dffc055cc033a
| 3,719
|
py
|
Python
|
apps/DeepFaceLive/ui/widgets/QBackendPanel.py
|
jkennedyvz/DeepFaceLive
|
274c20808da089eb7fc0fc0e8abe649379a29ffe
|
[
"MIT"
] | null | null | null |
apps/DeepFaceLive/ui/widgets/QBackendPanel.py
|
jkennedyvz/DeepFaceLive
|
274c20808da089eb7fc0fc0e8abe649379a29ffe
|
[
"MIT"
] | null | null | null |
apps/DeepFaceLive/ui/widgets/QBackendPanel.py
|
jkennedyvz/DeepFaceLive
|
274c20808da089eb7fc0fc0e8abe649379a29ffe
|
[
"MIT"
] | null | null | null |
from localization import L
from resources.fonts import QXFontDB
from resources.gfx import QXImageDB, QXImageSequenceDB
from xlib import qt as qtx
from ...backend import BackendHost
class QBackendPanel(qtx.QXWidget):
"""
Base panel for CSW backend
"""
| 39.989247
| 159
| 0.613606
|
from localization import L
from resources.fonts import QXFontDB
from resources.gfx import QXImageDB, QXImageSequenceDB
from xlib import qt as qtx
from ...backend import BackendHost
class QBackendPanel(qtx.QXWidget):
"""
Base panel for CSW backend
"""
def __init__(self, backend : BackendHost, name : str, layout, content_align_top=False):
super().__init__()
if not isinstance(backend, BackendHost):
raise ValueError('backend must be an instance of BackendHost')
self._backend = backend
self._name = name
backend.call_on_state_change(self._on_backend_state_change)
backend.call_on_profile_timing(self._on_backend_profile_timing)
btn_on_off = self._btn_on_off = qtx.QXPushButton(tooltip_text=L('@QBackendPanel.start'),
released=self._on_btn_on_off_released,
fixed_width=20)
btn_reset_state = self._btn_reset_state = qtx.QXPushButton(image=QXImageDB.settings_reset_outline('gray'),
released=self._on_btn_reset_state_released, tooltip_text=L('@QBackendPanel.reset_settings'),
fixed_width=20)
fps_label = self._fps_label = qtx.QXLabel()
bar_widget = self._bar_widget = \
qtx.QXFrameHBox(widgets=[btn_on_off, 1, btn_reset_state, 2,
qtx.QXLabel(name, font=QXFontDB.get_default_font(10)),
(fps_label, qtx.AlignRight), 2],
size_policy=('expanding', 'fixed'), fixed_height=24)
content_widget = self._content_widget = qtx.QXFrameHBox([layout], contents_margins=2, enabled=False)
l_widgets = [bar_widget, 1]
if not content_align_top:
l_widgets += [ qtx.QXFrame(size_policy=('expanding','expanding') ) ]
l_widgets += [content_widget]
l_widgets += [ qtx.QXFrame(size_policy=('expanding', 'expanding') ) ]
self.setLayout(qtx.QXVBoxLayout(l_widgets))
btn_on_off.set_image( QXImageDB.power_outline('red') )
def _on_backend_state_change(self, backend, started, starting, stopping, stopped, busy):
btn_on_off = self._btn_on_off
if started or starting or stopping:
btn_on_off.setToolTip(L('@QBackendPanel.stop'))
if stopped:
btn_on_off.setToolTip(L('@QBackendPanel.start'))
if busy or starting or stopping:
btn_on_off.set_image_sequence(QXImageSequenceDB.icon_loading('yellow'), loop_count=0)
elif started:
btn_on_off.set_image( QXImageDB.power_outline('lime') )
elif stopped:
btn_on_off.set_image( QXImageDB.power_outline('red') )
if started and not busy:
qtx.show_and_enable([self._content_widget, self._fps_label])
self._fps_label.setText(None)
else:
qtx.hide_and_disable([self._content_widget, self._fps_label])
self._fps_label.setText(None)
def _on_backend_profile_timing(self, timing : float):
fps = int(1.0 / timing if timing != 0 else 0)
if fps < 10:
self._fps_label.set_color('red')
else:
self._fps_label.set_color(None)
self._fps_label.setText(f"{fps} {L('@QBackendPanel.FPS')}")
def _on_btn_on_off_released(self):
backend = self._backend
if backend.is_stopped():
backend.start()
else:
backend.stop()
def _on_btn_reset_state_released(self):
self._backend.reset_state()
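# Minimal usage sketch (an illustration only; `my_backend` and `controls_layout`
# are assumed to come from the surrounding application):
#
#   panel = QBackendPanel(backend=my_backend,          # a concrete BackendHost
#                         name='Face Detector',
#                         layout=controls_layout)      # Qt layout with the backend's settings
#
# The panel wires its on/off button to backend.start()/backend.stop(), hides and
# disables the content while the backend is stopped or busy, and shows the FPS
# reported through backend.call_on_profile_timing().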
| 3,319
| 0
| 134
|
19f84f2a13776fdcf0bd9bd795f06cdd34f69809
| 443
|
py
|
Python
|
tests/hmc_test.py
|
dfm/rmhmc
|
df14344296250e54ef50cf065798b94ef6d641bc
|
[
"MIT"
] | 4
|
2021-09-24T00:12:52.000Z
|
2022-01-02T08:38:07.000Z
|
tests/hmc_test.py
|
dfm/rmhmc
|
df14344296250e54ef50cf065798b94ef6d641bc
|
[
"MIT"
] | null | null | null |
tests/hmc_test.py
|
dfm/rmhmc
|
df14344296250e54ef50cf065798b94ef6d641bc
|
[
"MIT"
] | null | null | null |
import jax.numpy as jnp
import numpy as np
from jax import random
from rmhmc.hmc import hmc
from .problems import banana
| 23.315789
| 64
| 0.715576
|
import jax.numpy as jnp
import numpy as np
from jax import random
from rmhmc.hmc import hmc
from .problems import banana
def test_divergence() -> None:
system = hmc(banana(False, False), initial_step_size=1000.0)
state = system.init(jnp.array([0.3, 0.5]))
state_ = system.step(state, random.PRNGKey(5))
assert state_[2].diverging
assert not state_[2].accept
np.testing.assert_allclose(state_[2].accept_prob, 0.0)
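# Companion sketch (an assumption, not part of the upstream test suite): with a
# small step size the same banana problem should integrate without the energy
# blow-up that flags a divergence. It reuses only the calls exercised above.
def test_no_divergence_small_step() -> None:
    system = hmc(banana(False, False), initial_step_size=1e-3)
    state = system.init(jnp.array([0.3, 0.5]))
    state_ = system.step(state, random.PRNGKey(5))
    # Expected under the assumption above: no divergence is reported.
    assert not state_[2].diverging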
| 296
| 0
| 23
|
cc4b92f8c2b0fb7e78022602f5dcd214422a4189
| 27,874
|
py
|
Python
|
sdk/python/pulumi_rancher2/namespace.py
|
pulumi/pulumi-rancher2
|
7a98af8cf598b711084a7f46c0fe71b43ed7a8ac
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2020-03-23T15:59:11.000Z
|
2021-01-29T00:37:32.000Z
|
sdk/python/pulumi_rancher2/namespace.py
|
pulumi/pulumi-rancher2
|
7a98af8cf598b711084a7f46c0fe71b43ed7a8ac
|
[
"ECL-2.0",
"Apache-2.0"
] | 76
|
2020-01-16T20:00:25.000Z
|
2022-03-31T20:30:08.000Z
|
sdk/python/pulumi_rancher2/namespace.py
|
pulumi/pulumi-rancher2
|
7a98af8cf598b711084a7f46c0fe71b43ed7a8ac
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-03-27T17:39:59.000Z
|
2020-11-24T23:09:24.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['NamespaceArgs', 'Namespace']
@pulumi.input_type
@pulumi.input_type
| 44.958065
| 268
| 0.643431
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['NamespaceArgs', 'Namespace']
@pulumi.input_type
class NamespaceArgs:
def __init__(__self__, *,
project_id: pulumi.Input[str],
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
container_resource_limit: Optional[pulumi.Input['NamespaceContainerResourceLimitArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_quota: Optional[pulumi.Input['NamespaceResourceQuotaArgs']] = None,
wait_for_cluster: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a Namespace resource.
:param pulumi.Input[str] project_id: The project id where assign namespace. It's on the form `project_id=<cluster_id>:<id>`. Updating `<id>` part on same `<cluster_id>` namespace will be moved between projects (string)
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations for Node Pool object (map)
:param pulumi.Input['NamespaceContainerResourceLimitArgs'] container_resource_limit: Default containers resource limits on namespace (List maxitem:1)
:param pulumi.Input[str] description: A namespace description (string)
:param pulumi.Input[Mapping[str, Any]] labels: Labels for Node Pool object (map)
:param pulumi.Input[str] name: The name of the namespace (string)
:param pulumi.Input['NamespaceResourceQuotaArgs'] resource_quota: Resource quota for namespace. Rancher v2.1.x or higher (list maxitems:1)
:param pulumi.Input[bool] wait_for_cluster: Wait for cluster becomes active. Default `false` (bool)
"""
pulumi.set(__self__, "project_id", project_id)
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if container_resource_limit is not None:
pulumi.set(__self__, "container_resource_limit", container_resource_limit)
if description is not None:
pulumi.set(__self__, "description", description)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if name is not None:
pulumi.set(__self__, "name", name)
if resource_quota is not None:
pulumi.set(__self__, "resource_quota", resource_quota)
if wait_for_cluster is not None:
pulumi.set(__self__, "wait_for_cluster", wait_for_cluster)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Input[str]:
"""
The project id where assign namespace. It's on the form `project_id=<cluster_id>:<id>`. Updating `<id>` part on same `<cluster_id>` namespace will be moved between projects (string)
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: pulumi.Input[str]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Annotations for Node Pool object (map)
"""
return pulumi.get(self, "annotations")
@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "annotations", value)
@property
@pulumi.getter(name="containerResourceLimit")
def container_resource_limit(self) -> Optional[pulumi.Input['NamespaceContainerResourceLimitArgs']]:
"""
Default containers resource limits on namespace (List maxitem:1)
"""
return pulumi.get(self, "container_resource_limit")
@container_resource_limit.setter
def container_resource_limit(self, value: Optional[pulumi.Input['NamespaceContainerResourceLimitArgs']]):
pulumi.set(self, "container_resource_limit", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A namespace description (string)
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Labels for Node Pool object (map)
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the namespace (string)
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceQuota")
def resource_quota(self) -> Optional[pulumi.Input['NamespaceResourceQuotaArgs']]:
"""
Resource quota for namespace. Rancher v2.1.x or higher (list maxitems:1)
"""
return pulumi.get(self, "resource_quota")
@resource_quota.setter
def resource_quota(self, value: Optional[pulumi.Input['NamespaceResourceQuotaArgs']]):
pulumi.set(self, "resource_quota", value)
@property
@pulumi.getter(name="waitForCluster")
def wait_for_cluster(self) -> Optional[pulumi.Input[bool]]:
"""
Wait for cluster becomes active. Default `false` (bool)
"""
return pulumi.get(self, "wait_for_cluster")
@wait_for_cluster.setter
def wait_for_cluster(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "wait_for_cluster", value)
@pulumi.input_type
class _NamespaceState:
def __init__(__self__, *,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
container_resource_limit: Optional[pulumi.Input['NamespaceContainerResourceLimitArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
resource_quota: Optional[pulumi.Input['NamespaceResourceQuotaArgs']] = None,
wait_for_cluster: Optional[pulumi.Input[bool]] = None):
"""
Input properties used for looking up and filtering Namespace resources.
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations for Node Pool object (map)
:param pulumi.Input['NamespaceContainerResourceLimitArgs'] container_resource_limit: Default containers resource limits on namespace (List maxitem:1)
:param pulumi.Input[str] description: A namespace description (string)
:param pulumi.Input[Mapping[str, Any]] labels: Labels for Node Pool object (map)
:param pulumi.Input[str] name: The name of the namespace (string)
:param pulumi.Input[str] project_id: The project id where assign namespace. It's on the form `project_id=<cluster_id>:<id>`. Updating `<id>` part on same `<cluster_id>` namespace will be moved between projects (string)
:param pulumi.Input['NamespaceResourceQuotaArgs'] resource_quota: Resource quota for namespace. Rancher v2.1.x or higher (list maxitems:1)
:param pulumi.Input[bool] wait_for_cluster: Wait for cluster becomes active. Default `false` (bool)
"""
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if container_resource_limit is not None:
pulumi.set(__self__, "container_resource_limit", container_resource_limit)
if description is not None:
pulumi.set(__self__, "description", description)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if name is not None:
pulumi.set(__self__, "name", name)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if resource_quota is not None:
pulumi.set(__self__, "resource_quota", resource_quota)
if wait_for_cluster is not None:
pulumi.set(__self__, "wait_for_cluster", wait_for_cluster)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Annotations for Node Pool object (map)
"""
return pulumi.get(self, "annotations")
@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "annotations", value)
@property
@pulumi.getter(name="containerResourceLimit")
def container_resource_limit(self) -> Optional[pulumi.Input['NamespaceContainerResourceLimitArgs']]:
"""
Default containers resource limits on namespace (List maxitem:1)
"""
return pulumi.get(self, "container_resource_limit")
@container_resource_limit.setter
def container_resource_limit(self, value: Optional[pulumi.Input['NamespaceContainerResourceLimitArgs']]):
pulumi.set(self, "container_resource_limit", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A namespace description (string)
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Labels for Node Pool object (map)
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the namespace (string)
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
The project id where assign namespace. It's on the form `project_id=<cluster_id>:<id>`. Updating `<id>` part on same `<cluster_id>` namespace will be moved between projects (string)
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter(name="resourceQuota")
def resource_quota(self) -> Optional[pulumi.Input['NamespaceResourceQuotaArgs']]:
"""
Resource quota for namespace. Rancher v2.1.x or higher (list maxitems:1)
"""
return pulumi.get(self, "resource_quota")
@resource_quota.setter
def resource_quota(self, value: Optional[pulumi.Input['NamespaceResourceQuotaArgs']]):
pulumi.set(self, "resource_quota", value)
@property
@pulumi.getter(name="waitForCluster")
def wait_for_cluster(self) -> Optional[pulumi.Input[bool]]:
"""
Wait for cluster becomes active. Default `false` (bool)
"""
return pulumi.get(self, "wait_for_cluster")
@wait_for_cluster.setter
def wait_for_cluster(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "wait_for_cluster", value)
class Namespace(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
container_resource_limit: Optional[pulumi.Input[pulumi.InputType['NamespaceContainerResourceLimitArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
resource_quota: Optional[pulumi.Input[pulumi.InputType['NamespaceResourceQuotaArgs']]] = None,
wait_for_cluster: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Provides a Rancher v2 Namespace resource. This can be used to create namespaces for Rancher v2 environments and retrieve their information.
## Example Usage
```python
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 Namespace
foo = rancher2.Namespace("foo",
container_resource_limit=rancher2.NamespaceContainerResourceLimitArgs(
limits_cpu="20m",
limits_memory="20Mi",
requests_cpu="1m",
requests_memory="1Mi",
),
description="foo namespace",
project_id="<PROJECT_ID>",
resource_quota=rancher2.NamespaceResourceQuotaArgs(
limit=rancher2.NamespaceResourceQuotaLimitArgs(
limits_cpu="100m",
limits_memory="100Mi",
requests_storage="1Gi",
),
))
```
```python
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 Cluster
foo_custom = rancher2.Cluster("foo-custom",
description="Foo rancher2 custom cluster",
rke_config=rancher2.ClusterRkeConfigArgs(
network=rancher2.ClusterRkeConfigNetworkArgs(
plugin="canal",
),
))
# Create a new rancher2 Namespace assigned to default cluster project
foo = rancher2.Namespace("foo",
project_id=foo_custom.default_project_id,
description="foo namespace",
resource_quota=rancher2.NamespaceResourceQuotaArgs(
limit=rancher2.NamespaceResourceQuotaLimitArgs(
limits_cpu="100m",
limits_memory="100Mi",
requests_storage="1Gi",
),
),
container_resource_limit=rancher2.NamespaceContainerResourceLimitArgs(
limits_cpu="20m",
limits_memory="20Mi",
requests_cpu="1m",
requests_memory="1Mi",
))
```
## Import
Namespaces can be imported using the namespace ID in the format `<project_id>.<namespace_id>`
```sh
$ pulumi import rancher2:index/namespace:Namespace foo <project_id>.<namespaces_id>
```
        `<project_id>` is in the format `<cluster_id>:<id>`, but the `<id>` part is optional:
        - If the full project_id is provided, `<project_id>=<cluster_id>:<id>`, the namespace will be assigned to the corresponding cluster project once it's imported.
        - If the `<id>` part is omitted, `<project_id>=<cluster_id>`, the namespace will not be assigned to any project. To move it into a project, `<project_id>=<cluster_id>:<id>` needs to be updated in the tf file. Namespace movement is only supported inside the same `cluster_id`.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations for Node Pool object (map)
:param pulumi.Input[pulumi.InputType['NamespaceContainerResourceLimitArgs']] container_resource_limit: Default containers resource limits on namespace (List maxitem:1)
:param pulumi.Input[str] description: A namespace description (string)
:param pulumi.Input[Mapping[str, Any]] labels: Labels for Node Pool object (map)
:param pulumi.Input[str] name: The name of the namespace (string)
:param pulumi.Input[str] project_id: The project id where assign namespace. It's on the form `project_id=<cluster_id>:<id>`. Updating `<id>` part on same `<cluster_id>` namespace will be moved between projects (string)
:param pulumi.Input[pulumi.InputType['NamespaceResourceQuotaArgs']] resource_quota: Resource quota for namespace. Rancher v2.1.x or higher (list maxitems:1)
:param pulumi.Input[bool] wait_for_cluster: Wait for cluster becomes active. Default `false` (bool)
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: NamespaceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Rancher v2 Namespace resource. This can be used to create namespaces for Rancher v2 environments and retrieve their information.
## Example Usage
```python
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 Namespace
foo = rancher2.Namespace("foo",
container_resource_limit=rancher2.NamespaceContainerResourceLimitArgs(
limits_cpu="20m",
limits_memory="20Mi",
requests_cpu="1m",
requests_memory="1Mi",
),
description="foo namespace",
project_id="<PROJECT_ID>",
resource_quota=rancher2.NamespaceResourceQuotaArgs(
limit=rancher2.NamespaceResourceQuotaLimitArgs(
limits_cpu="100m",
limits_memory="100Mi",
requests_storage="1Gi",
),
))
```
```python
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 Cluster
foo_custom = rancher2.Cluster("foo-custom",
description="Foo rancher2 custom cluster",
rke_config=rancher2.ClusterRkeConfigArgs(
network=rancher2.ClusterRkeConfigNetworkArgs(
plugin="canal",
),
))
# Create a new rancher2 Namespace assigned to default cluster project
foo = rancher2.Namespace("foo",
project_id=foo_custom.default_project_id,
description="foo namespace",
resource_quota=rancher2.NamespaceResourceQuotaArgs(
limit=rancher2.NamespaceResourceQuotaLimitArgs(
limits_cpu="100m",
limits_memory="100Mi",
requests_storage="1Gi",
),
),
container_resource_limit=rancher2.NamespaceContainerResourceLimitArgs(
limits_cpu="20m",
limits_memory="20Mi",
requests_cpu="1m",
requests_memory="1Mi",
))
```
## Import
Namespaces can be imported using the namespace ID in the format `<project_id>.<namespace_id>`
```sh
$ pulumi import rancher2:index/namespace:Namespace foo <project_id>.<namespaces_id>
```
        `<project_id>` is in the format `<cluster_id>:<id>`, but the `<id>` part is optional:
        - If the full project_id is provided, `<project_id>=<cluster_id>:<id>`, the namespace will be assigned to the corresponding cluster project once it's imported.
        - If the `<id>` part is omitted, `<project_id>=<cluster_id>`, the namespace will not be assigned to any project. To move it into a project, `<project_id>=<cluster_id>:<id>` needs to be updated in the tf file. Namespace movement is only supported inside the same `cluster_id`.
:param str resource_name: The name of the resource.
:param NamespaceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NamespaceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
container_resource_limit: Optional[pulumi.Input[pulumi.InputType['NamespaceContainerResourceLimitArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
resource_quota: Optional[pulumi.Input[pulumi.InputType['NamespaceResourceQuotaArgs']]] = None,
wait_for_cluster: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NamespaceArgs.__new__(NamespaceArgs)
__props__.__dict__["annotations"] = annotations
__props__.__dict__["container_resource_limit"] = container_resource_limit
__props__.__dict__["description"] = description
__props__.__dict__["labels"] = labels
__props__.__dict__["name"] = name
if project_id is None and not opts.urn:
raise TypeError("Missing required property 'project_id'")
__props__.__dict__["project_id"] = project_id
__props__.__dict__["resource_quota"] = resource_quota
__props__.__dict__["wait_for_cluster"] = wait_for_cluster
super(Namespace, __self__).__init__(
'rancher2:index/namespace:Namespace',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
container_resource_limit: Optional[pulumi.Input[pulumi.InputType['NamespaceContainerResourceLimitArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
resource_quota: Optional[pulumi.Input[pulumi.InputType['NamespaceResourceQuotaArgs']]] = None,
wait_for_cluster: Optional[pulumi.Input[bool]] = None) -> 'Namespace':
"""
Get an existing Namespace resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations for Node Pool object (map)
:param pulumi.Input[pulumi.InputType['NamespaceContainerResourceLimitArgs']] container_resource_limit: Default containers resource limits on namespace (List maxitem:1)
:param pulumi.Input[str] description: A namespace description (string)
:param pulumi.Input[Mapping[str, Any]] labels: Labels for Node Pool object (map)
:param pulumi.Input[str] name: The name of the namespace (string)
:param pulumi.Input[str] project_id: The project id where assign namespace. It's on the form `project_id=<cluster_id>:<id>`. Updating `<id>` part on same `<cluster_id>` namespace will be moved between projects (string)
:param pulumi.Input[pulumi.InputType['NamespaceResourceQuotaArgs']] resource_quota: Resource quota for namespace. Rancher v2.1.x or higher (list maxitems:1)
:param pulumi.Input[bool] wait_for_cluster: Wait for cluster becomes active. Default `false` (bool)
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _NamespaceState.__new__(_NamespaceState)
__props__.__dict__["annotations"] = annotations
__props__.__dict__["container_resource_limit"] = container_resource_limit
__props__.__dict__["description"] = description
__props__.__dict__["labels"] = labels
__props__.__dict__["name"] = name
__props__.__dict__["project_id"] = project_id
__props__.__dict__["resource_quota"] = resource_quota
__props__.__dict__["wait_for_cluster"] = wait_for_cluster
return Namespace(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def annotations(self) -> pulumi.Output[Mapping[str, Any]]:
"""
Annotations for Node Pool object (map)
"""
return pulumi.get(self, "annotations")
@property
@pulumi.getter(name="containerResourceLimit")
def container_resource_limit(self) -> pulumi.Output[Optional['outputs.NamespaceContainerResourceLimit']]:
"""
Default containers resource limits on namespace (List maxitem:1)
"""
return pulumi.get(self, "container_resource_limit")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A namespace description (string)
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def labels(self) -> pulumi.Output[Mapping[str, Any]]:
"""
Labels for Node Pool object (map)
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the namespace (string)
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Output[str]:
"""
The project id where assign namespace. It's on the form `project_id=<cluster_id>:<id>`. Updating `<id>` part on same `<cluster_id>` namespace will be moved between projects (string)
"""
return pulumi.get(self, "project_id")
@property
@pulumi.getter(name="resourceQuota")
def resource_quota(self) -> pulumi.Output['outputs.NamespaceResourceQuota']:
"""
Resource quota for namespace. Rancher v2.1.x or higher (list maxitems:1)
"""
return pulumi.get(self, "resource_quota")
@property
@pulumi.getter(name="waitForCluster")
def wait_for_cluster(self) -> pulumi.Output[Optional[bool]]:
"""
Wait for cluster becomes active. Default `false` (bool)
"""
return pulumi.get(self, "wait_for_cluster")
| 4,055
| 23,291
| 67
|
04240f905ddfcb37b6a4655974a95272768d65ab
| 1,791
|
py
|
Python
|
cortex_DIM/evaluation_models/msssim_eval.py
|
Soapy-Salted-Fish-King/DIM
|
bac4765a8126746675f517c7bfa1b04b88044d51
|
[
"BSD-3-Clause"
] | 749
|
2018-08-24T13:55:34.000Z
|
2022-03-29T20:30:59.000Z
|
cortex_DIM/evaluation_models/msssim_eval.py
|
Soapy-Salted-Fish-King/DIM
|
bac4765a8126746675f517c7bfa1b04b88044d51
|
[
"BSD-3-Clause"
] | 50
|
2018-09-09T13:27:40.000Z
|
2022-01-25T16:45:28.000Z
|
cortex_DIM/evaluation_models/msssim_eval.py
|
Soapy-Salted-Fish-King/DIM
|
bac4765a8126746675f517c7bfa1b04b88044d51
|
[
"BSD-3-Clause"
] | 98
|
2018-08-24T15:55:23.000Z
|
2022-01-05T14:40:58.000Z
|
'''Encoder eval for MS-SSIM
'''
from cortex.main import run
from cortex_DIM.configs.deconvnets import configs as decoder_configs
from cortex_DIM.models.decoder import Decoder
class MSSSIMEval(Decoder):
'''Measure MS-SSIM through a decoder trained with reconstruction.
'''
defaults = dict(
data=dict(batch_size=dict(train=64, test=64),
inputs=dict(inputs='images'),
skip_last_batch=True),
optimizer=dict(learning_rate=1e-4,
scheduler='MultiStepLR',
scheduler_options=dict(milestones=[50, 100], gamma=0.1))
)
def build(self, encoder, config_,
task_idx=-1, config='basic32x32', args={}):
        '''Builds MS-SSIM evaluator.
        Args:
            encoder: Encoder network whose activations are decoded.
task_idx: Index of output tensor to measure MS-SSIM.
config: Config name for decoder. See `configs` for details.
args: Arguments to update config with.
'''
self.nets.encoder = encoder
X = self.inputs('data.images')
self.task_idx = task_idx
out = self.nets.encoder(X, return_all_activations=True)[self.task_idx]
config = decoder_configs.get(config)
config.update(**args)
super().build(out.size()[1:], args=config)
if __name__ == '__main__':
run(MSSSIMEval())
| 29.360656
| 83
| 0.61474
|
'''Encoder eval for MS-SSIM
'''
from cortex.main import run
from cortex_DIM.configs.deconvnets import configs as decoder_configs
from cortex_DIM.models.decoder import Decoder
class MSSSIMEval(Decoder):
'''Measure MS-SSIM through a decoder trained with reconstruction.
'''
defaults = dict(
data=dict(batch_size=dict(train=64, test=64),
inputs=dict(inputs='images'),
skip_last_batch=True),
optimizer=dict(learning_rate=1e-4,
scheduler='MultiStepLR',
scheduler_options=dict(milestones=[50, 100], gamma=0.1))
)
def build(self, encoder, config_,
task_idx=-1, config='basic32x32', args={}):
        '''Builds MS-SSIM evaluator.
        Args:
            encoder: Encoder network whose activations are decoded.
task_idx: Index of output tensor to measure MS-SSIM.
config: Config name for decoder. See `configs` for details.
args: Arguments to update config with.
'''
self.nets.encoder = encoder
X = self.inputs('data.images')
self.task_idx = task_idx
out = self.nets.encoder(X, return_all_activations=True)[self.task_idx]
config = decoder_configs.get(config)
config.update(**args)
super().build(out.size()[1:], args=config)
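    # Usage sketch (hypothetical arguments, for illustration only): assuming an
    # encoder whose forward pass supports return_all_activations,
    #   model.build(encoder=my_encoder, config_=None, task_idx=-2,
    #               config='basic32x32')
    # would decode from the encoder's second-to-last activation tensor.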
def routine(self, outs=None):
X = self.inputs('data.images')
if outs is None:
outs = self.nets.encoder(X, return_all_activations=True)
out = outs[self.task_idx]
super().routine(X, out.detach())
def visualize(self, inputs):
out = self.nets.encoder(inputs, return_all_activations=True)[self.task_idx]
super().visualize(out)
if __name__ == '__main__':
run(MSSSIMEval())
| 339
| 0
| 54
|
8d0910cd5960f52a9db19fe70f314beaa84f3f9b
| 3,742
|
py
|
Python
|
btsprice/yahoo.py
|
pch957/btsprice
|
8a6913dfc0d74e668e116855ea8bb1caf3af6c04
|
[
"MIT"
] | 18
|
2016-09-16T16:07:35.000Z
|
2020-08-03T13:14:56.000Z
|
btsprice/yahoo.py
|
roelandp/btsprice
|
ad2f4d6d694a4ac71d5b227a22731160f700323b
|
[
"MIT"
] | 5
|
2017-08-31T00:14:02.000Z
|
2019-10-18T12:44:22.000Z
|
btsprice/yahoo.py
|
roelandp/btsprice
|
ad2f4d6d694a4ac71d5b227a22731160f700323b
|
[
"MIT"
] | 20
|
2016-06-27T09:46:18.000Z
|
2020-10-26T05:17:47.000Z
|
# -*- coding: utf-8 -*-
import asyncio
import aiohttp
if __name__ == "__main__":
loop = asyncio.get_event_loop()
yahoo = Yahoo()
loop.run_until_complete(yahoo.fetch_price())
loop.run_forever()
| 35.638095
| 80
| 0.512293
|
# -*- coding: utf-8 -*-
import asyncio
import aiohttp
def is_float_try(str):
try:
float(str)
return True
except ValueError:
return False
class Yahoo(object):
def __init__(self):
header = {
'content-type': 'application/json',
'User-Agent': 'Mozilla/5.0 Gecko/20100101 Firefox/22.0'}
self.session = aiohttp.ClientSession(headers=header)
self.param_s = {}
self.quote = {}
self.scale = {}
self.init_param_dict1()
self.init_param_dict2()
self.init_param_dict3()
self.rate = {'CNY': {'CNY': 1.0}, 'USD': {'USD': 1.0}}
def init_param_dict1(self):
assets = ["CNY", "KRW", "TRY", "SGD", "HKD", "RUB", "SEK", "NZD",
"MXN", "CAD", "CHF", "AUD", "GBP", "JPY", "EUR", "BTC", "ARS"]
for asset in assets:
self.param_s[asset] = asset + "USD=X"
# todo, GOLD/SILVER wrong from yahoo
# self.param_s["GOLD"] = "XAUUSD=X"
# self.param_s["SILVER"] = "XAGUSD=X"
for asset in self.param_s:
self.quote[asset] = "USD"
def init_param_dict2(self):
# todo:"OIL", GAS", "DIESEL"
self.param_s["SHENZHEN"] = '399106.SZ'
self.quote["SHENZHEN"] = "CNY"
# todo, wrong from yahoo
# self.param_s["SHANGHAI"] = '000001.SS'
# self.quote["SHANGHAI"] = "CNY"
self.param_s["NASDAQC"] = '^IXIC'
self.quote["NASDAQC"] = "USD"
self.param_s["NIKKEI"] = '^N225'
self.quote["NIKKEI"] = "JPY"
self.param_s["HANGSENG"] = '^HSI'
self.quote["HANGSENG"] = "HKD"
def init_param_dict3(self):
self.param_s["BDR.AAPL"] = 'AAPL'
self.quote["BDR.AAPL"] = "USD"
self.scale["BDR.AAPL"] = 0.001
def get_query_param(self, assets):
query_string = ','.join(
'%s' % (self.param_s[asset]) for asset in assets)
params = {'s': query_string, 'f': 'l1', 'e': '.csv'}
return params
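    # Illustrative result (derived from param_s above; not verified against the
    # live Yahoo endpoint): get_query_param(["CNY", "KRW"]) would build
    #   {'s': 'CNYUSD=X,KRWUSD=X', 'f': 'l1', 'e': '.csv'}
    # i.e. one comma-separated symbol list passed as the "s" query parameter.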
@asyncio.coroutine
def fetch_price(self, assets=None):
if assets is None:
assets = self.param_s.keys()
url = "http://download.finance.yahoo.com/d/quotes.csv"
try:
params = self.get_query_param(assets)
response = yield from asyncio.wait_for(self.session.get(
url, params=params), 120)
response = yield from response.read()
price = dict(zip(assets, response.split()))
for asset in assets:
if is_float_try(price[asset]):
scale = 1.0
if asset in self.scale:
scale = self.scale[asset]
if self.quote[asset] == "CNY":
self.rate["CNY"][asset] = float(price[asset])
elif self.quote[asset] == "USD":
self.rate["USD"][asset] = float(price[asset])
else:
self.rate["USD"][asset] = float(price[asset]) * \
float(price[self.quote[asset]]) * scale
                # raise an exception when the price is not a float
else:
raise
# there is a bug for yahoo api....
if asset == "GOLD" or asset == "SILVER":
if self.rate["USD"][asset] < 1:
self.rate["USD"][asset] = 1/self.rate["USD"][asset]
except Exception as e:
print("Error fetching results from yahoo!", e)
# print(self.rate)
return self.rate
if __name__ == "__main__":
loop = asyncio.get_event_loop()
yahoo = Yahoo()
loop.run_until_complete(yahoo.fetch_price())
loop.run_forever()
| 3,301
| 183
| 46
|
cd03445dfd6bb3e838191ac085b921cb783539d5
| 322
|
py
|
Python
|
divik/core/io/__init__.py
|
Hirni-Meshram/divik
|
0f542ec2669428458a4ecf6bb450dc90c33b0653
|
[
"Apache-2.0"
] | 10
|
2020-01-10T13:10:38.000Z
|
2022-03-17T05:08:40.000Z
|
divik/core/io/__init__.py
|
Hirni-Meshram/divik
|
0f542ec2669428458a4ecf6bb450dc90c33b0653
|
[
"Apache-2.0"
] | 45
|
2019-10-26T12:42:50.000Z
|
2022-03-12T07:50:40.000Z
|
divik/core/io/__init__.py
|
Hirni-Meshram/divik
|
0f542ec2669428458a4ecf6bb450dc90c33b0653
|
[
"Apache-2.0"
] | 5
|
2021-11-24T04:55:45.000Z
|
2021-12-17T23:38:19.000Z
|
"""Reusable utilities for data and model I/O"""
from ._data_io import (
load_data,
save_csv,
try_load_data,
try_load_xy,
)
from ._model_io import save, saver
DIVIK_RESULT_FNAME = "result.pkl"
__all__ = [
"load_data",
"save_csv",
"try_load_data",
"try_load_xy",
"save",
"saver",
]
| 16.1
| 47
| 0.636646
|
"""Reusable utilities for data and model I/O"""
from ._data_io import (
load_data,
save_csv,
try_load_data,
try_load_xy,
)
from ._model_io import save, saver
DIVIK_RESULT_FNAME = "result.pkl"
__all__ = [
"load_data",
"save_csv",
"try_load_data",
"try_load_xy",
"save",
"saver",
]
| 0
| 0
| 0
|
78b4aa6b5ba61bb43545a9d39e85f0a3741e827a
| 441
|
py
|
Python
|
client/asteroid.py
|
remremrem/EV-Tribute
|
c7dd412eedad4b8eba0cf2d4c95d539d4b80c852
|
[
"MIT"
] | 1
|
2015-06-23T03:48:03.000Z
|
2015-06-23T03:48:03.000Z
|
client/asteroid.py
|
remremrem/EV-Tribute
|
c7dd412eedad4b8eba0cf2d4c95d539d4b80c852
|
[
"MIT"
] | null | null | null |
client/asteroid.py
|
remremrem/EV-Tribute
|
c7dd412eedad4b8eba0cf2d4c95d539d4b80c852
|
[
"MIT"
] | null | null | null |
import pyglet
import rabbyt
from pyglet.window import key
from pyglet.window import mouse
from pyglet.gl import *
from tools import *
| 24.5
| 61
| 0.62585
|
import pyglet
import rabbyt
from pyglet.window import key
from pyglet.window import mouse
from pyglet.gl import *
from tools import *
class Asteroid:
def __init__(self,window,panel):
self.sprite = rabbyt.Sprite(texture = "asteroid.png")
self.sprite.xy = (0,0)
self.speed = 50
self.pos = [0, 0]
self.pos_time = 0
self.rps = .5
self.vector = 0
self.time = rabbyt.get_time()
| 264
| -6
| 49
|
76bd6d76686e284e16cd6ff1f9bb9c73128a6fce
| 843
|
py
|
Python
|
data_cleaning/create_png_dataset.py
|
bioinfoUQAM/Canadian-cropland-dataset
|
bfac01c80e20f6eb224e446e480b16dc5fed2547
|
[
"MIT"
] | 2
|
2021-09-15T02:36:53.000Z
|
2022-03-30T16:05:07.000Z
|
data_cleaning/create_png_dataset.py
|
bioinfoUQAM/Canadian-cropland-dataset
|
bfac01c80e20f6eb224e446e480b16dc5fed2547
|
[
"MIT"
] | null | null | null |
data_cleaning/create_png_dataset.py
|
bioinfoUQAM/Canadian-cropland-dataset
|
bfac01c80e20f6eb224e446e480b16dc5fed2547
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 30 22:41:09 2021
@author: amanda
"""
# loop through the .zip files and create images in .png format
# import necessary libraries
import os
import image_to_png
directory = "dataset_zip"
#file_extensions = ["OSAVI", "NDVI", "GNDVI", "PSRI", "NDVI45"]
extension = "OSAVI"
print("Extension: ", extension)
# crawling through directory and subdirectories
for root, directories, files in os.walk(directory):
for filename in files:
        print("filename", filename)
# join the two strings in order to form the full filepath.
filepath = os.path.join(root, filename)
print("Filepath: ", filepath)
""" For creating RGB images, no extension is required"""
#image_to_png.RGB_spliter(filepath)
image_to_png.three_channel_spliter(filepath, extension)
| 27.193548
| 66
| 0.688019
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 30 22:41:09 2021
@author: amanda
"""
# loop through the .zip files and create images in .png format
# import necessary libraries
import os
import image_to_png
directory = "dataset_zip"
#file_extensions = ["OSAVI", "NDVI", "GNDVI", "PSRI", "NDVI45"]
extension = "OSAVI"
print("Extension: ", extension)
# crawling through directory and subdirectories
for root, directories, files in os.walk(directory):
for filename in files:
        print("filename", filename)
# join the two strings in order to form the full filepath.
filepath = os.path.join(root, filename)
print("Filepath: ", filepath)
""" For creating RGB images, no extension is required"""
#image_to_png.RGB_spliter(filepath)
image_to_png.three_channel_spliter(filepath, extension)
| 0
| 0
| 0
|
4ce5ed3f89af7fd520b5af53b81ee7407fa15d83
| 938
|
py
|
Python
|
analytics/utils.py
|
iamnkc/tournesol
|
4a09985f494577917c357783a37dfae02c57fd82
|
[
"CC0-1.0"
] | null | null | null |
analytics/utils.py
|
iamnkc/tournesol
|
4a09985f494577917c357783a37dfae02c57fd82
|
[
"CC0-1.0"
] | null | null | null |
analytics/utils.py
|
iamnkc/tournesol
|
4a09985f494577917c357783a37dfae02c57fd82
|
[
"CC0-1.0"
] | null | null | null |
import pandas as pd
CRITERIA = [
"largely_recommended",
"reliability",
"importance",
"engaging",
"pedagogy",
"layman_friendly",
"entertaining_relaxing",
"better_habits",
"diversity_inclusion",
"backfire_risk",
]
TCOLOR = [
"#1282b2",
"#DC8A5D",
"#C28BED",
"#4C72D5",
"#4BB061",
"#D37A80",
"#DFC642",
"#76C6CB",
"#9DD654",
"#D8836D",
]
MSG_NO_DATA = "You should first load the public dataset at the top of the page."
def set_df(data, users=[]):
"""Set up the dataframe"""
df_tmp = pd.read_csv(data)
index = ["video_a", "video_b", "public_username"]
df = df_tmp.pivot(index=index, columns="criteria", values="score")
df.reset_index(inplace=True)
if users:
df = df[df["public_username"].isin(users)]
return df
| 18.038462
| 80
| 0.602345
|
import pandas as pd
CRITERIA = [
"largely_recommended",
"reliability",
"importance",
"engaging",
"pedagogy",
"layman_friendly",
"entertaining_relaxing",
"better_habits",
"diversity_inclusion",
"backfire_risk",
]
TCOLOR = [
"#1282b2",
"#DC8A5D",
"#C28BED",
"#4C72D5",
"#4BB061",
"#D37A80",
"#DFC642",
"#76C6CB",
"#9DD654",
"#D8836D",
]
MSG_NO_DATA = "You should first load the public dataset at the top of the page."
def set_df(data, users=[]):
"""Set up the dataframe"""
df_tmp = pd.read_csv(data)
index = ["video_a", "video_b", "public_username"]
df = df_tmp.pivot(index=index, columns="criteria", values="score")
df.reset_index(inplace=True)
if users:
df = df[df["public_username"].isin(users)]
return df
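# Illustrative sketch (hypothetical rows, not shipped with this module): given a
# long-format comparison file such as
#   video_a,video_b,public_username,criteria,score
#   vidA,vidB,alice,reliability,7
#   vidA,vidB,alice,importance,3
# set_df() pivots it into one row per (video_a, video_b, public_username) index
# with one column per distinct value of "criteria" holding the score.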
def get_unique_video_list(df):
return list(set(df["video_a"].tolist() + df["video_b"].tolist()))
| 80
| 0
| 23
|
e0249aa077ed0d6f12ca41eadfc35b890bcae4a2
| 21,271
|
py
|
Python
|
networking_bgpvpn/neutron/services/service_drivers/bagpipe/bagpipe.py
|
openstack/networking-bgpvpn
|
1789824ec90505d7d67c3b624d318c36b798fb12
|
[
"Apache-2.0"
] | 38
|
2015-06-23T08:06:16.000Z
|
2022-01-25T16:03:10.000Z
|
networking_bgpvpn/neutron/services/service_drivers/bagpipe/bagpipe.py
|
openstack/networking-bgpvpn
|
1789824ec90505d7d67c3b624d318c36b798fb12
|
[
"Apache-2.0"
] | null | null | null |
networking_bgpvpn/neutron/services/service_drivers/bagpipe/bagpipe.py
|
openstack/networking-bgpvpn
|
1789824ec90505d7d67c3b624d318c36b798fb12
|
[
"Apache-2.0"
] | 17
|
2015-11-28T00:45:22.000Z
|
2021-07-22T09:22:30.000Z
|
# Copyright (c) 2015 Orange.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import orm
from sqlalchemy import sql
from neutron.db.models import l3
from neutron.db import models_v2
from neutron.debug import debug_agent
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as const
from neutron_lib.db import api as db_api
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from networking_bagpipe.agent.bgpvpn import rpc_client
from networking_bgpvpn.neutron.db import bgpvpn_db
from networking_bgpvpn.neutron.services.common import utils
from networking_bgpvpn.neutron.services.service_drivers.bagpipe \
import bagpipe_v2 as v2
LOG = logging.getLogger(__name__)
@log_helpers.log_method_call
@db_api.CONTEXT_READER
def get_network_info_for_port(context, port_id, network_id):
"""Get MAC, IP and Gateway IP addresses informations for a specific port"""
try:
net_info = (context.session.
query(models_v2.Port.mac_address,
models_v2.IPAllocation.ip_address,
models_v2.Subnet.cidr,
models_v2.Subnet.gateway_ip).
join(models_v2.IPAllocation,
models_v2.IPAllocation.port_id ==
models_v2.Port.id).
join(models_v2.Subnet,
models_v2.IPAllocation.subnet_id ==
models_v2.Subnet.id).
filter(models_v2.Subnet.ip_version == 4).
filter(models_v2.Port.id == port_id).one())
(mac_address, ip_address, cidr, gateway_ip) = net_info
except orm.exc.NoResultFound:
return
gateway_mac = (
context.session.
query(models_v2.Port.mac_address).
filter(
models_v2.Port.network_id == network_id,
(models_v2.Port.device_owner ==
const.DEVICE_OWNER_ROUTER_INTF)
).
one_or_none()
)
return {'mac_address': mac_address,
'ip_address': ip_address + cidr[cidr.index('/'):],
'gateway_ip': gateway_ip,
'gateway_mac': gateway_mac[0] if gateway_mac else None}
@db_api.CONTEXT_READER
@db_api.CONTEXT_READER
@db_api.CONTEXT_READER
@db_api.CONTEXT_READER
@db_api.CONTEXT_READER
@db_api.CONTEXT_READER
@db_api.CONTEXT_READER
@registry.has_registry_receivers
class BaGPipeBGPVPNDriver(v2.BaGPipeBGPVPNDriver):
"""BGPVPN Service Driver class for BaGPipe"""
def _format_bgpvpn(self, context, bgpvpn, network_id):
"""JSON-format BGPVPN
BGPVPN, network identifiers, and route targets.
"""
formatted_bgpvpn = {'id': bgpvpn['id'],
'network_id': network_id,
'gateway_mac': get_gateway_mac(context,
network_id)}
formatted_bgpvpn.update(
self._format_bgpvpn_network_route_targets([bgpvpn]))
return formatted_bgpvpn
def _format_bgpvpn_network_route_targets(self, bgpvpns):
"""Format BGPVPN network informations (VPN type and route targets)
[{
'type': 'l3',
'route_targets': ['12345:1', '12345:2'],
'import_targets': ['12345:3'],
'export_targets': ['12345:4']
},
{
'type': 'l3',
'route_targets': ['12346:1']
},
{
'type': 'l2',
'route_targets': ['12347:1']
}
]
to
{
'l3vpn' : {
'import_rt': ['12345:1', '12345:2', '12345:3', '12346:1'],
'export_rt': ['12345:1', '12345:2', '12345:4', '12346:1']
},
'l2vpn' : {
'import_rt': ['12347:1'],
'export_rt': ['12347:1']
}
}
"""
bgpvpn_rts = {}
for bgpvpn in bgpvpns:
# Add necessary keys to BGP VPN route targets dictionary
if bgpvpn['type'] + 'vpn' not in bgpvpn_rts:
bgpvpn_rts.update(
{bgpvpn['type'] + 'vpn': {'import_rt': [],
'export_rt': []}}
)
if 'route_targets' in bgpvpn:
bgpvpn_rts[bgpvpn['type'] + 'vpn']['import_rt'] += (
bgpvpn['route_targets']
)
bgpvpn_rts[bgpvpn['type'] + 'vpn']['export_rt'] += (
bgpvpn['route_targets']
)
if 'import_targets' in bgpvpn:
bgpvpn_rts[bgpvpn['type'] + 'vpn']['import_rt'] += (
bgpvpn['import_targets']
)
if 'export_targets' in bgpvpn:
bgpvpn_rts[bgpvpn['type'] + 'vpn']['export_rt'] += (
bgpvpn['export_targets']
)
for attribute in ('import_rt', 'export_rt'):
if bgpvpn_rts[bgpvpn['type'] + 'vpn'][attribute]:
bgpvpn_rts[bgpvpn['type'] + 'vpn'][attribute] = list(
set(bgpvpn_rts[bgpvpn['type'] + 'vpn'][attribute]))
return bgpvpn_rts
def _retrieve_bgpvpn_network_info_for_port(self, context, port):
"""Retrieve BGP VPN network informations for a specific port
{
'network_id': <UUID>,
'mac_address': '00:00:de:ad:be:ef',
'ip_address': '10.0.0.2',
'gateway_ip': '10.0.0.1',
'gateway_mac': 'aa:bb:cc:dd:ee:ff', # if a router interface exists
'l3vpn' : {
'import_rt': ['12345:1', '12345:2', '12345:3'],
'export_rt': ['12345:1', '12345:2', '12345:4']
}
}
"""
port_id = port['id']
network_id = port['network_id']
bgpvpn_network_info = {}
bgpvpns = self._bgpvpns_for_network(context, network_id)
# NOTE(tmorin): We currently need to send 'network_id', 'mac_address',
# 'ip_address', 'gateway_ip' to the agent, even in the absence of
        # a BGPVPN bound to the port. If we don't, this information will be
        # missing from an update_bgpvpn RPC. Once the agent has the ability
# to retrieve this info by itself, we'll change this method
# to return {} if there is no bound bgpvpn.
bgpvpn_rts = self._format_bgpvpn_network_route_targets(bgpvpns)
LOG.debug("Port connected on BGPVPN network %s with route targets "
"%s" % (network_id, bgpvpn_rts))
bgpvpn_network_info.update(bgpvpn_rts)
LOG.debug("Getting port %s network details" % port_id)
network_info = get_network_info_for_port(context, port_id, network_id)
if not network_info:
LOG.warning("No network information for net %s", network_id)
return
bgpvpn_network_info.update(network_info)
return bgpvpn_network_info
@db_api.CONTEXT_READER
@log_helpers.log_method_call
@log_helpers.log_method_call
@log_helpers.log_method_call
@log_helpers.log_method_call
@registry.receives(resources.PORT, [events.AFTER_UPDATE])
@log_helpers.log_method_call
@registry.receives(resources.PORT, [events.AFTER_DELETE])
@log_helpers.log_method_call
# contrary to mother class, no need to subscribe to router interface
# before-delete, because after delete, we still can generate RPCs
@registry.receives(resources.ROUTER_INTERFACE, [events.AFTER_DELETE])
@log_helpers.log_method_call
| 38.744991
| 79
| 0.593249
|
# Copyright (c) 2015 Orange.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import orm
from sqlalchemy import sql
from neutron.db.models import l3
from neutron.db import models_v2
from neutron.debug import debug_agent
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as const
from neutron_lib.db import api as db_api
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from networking_bagpipe.agent.bgpvpn import rpc_client
from networking_bgpvpn.neutron.db import bgpvpn_db
from networking_bgpvpn.neutron.services.common import utils
from networking_bgpvpn.neutron.services.service_drivers.bagpipe \
import bagpipe_v2 as v2
LOG = logging.getLogger(__name__)
@log_helpers.log_method_call
@db_api.CONTEXT_READER
def get_network_info_for_port(context, port_id, network_id):
"""Get MAC, IP and Gateway IP addresses informations for a specific port"""
try:
net_info = (context.session.
query(models_v2.Port.mac_address,
models_v2.IPAllocation.ip_address,
models_v2.Subnet.cidr,
models_v2.Subnet.gateway_ip).
join(models_v2.IPAllocation,
models_v2.IPAllocation.port_id ==
models_v2.Port.id).
join(models_v2.Subnet,
models_v2.IPAllocation.subnet_id ==
models_v2.Subnet.id).
filter(models_v2.Subnet.ip_version == 4).
filter(models_v2.Port.id == port_id).one())
(mac_address, ip_address, cidr, gateway_ip) = net_info
except orm.exc.NoResultFound:
return
gateway_mac = (
context.session.
query(models_v2.Port.mac_address).
filter(
models_v2.Port.network_id == network_id,
(models_v2.Port.device_owner ==
const.DEVICE_OWNER_ROUTER_INTF)
).
one_or_none()
)
return {'mac_address': mac_address,
'ip_address': ip_address + cidr[cidr.index('/'):],
'gateway_ip': gateway_ip,
'gateway_mac': gateway_mac[0] if gateway_mac else None}
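# Illustrative return value (hypothetical addresses; the shape follows the
# return statement above):
#   {'mac_address': 'fa:16:3e:00:00:01',
#    'ip_address': '10.0.0.2/24',         # fixed IP plus the subnet CIDR suffix
#    'gateway_ip': '10.0.0.1',
#    'gateway_mac': 'fa:16:3e:00:00:02'}  # None when no router interface exists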
@db_api.CONTEXT_READER
def get_gateway_mac(context, network_id):
gateway_mac = (
context.session.
query(models_v2.Port.mac_address).
filter(
models_v2.Port.network_id == network_id,
(models_v2.Port.device_owner ==
const.DEVICE_OWNER_ROUTER_INTF)
).
one_or_none()
)
return gateway_mac[0] if gateway_mac else None
@db_api.CONTEXT_READER
def get_network_ports(context, network_id):
    # NOTE(tmorin): current callers don't look at detailed results
    # but only test whether at least one result exists => can be optimized
# by returning a count, rather than all port information
return (context.session.query(models_v2.Port).
filter(models_v2.Port.network_id == network_id,
models_v2.Port.admin_state_up == sql.true()).all())
@db_api.CONTEXT_READER
def get_router_ports(context, router_id):
return (
context.session.query(models_v2.Port).
filter(
models_v2.Port.device_id == router_id,
models_v2.Port.device_owner == const.DEVICE_OWNER_ROUTER_INTF
).all()
)
@db_api.CONTEXT_READER
def get_router_bgpvpn_assocs(context, router_id):
return (
context.session.query(bgpvpn_db.BGPVPNRouterAssociation).
filter(
bgpvpn_db.BGPVPNRouterAssociation.router_id == router_id
).all()
)
@db_api.CONTEXT_READER
def get_network_bgpvpn_assocs(context, net_id):
return (
context.session.query(bgpvpn_db.BGPVPNNetAssociation).
filter(
bgpvpn_db.BGPVPNNetAssociation.network_id == net_id
).all()
)
@db_api.CONTEXT_READER
def get_bgpvpns_of_router_assocs_by_network(context, net_id):
return (
context.session.query(bgpvpn_db.BGPVPN).
join(bgpvpn_db.BGPVPN.router_associations).
join(bgpvpn_db.BGPVPNRouterAssociation.router).
join(l3.Router.attached_ports).
join(l3.RouterPort.port).
filter(
models_v2.Port.network_id == net_id
).all()
)
@db_api.CONTEXT_READER
def get_networks_for_router(context, router_id):
ports = get_router_ports(context, router_id)
if ports:
return {port['network_id'] for port in ports}
else:
return []
def _log_callback_processing_exception(resource, event, trigger, kwargs, e):
LOG.exception("Error during notification processing "
"%(resource)s %(event)s, %(trigger)s, "
"%(kwargs)s: %(exc)s",
{'trigger': trigger,
'resource': resource,
'event': event,
'kwargs': kwargs,
'exc': e})
@registry.has_registry_receivers
class BaGPipeBGPVPNDriver(v2.BaGPipeBGPVPNDriver):
"""BGPVPN Service Driver class for BaGPipe"""
def __init__(self, service_plugin):
super(BaGPipeBGPVPNDriver, self).__init__(service_plugin)
self.agent_rpc = rpc_client.BGPVPNAgentNotifyApi()
def _format_bgpvpn(self, context, bgpvpn, network_id):
"""JSON-format BGPVPN
BGPVPN, network identifiers, and route targets.
"""
formatted_bgpvpn = {'id': bgpvpn['id'],
'network_id': network_id,
'gateway_mac': get_gateway_mac(context,
network_id)}
formatted_bgpvpn.update(
self._format_bgpvpn_network_route_targets([bgpvpn]))
return formatted_bgpvpn
def _format_bgpvpn_network_route_targets(self, bgpvpns):
"""Format BGPVPN network informations (VPN type and route targets)
[{
'type': 'l3',
'route_targets': ['12345:1', '12345:2'],
'import_targets': ['12345:3'],
'export_targets': ['12345:4']
},
{
'type': 'l3',
'route_targets': ['12346:1']
},
{
'type': 'l2',
'route_targets': ['12347:1']
}
]
to
{
'l3vpn' : {
'import_rt': ['12345:1', '12345:2', '12345:3', '12346:1'],
'export_rt': ['12345:1', '12345:2', '12345:4', '12346:1']
},
'l2vpn' : {
'import_rt': ['12347:1'],
'export_rt': ['12347:1']
}
}
"""
bgpvpn_rts = {}
for bgpvpn in bgpvpns:
# Add necessary keys to BGP VPN route targets dictionary
if bgpvpn['type'] + 'vpn' not in bgpvpn_rts:
bgpvpn_rts.update(
{bgpvpn['type'] + 'vpn': {'import_rt': [],
'export_rt': []}}
)
if 'route_targets' in bgpvpn:
bgpvpn_rts[bgpvpn['type'] + 'vpn']['import_rt'] += (
bgpvpn['route_targets']
)
bgpvpn_rts[bgpvpn['type'] + 'vpn']['export_rt'] += (
bgpvpn['route_targets']
)
if 'import_targets' in bgpvpn:
bgpvpn_rts[bgpvpn['type'] + 'vpn']['import_rt'] += (
bgpvpn['import_targets']
)
if 'export_targets' in bgpvpn:
bgpvpn_rts[bgpvpn['type'] + 'vpn']['export_rt'] += (
bgpvpn['export_targets']
)
for attribute in ('import_rt', 'export_rt'):
if bgpvpn_rts[bgpvpn['type'] + 'vpn'][attribute]:
bgpvpn_rts[bgpvpn['type'] + 'vpn'][attribute] = list(
set(bgpvpn_rts[bgpvpn['type'] + 'vpn'][attribute]))
return bgpvpn_rts
def _bgpvpns_for_network(self, context, network_id):
return (
self.bgpvpn_db.get_bgpvpns(
context,
filters={
'networks': [network_id],
},
) or self.retrieve_bgpvpns_of_router_assocs_by_network(context,
network_id)
)
def _networks_for_bgpvpn(self, context, bgpvpn):
networks = []
networks.extend(bgpvpn['networks'])
for router_id in bgpvpn['routers']:
networks.extend(get_networks_for_router(context, router_id))
return list(set(networks))
def _retrieve_bgpvpn_network_info_for_port(self, context, port):
"""Retrieve BGP VPN network informations for a specific port
{
'network_id': <UUID>,
'mac_address': '00:00:de:ad:be:ef',
'ip_address': '10.0.0.2',
'gateway_ip': '10.0.0.1',
'gateway_mac': 'aa:bb:cc:dd:ee:ff', # if a router interface exists
'l3vpn' : {
'import_rt': ['12345:1', '12345:2', '12345:3'],
'export_rt': ['12345:1', '12345:2', '12345:4']
}
}
"""
port_id = port['id']
network_id = port['network_id']
bgpvpn_network_info = {}
bgpvpns = self._bgpvpns_for_network(context, network_id)
# NOTE(tmorin): We currently need to send 'network_id', 'mac_address',
# 'ip_address', 'gateway_ip' to the agent, even in the absence of
        # a BGPVPN bound to the port. If we don't, this information will be
        # missing from a later update_bgpvpn RPC. Once the agent has the
        # ability to retrieve this info by itself, we'll change this method
# to return {} if there is no bound bgpvpn.
bgpvpn_rts = self._format_bgpvpn_network_route_targets(bgpvpns)
LOG.debug("Port connected on BGPVPN network %s with route targets "
"%s" % (network_id, bgpvpn_rts))
bgpvpn_network_info.update(bgpvpn_rts)
LOG.debug("Getting port %s network details" % port_id)
network_info = get_network_info_for_port(context, port_id, network_id)
if not network_info:
LOG.warning("No network information for net %s", network_id)
return
bgpvpn_network_info.update(network_info)
return bgpvpn_network_info
@db_api.CONTEXT_READER
def retrieve_bgpvpns_of_router_assocs_by_network(self, context,
network_id):
return [self.bgpvpn_db._make_bgpvpn_dict(bgpvpn) for bgpvpn in
get_bgpvpns_of_router_assocs_by_network(context, network_id)]
def delete_bgpvpn_postcommit(self, context, bgpvpn):
for net_id in self._networks_for_bgpvpn(context, bgpvpn):
if get_network_ports(context, net_id):
# Format BGPVPN before sending notification
self.agent_rpc.delete_bgpvpn(
context,
self._format_bgpvpn(context, bgpvpn, net_id))
def update_bgpvpn_postcommit(self, context, old_bgpvpn, bgpvpn):
super(BaGPipeBGPVPNDriver, self).update_bgpvpn_postcommit(
context, old_bgpvpn, bgpvpn)
(added_keys, removed_keys, changed_keys) = (
utils.get_bgpvpn_differences(bgpvpn, old_bgpvpn))
        ATTRIBUTES_TO_IGNORE = {'name'}  # set('name') would yield {'n', 'a', 'm', 'e'}
moving_keys = added_keys | removed_keys | changed_keys
if len(moving_keys ^ ATTRIBUTES_TO_IGNORE):
for net_id in self._networks_for_bgpvpn(context, bgpvpn):
                if get_network_ports(context, net_id):
self._update_bgpvpn_for_network(context, net_id, bgpvpn)
def _update_bgpvpn_for_net_with_id(self, context, network_id, bgpvpn_id):
if get_network_ports(context, network_id):
bgpvpn = self.get_bgpvpn(context, bgpvpn_id)
self._update_bgpvpn_for_network(context, network_id, bgpvpn)
def _update_bgpvpn_for_network(self, context, net_id, bgpvpn):
        formatted_bgpvpn = self._format_bgpvpn(context, bgpvpn, net_id)
        self.agent_rpc.update_bgpvpn(context,
                                     formatted_bgpvpn)
def create_net_assoc_postcommit(self, context, net_assoc):
super(BaGPipeBGPVPNDriver, self).create_net_assoc_postcommit(context,
net_assoc)
self._update_bgpvpn_for_net_with_id(context,
net_assoc['network_id'],
net_assoc['bgpvpn_id'])
def delete_net_assoc_postcommit(self, context, net_assoc):
if get_network_ports(context, net_assoc['network_id']):
bgpvpn = self.get_bgpvpn(context, net_assoc['bgpvpn_id'])
            formatted_bgpvpn = self._format_bgpvpn(context, bgpvpn,
                                                   net_assoc['network_id'])
            self.agent_rpc.delete_bgpvpn(context, formatted_bgpvpn)
def _ignore_port(self, context, port):
        if (port['device_owner'].startswith(
                const.DEVICE_OWNER_NETWORK_PREFIX) and
                port['device_owner'] not in
                (debug_agent.DEVICE_OWNER_COMPUTE_PROBE,
                 debug_agent.DEVICE_OWNER_NETWORK_PROBE)):
LOG.info("Port %s owner is network:*, we'll do nothing",
port['id'])
return True
if v2.network_is_external(context, port['network_id']):
LOG.info("Port %s is on an external network, we'll do nothing",
port['id'])
return True
return False
@log_helpers.log_method_call
def notify_port_updated(self, context, port, original_port):
if self._ignore_port(context, port):
return
agent_host = port[portbindings.HOST_ID]
port_bgpvpn_info = {'id': port['id'],
'network_id': port['network_id']}
if (port['status'] == const.PORT_STATUS_ACTIVE and
original_port['status'] != const.PORT_STATUS_ACTIVE):
LOG.debug("notify_port_updated, port became ACTIVE")
bgpvpn_network_info = (
self._retrieve_bgpvpn_network_info_for_port(context, port)
)
if bgpvpn_network_info:
port_bgpvpn_info.update(bgpvpn_network_info)
self.agent_rpc.attach_port_on_bgpvpn(context,
port_bgpvpn_info,
agent_host)
else:
# currently not reached, because we need
# _retrieve_bgpvpn_network_info_for_port to always
# return network information, even in the absence
# of any BGPVPN port bound.
pass
elif (port['status'] == const.PORT_STATUS_DOWN and
original_port['status'] != const.PORT_STATUS_DOWN):
LOG.debug("notify_port_updated, port became DOWN")
self.agent_rpc.detach_port_from_bgpvpn(context,
port_bgpvpn_info,
agent_host)
else:
LOG.debug("new port status is %s, origin status was %s,"
" => no action", port['status'], original_port['status'])
@log_helpers.log_method_call
def notify_port_deleted(self, context, port):
port_bgpvpn_info = {'id': port['id'],
'network_id': port['network_id']}
if self._ignore_port(context, port):
return
self.agent_rpc.detach_port_from_bgpvpn(context,
port_bgpvpn_info,
port[portbindings.HOST_ID])
def create_router_assoc_postcommit(self, context, router_assoc):
super(BaGPipeBGPVPNDriver, self).create_router_assoc_postcommit(
context, router_assoc)
for net_id in get_networks_for_router(context,
router_assoc['router_id']):
self._update_bgpvpn_for_net_with_id(context,
net_id,
router_assoc['bgpvpn_id'])
def delete_router_assoc_postcommit(self, context, router_assoc):
for net_id in get_networks_for_router(context,
router_assoc['router_id']):
net_assoc = {'network_id': net_id,
'bgpvpn_id': router_assoc['bgpvpn_id']}
self.delete_net_assoc_postcommit(context, net_assoc)
@log_helpers.log_method_call
def notify_router_interface_created(self, context, router_id, net_id):
super(BaGPipeBGPVPNDriver, self).notify_router_interface_created(
context, router_id, net_id)
net_assocs = get_network_bgpvpn_assocs(context, net_id)
router_assocs = get_router_bgpvpn_assocs(context, router_id)
# if this router_interface is on a network bound to a BGPVPN,
# or if this router is bound to a BGPVPN,
        # then we need to send an update for this network, including
# the gateway_mac
if net_assocs or router_assocs:
for bgpvpn in self._bgpvpns_for_network(context, net_id):
self._update_bgpvpn_for_network(context, net_id, bgpvpn)
for router_assoc in router_assocs:
self._update_bgpvpn_for_net_with_id(context,
net_id,
router_assoc['bgpvpn_id'])
@log_helpers.log_method_call
def notify_router_interface_deleted(self, context, router_id, net_id):
super(BaGPipeBGPVPNDriver, self).notify_router_interface_deleted(
context, router_id, net_id)
net_assocs = get_network_bgpvpn_assocs(context, net_id)
router_assocs = get_router_bgpvpn_assocs(context, router_id)
if net_assocs or router_assocs:
for bgpvpn in self._bgpvpns_for_network(context, net_id):
self._update_bgpvpn_for_network(context, net_id, bgpvpn)
for router_assoc in router_assocs:
net_assoc = {'network_id': net_id,
'bgpvpn_id': router_assoc['bgpvpn_id']}
self.delete_net_assoc_postcommit(context, net_assoc)
@registry.receives(resources.PORT, [events.AFTER_UPDATE])
@log_helpers.log_method_call
def registry_port_updated(self, resource, event, trigger, payload):
try:
context = payload.context
port = payload.latest_state
original_port = payload.states[0]
self.notify_port_updated(context, port, original_port)
except Exception as e:
_log_callback_processing_exception(resource, event, trigger,
payload.metadata, e)
@registry.receives(resources.PORT, [events.AFTER_DELETE])
@log_helpers.log_method_call
def registry_port_deleted(self, resource, event, trigger, payload):
try:
context = payload.context
port = payload.latest_state
self.notify_port_deleted(context, port)
except Exception as e:
_log_callback_processing_exception(resource, event, trigger,
payload.metadata, e)
    # contrary to the parent class, there is no need to subscribe to router
    # interface before-delete, because we can still generate RPCs after the delete
@registry.receives(resources.ROUTER_INTERFACE, [events.AFTER_DELETE])
@log_helpers.log_method_call
def registry_router_interface_deleted(self, resource, event, trigger,
payload=None):
try:
context = payload.context
# for router_interface after_delete, in stable/newton, the
# callback does not include the router_id directly, but we find
# it in the port device_id
router_id = payload.metadata.get('port')['device_id']
net_id = payload.metadata.get('port')['network_id']
self.notify_router_interface_deleted(context, router_id, net_id)
except Exception as e:
_log_callback_processing_exception(resource, event, trigger,
payload.metadata, e)
| 12,219
| 0
| 709
|
3bf82136b95654948d1c922678337c70dedf2483
| 857
|
py
|
Python
|
doc/snippets/rph_deserializer.py
|
michael-the1/diepvries
|
ddba9c91ee5fb2014dc576ffb74faa40c3d0d04f
|
[
"MIT"
] | 67
|
2021-08-20T14:30:49.000Z
|
2022-03-22T23:37:08.000Z
|
doc/snippets/rph_deserializer.py
|
michael-the1/diepvries
|
ddba9c91ee5fb2014dc576ffb74faa40c3d0d04f
|
[
"MIT"
] | 1
|
2022-01-22T08:19:38.000Z
|
2022-02-02T08:48:34.000Z
|
doc/snippets/rph_deserializer.py
|
michael-the1/diepvries
|
ddba9c91ee5fb2014dc576ffb74faa40c3d0d04f
|
[
"MIT"
] | 6
|
2021-09-03T17:21:16.000Z
|
2021-12-22T12:11:51.000Z
|
from diepvries.deserializers.snowflake_deserializer import (
DatabaseConfiguration,
SnowflakeDeserializer,
)
if __name__ == "__main__":
deserialize()
| 26.78125
| 83
| 0.667445
|
from diepvries.deserializers.snowflake_deserializer import (
DatabaseConfiguration,
SnowflakeDeserializer,
)
def deserialize():
database_configuration = DatabaseConfiguration(
database="<DB>",
user="<USER>",
password="<PASSWORD>",
warehouse="<WAREHOUSE>",
account="<ACCOUNT>",
)
deserializer = SnowflakeDeserializer(
target_schema="dv",
target_tables=["h_account", "h_account_supplier", "h_account_transporter"],
database_configuration=database_configuration,
role_playing_hubs={
"h_account_supplier": "h_account",
"h_account_transporter": "h_account",
},
)
print(deserializer.deserialized_target_tables)
print([x.name for x in deserializer.deserialized_target_tables])
if __name__ == "__main__":
deserialize()
| 669
| 0
| 23
|
caa7fc2ab467b003c39e1164d5b171219ed1bd62
| 1,073
|
py
|
Python
|
metrics/uncertainty_confidence.py
|
Karthik-Ragunath/DDU
|
b9daae9304bdeb222857884ef8cb3b6b3d004d33
|
[
"MIT"
] | 43
|
2021-05-20T14:07:53.000Z
|
2022-03-23T12:58:26.000Z
|
metrics/uncertainty_confidence.py
|
Karthik-Ragunath/DDU
|
b9daae9304bdeb222857884ef8cb3b6b3d004d33
|
[
"MIT"
] | 3
|
2021-09-19T20:49:21.000Z
|
2022-03-07T10:25:47.000Z
|
metrics/uncertainty_confidence.py
|
Karthik-Ragunath/DDU
|
b9daae9304bdeb222857884ef8cb3b6b3d004d33
|
[
"MIT"
] | 8
|
2021-06-26T15:28:45.000Z
|
2022-02-19T02:07:05.000Z
|
"""
Metrics measuring either uncertainty or confidence of a model.
"""
import torch
import torch.nn.functional as F
| 21.897959
| 63
| 0.664492
|
"""
Metrics measuring either uncertainty or confidence of a model.
"""
import torch
import torch.nn.functional as F
def entropy(logits):
p = F.softmax(logits, dim=1)
logp = F.log_softmax(logits, dim=1)
plogp = p * logp
entropy = -torch.sum(plogp, dim=1)
return entropy
def logsumexp(logits):
return torch.logsumexp(logits, dim=1, keepdim=False)
def confidence(logits):
p = F.softmax(logits, dim=1)
confidence, _ = torch.max(p, dim=1)
return confidence
def entropy_prob(probs):
p = probs
eps = 1e-12
logp = torch.log(p + eps)
plogp = p * logp
entropy = -torch.sum(plogp, dim=1)
return entropy
def mutual_information_prob(probs):
mean_output = torch.mean(probs, dim=0)
predictive_entropy = entropy_prob(mean_output)
# Computing expectation of entropies
p = probs
eps = 1e-12
logp = torch.log(p + eps)
plogp = p * logp
exp_entropies = torch.mean(-torch.sum(plogp, dim=2), dim=0)
# Computing mutual information
mi = predictive_entropy - exp_entropies
return mi
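# Minimal usage sketch (not part of the original module; shapes are assumptions
# for illustration): entropy/logsumexp/confidence take logits of shape
# (batch, classes), the *_prob variants take probabilities, and
# mutual_information_prob expects an extra leading MC-sample dimension.
def _demo_metrics():
    logits = torch.randn(4, 10)                         # (batch, classes)
    mc_probs = F.softmax(torch.randn(8, 4, 10), dim=2)  # (samples, batch, classes)
    return (entropy(logits),                    # predictive entropy from logits
            confidence(logits),                 # max softmax probability
            logsumexp(logits),                  # logsumexp score over classes
            mutual_information_prob(mc_probs))  # epistemic uncertainty estimate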
| 837
| 0
| 115
|
4bd8f34b13a613b9fbf79810fcb40ce5c3a6a951
| 3,717
|
py
|
Python
|
pysnmp-with-texts/TUBS-IBR-AGENT-CAPABILITIES.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/TUBS-IBR-AGENT-CAPABILITIES.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/TUBS-IBR-AGENT-CAPABILITIES.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module TUBS-IBR-AGENT-CAPABILITIES (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/TUBS-IBR-AGENT-CAPABILITIES
# Produced by pysmi-0.3.4 at Wed May 1 15:27:47 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
ModuleCompliance, AgentCapabilities, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "AgentCapabilities", "NotificationGroup")
Gauge32, ObjectIdentity, Counter64, Counter32, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, IpAddress, MibIdentifier, Bits, Integer32, ModuleIdentity, Unsigned32, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "ObjectIdentity", "Counter64", "Counter32", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "IpAddress", "MibIdentifier", "Bits", "Integer32", "ModuleIdentity", "Unsigned32", "iso")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ibr, = mibBuilder.importSymbols("TUBS-SMI", "ibr")
ibrAgentCapabilities = ModuleIdentity((1, 3, 6, 1, 4, 1, 1575, 1, 6))
ibrAgentCapabilities.setRevisions(('2000-02-09 00:00', '1998-08-05 16:23', '1997-02-14 10:23',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ibrAgentCapabilities.setRevisionsDescriptions(('Updated IMPORTS and minor stylistic fixes.', 'Added agent capabilities for the WWW-MIB subagent version 1.0.', 'The initial revision of this module.',))
if mibBuilder.loadTexts: ibrAgentCapabilities.setLastUpdated('200002090000Z')
if mibBuilder.loadTexts: ibrAgentCapabilities.setOrganization('TU Braunschweig')
if mibBuilder.loadTexts: ibrAgentCapabilities.setContactInfo('Juergen Schoenwaelder TU Braunschweig Bueltenweg 74/75 38106 Braunschweig Germany Tel: +49 531 391 3283 Fax: +49 531 391 5936 E-mail: schoenw@ibr.cs.tu-bs.de')
if mibBuilder.loadTexts: ibrAgentCapabilities.setDescription('Agent capability statements.')
linux = MibIdentifier((1, 3, 6, 1, 4, 1, 1575, 1, 6, 1))
linuxAgent3dot3 = AgentCapabilities((1, 3, 6, 1, 4, 1, 1575, 1, 6, 2))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
linuxAgent3dot3 = linuxAgent3dot3.setProductRelease('cmu-snmp-linux-3.3')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
linuxAgent3dot3 = linuxAgent3dot3.setStatus('current')
if mibBuilder.loadTexts: linuxAgent3dot3.setDescription('CMU SNMP v1.1b + SNMPv2 USEC + LINUX')
wwwSubagent1dot0 = AgentCapabilities((1, 3, 6, 1, 4, 1, 1575, 1, 6, 3))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
wwwSubagent1dot0 = wwwSubagent1dot0.setProductRelease('TUBS Apache WWW-MIB sub-agent version 1.0')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
wwwSubagent1dot0 = wwwSubagent1dot0.setStatus('current')
if mibBuilder.loadTexts: wwwSubagent1dot0.setDescription('TUBS WWW-MIB sub-agent version 1.0 for Solaris.')
mibBuilder.exportSymbols("TUBS-IBR-AGENT-CAPABILITIES", linuxAgent3dot3=linuxAgent3dot3, ibrAgentCapabilities=ibrAgentCapabilities, PYSNMP_MODULE_ID=ibrAgentCapabilities, wwwSubagent1dot0=wwwSubagent1dot0, linux=linux)
| 97.815789
| 477
| 0.769438
|
#
# PySNMP MIB module TUBS-IBR-AGENT-CAPABILITIES (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/TUBS-IBR-AGENT-CAPABILITIES
# Produced by pysmi-0.3.4 at Wed May 1 15:27:47 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
ModuleCompliance, AgentCapabilities, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "AgentCapabilities", "NotificationGroup")
Gauge32, ObjectIdentity, Counter64, Counter32, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, IpAddress, MibIdentifier, Bits, Integer32, ModuleIdentity, Unsigned32, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "ObjectIdentity", "Counter64", "Counter32", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "IpAddress", "MibIdentifier", "Bits", "Integer32", "ModuleIdentity", "Unsigned32", "iso")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ibr, = mibBuilder.importSymbols("TUBS-SMI", "ibr")
ibrAgentCapabilities = ModuleIdentity((1, 3, 6, 1, 4, 1, 1575, 1, 6))
ibrAgentCapabilities.setRevisions(('2000-02-09 00:00', '1998-08-05 16:23', '1997-02-14 10:23',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ibrAgentCapabilities.setRevisionsDescriptions(('Updated IMPORTS and minor stylistic fixes.', 'Added agent capabilities for the WWW-MIB subagent version 1.0.', 'The initial revision of this module.',))
if mibBuilder.loadTexts: ibrAgentCapabilities.setLastUpdated('200002090000Z')
if mibBuilder.loadTexts: ibrAgentCapabilities.setOrganization('TU Braunschweig')
if mibBuilder.loadTexts: ibrAgentCapabilities.setContactInfo('Juergen Schoenwaelder TU Braunschweig Bueltenweg 74/75 38106 Braunschweig Germany Tel: +49 531 391 3283 Fax: +49 531 391 5936 E-mail: schoenw@ibr.cs.tu-bs.de')
if mibBuilder.loadTexts: ibrAgentCapabilities.setDescription('Agent capability statements.')
linux = MibIdentifier((1, 3, 6, 1, 4, 1, 1575, 1, 6, 1))
linuxAgent3dot3 = AgentCapabilities((1, 3, 6, 1, 4, 1, 1575, 1, 6, 2))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
linuxAgent3dot3 = linuxAgent3dot3.setProductRelease('cmu-snmp-linux-3.3')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
linuxAgent3dot3 = linuxAgent3dot3.setStatus('current')
if mibBuilder.loadTexts: linuxAgent3dot3.setDescription('CMU SNMP v1.1b + SNMPv2 USEC + LINUX')
wwwSubagent1dot0 = AgentCapabilities((1, 3, 6, 1, 4, 1, 1575, 1, 6, 3))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
wwwSubagent1dot0 = wwwSubagent1dot0.setProductRelease('TUBS Apache WWW-MIB sub-agent version 1.0')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
wwwSubagent1dot0 = wwwSubagent1dot0.setStatus('current')
if mibBuilder.loadTexts: wwwSubagent1dot0.setDescription('TUBS WWW-MIB sub-agent version 1.0 for Solaris.')
mibBuilder.exportSymbols("TUBS-IBR-AGENT-CAPABILITIES", linuxAgent3dot3=linuxAgent3dot3, ibrAgentCapabilities=ibrAgentCapabilities, PYSNMP_MODULE_ID=ibrAgentCapabilities, wwwSubagent1dot0=wwwSubagent1dot0, linux=linux)
| 0
| 0
| 0
|
7075d8ceddff8373d1448de790f5c8e02b65488c
| 2,373
|
py
|
Python
|
generate_letter.py
|
drewlinsley/cabc
|
74726509e542d5f0f04bf297c211cca9f6e87b56
|
[
"MIT"
] | 2
|
2020-04-19T01:19:10.000Z
|
2021-06-08T02:04:48.000Z
|
generate_letter.py
|
drewlinsley/cabc
|
74726509e542d5f0f04bf297c211cca9f6e87b56
|
[
"MIT"
] | null | null | null |
generate_letter.py
|
drewlinsley/cabc
|
74726509e542d5f0f04bf297c211cca9f6e87b56
|
[
"MIT"
] | null | null | null |
import PIL
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
import numpy as np
import matplotlib.pyplot as plt
import os
from skimage.filters import threshold_otsu
import scipy
from scipy import ndimage
from scipy.interpolate import griddata
import cv2
import preprocess
if __name__ == "__main__":
# DEFINE AND LOAD FONT
script_root = '/Users/junkyungkim/Documents/PycharmProjects/cluttered_nist'
fontnames = ['FUTRFW.ttf',
'Instruction.otf',
'absender1.ttf',
'5Identification-Mono.ttf',
'7Segment.ttf',
'VCR_OSD_MONO_1.001.ttf',
'Instruction.otf',
'Segment16B Regular.ttf']
std_fontsizes = [225, 240, 225, 150, 255, 255, 255, 255]
std_thin_iters = [6, 15, 4, 9, 9, 2]
scale = 1 # 0.5
for fontname, std_fontsize, std_thin_iter in zip(fontnames, std_fontsizes, std_thin_iters):
std_fontsize = int(std_fontsize*scale)
std_thin_iter = int(std_thin_iter*scale)
font = ImageFont.truetype(os.path.join(script_root,'fonts',fontname), std_fontsize)
# RENDER
img=Image.new("RGBA", (2500, 300), (255, 255, 255))
draw = ImageDraw.Draw(img)
draw.text((0, 0), "ABCDEFGXYZ", (0, 0, 0), font=font)
draw = ImageDraw.Draw(img)
        # MORPHOLOGICAL POSTPROC (FOR CONSTANT STROKE THICKNESS)
img = 255 - np.mean(np.array(img), axis=2)
binary = img > 128
# img_closed = scipy.ndimage.binary_closing(binary.astype(np.int), iterations=20)##np.maximum(iterations / 2, 1))
img_eroded = (scipy.ndimage.morphology.binary_erosion(binary, iterations=std_thin_iter) * 255).astype(np.uint8)
landscape = preprocess.generate_distortion_mask(img_eroded, sigma=[4000,2000], num_centers=[30,20])
warped = preprocess.custom_warp(img_eroded, landscape, power=0.07)
# img_dist = img_eroded
# distCoeffs = [-.1, 1.0, 1.0, 1.0]
# focal_length = [1000, 1000]
# for coord in [[400,100],[500,150],[600,200]]:
# distCoeffs[0] = distCoeffs[0]*-1
# img_dist = custom_fisheye(img_dist, coord, distCoeffs, focal_length)
# import preprocess
# im_pixelated = preprocess.pixelate_obj(img_eroded, [10 * scale, 10 * scale], 0.1, 5 * scale, ignore_fit=True)
plt.subplot(211);plt.imshow(binary, cmap='gray')
plt.subplot(212);plt.imshow(warped, cmap='gray')
plt.show()
# thinned = zhangSuen(binary)
# plt.subplot(121)
# plt.imshow(img)
# plt.subplot(122)
# plt.imshow(thinned)
# plt.show()
| 32.958333
| 115
| 0.710072
|
import PIL
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
import numpy as np
import matplotlib.pyplot as plt
import os
from skimage.filters import threshold_otsu
import scipy
from scipy import ndimage
from scipy.interpolate import griddata
import cv2
import preprocess
if __name__ == "__main__":
# DEFINE AND LOAD FONT
script_root = '/Users/junkyungkim/Documents/PycharmProjects/cluttered_nist'
fontnames = ['FUTRFW.ttf',
'Instruction.otf',
'absender1.ttf',
'5Identification-Mono.ttf',
'7Segment.ttf',
'VCR_OSD_MONO_1.001.ttf',
'Instruction.otf',
'Segment16B Regular.ttf']
std_fontsizes = [225, 240, 225, 150, 255, 255, 255, 255]
std_thin_iters = [6, 15, 4, 9, 9, 2]
scale = 1 # 0.5
for fontname, std_fontsize, std_thin_iter in zip(fontnames, std_fontsizes, std_thin_iters):
std_fontsize = int(std_fontsize*scale)
std_thin_iter = int(std_thin_iter*scale)
font = ImageFont.truetype(os.path.join(script_root,'fonts',fontname), std_fontsize)
# RENDER
img=Image.new("RGBA", (2500, 300), (255, 255, 255))
draw = ImageDraw.Draw(img)
draw.text((0, 0), "ABCDEFGXYZ", (0, 0, 0), font=font)
draw = ImageDraw.Draw(img)
        # MORPHOLOGICAL POSTPROC (FOR CONSTANT STROKE THICKNESS)
img = 255 - np.mean(np.array(img), axis=2)
binary = img > 128
# img_closed = scipy.ndimage.binary_closing(binary.astype(np.int), iterations=20)##np.maximum(iterations / 2, 1))
img_eroded = (scipy.ndimage.morphology.binary_erosion(binary, iterations=std_thin_iter) * 255).astype(np.uint8)
landscape = preprocess.generate_distortion_mask(img_eroded, sigma=[4000,2000], num_centers=[30,20])
warped = preprocess.custom_warp(img_eroded, landscape, power=0.07)
# img_dist = img_eroded
# distCoeffs = [-.1, 1.0, 1.0, 1.0]
# focal_length = [1000, 1000]
# for coord in [[400,100],[500,150],[600,200]]:
# distCoeffs[0] = distCoeffs[0]*-1
# img_dist = custom_fisheye(img_dist, coord, distCoeffs, focal_length)
# import preprocess
# im_pixelated = preprocess.pixelate_obj(img_eroded, [10 * scale, 10 * scale], 0.1, 5 * scale, ignore_fit=True)
plt.subplot(211);plt.imshow(binary, cmap='gray')
plt.subplot(212);plt.imshow(warped, cmap='gray')
plt.show()
# thinned = zhangSuen(binary)
# plt.subplot(121)
# plt.imshow(img)
# plt.subplot(122)
# plt.imshow(thinned)
# plt.show()
| 0
| 0
| 0
|
2bf52aa5f62814599fc3741d49983e8f1296f5e3
| 3,503
|
py
|
Python
|
qlknn/plots/load_data.py
|
Karel-van-de-Plassche/QLKNN-develop
|
f2d29be625c2ddbddad6c1e98e5c03a43cf2797f
|
[
"MIT"
] | null | null | null |
qlknn/plots/load_data.py
|
Karel-van-de-Plassche/QLKNN-develop
|
f2d29be625c2ddbddad6c1e98e5c03a43cf2797f
|
[
"MIT"
] | null | null | null |
qlknn/plots/load_data.py
|
Karel-van-de-Plassche/QLKNN-develop
|
f2d29be625c2ddbddad6c1e98e5c03a43cf2797f
|
[
"MIT"
] | 2
|
2018-02-28T14:18:43.000Z
|
2018-11-26T11:06:08.000Z
|
import os
import sys
import numpy as np
import scipy.stats as stats
import pandas as pd
from IPython import embed
from qlknn.NNDB.model import Network, NetworkJSON
from qlknn.models.ffnn import QuaLiKizNDNN
shortname = {'Ate': '$R/L_{T_e}$',
'Ati': '$R/L_{T_i}$'}
longname = {
'Ate': 'Normalized electron temperature gradient $R/L_{T_e}$',
'Ati': 'Normalized ion temperature gradient $R/L_{T_i}$'}
nameconvert = {
'An': '$R/L_n$',
#'Nustar': '$\\nu^*$',
'Nustar': '$log_{10}(\\nu^*)$',
'logNustar': '$log_{10}(\\nu^*)$',
'Ti_Te': 'Relative temperature $T_i/T_e$',
'Zeff': '$Z_{eff}$',
'q': '$q$',
'smag': 'Magnetic shear $\hat{s}$',
'x': '$\\varepsilon\,(r/R)$',
'efe_GB': '$q_e\,[GB]$',
'efi_GB': '$q_i\,[GB]$',
'efiITG_GB': '$q_{ITG, i}\,[GB]$',
'efeITG_GB': '$q_{ITG, e}\,[GB]$',
'efiTEM_GB': '$q_{TEM, i}\,[GB]$',
'efeTEM_GB': '$q_{TEM, e}\,[GB]$',
'efeETG_GB': 'Normalized heat flux $q$',
'pfe_GB': '$\Gamma_e\,[GB]$',
'pfi_GB': '$\Gamma_i\,[GB]$',
'pfeITG_GB': '$\Gamma_{ITG, i}\,[GB]$',
'pfeTEM_GB': '$\Gamma_{TEM, i}\,[GB]$',
'gam_leq_GB': '$\gamma_{max, \leq 2}\,[GB]$'
}
comboname = {
'efiTEM_GB_div_efeTEM_GB': nameconvert['efiTEM_GB'] + '/' + nameconvert['efeTEM_GB'],
'pfeTEM_GB_div_efeTEM_GB': nameconvert['pfeTEM_GB'] + '/' + nameconvert['efeTEM_GB'],
'efeITG_GB_div_efiITG_GB': nameconvert['efeITG_GB'] + '/' + nameconvert['efiITG_GB'],
'pfeITG_GB_div_efiITG_GB': nameconvert['pfeITG_GB'] + '/' + nameconvert['efiITG_GB']
}
nameconvert.update(shortname)
nameconvert.update(comboname)
| 29.940171
| 89
| 0.586355
|
import os
import sys
import numpy as np
import scipy.stats as stats
import pandas as pd
from IPython import embed
from qlknn.NNDB.model import Network, NetworkJSON
from qlknn.models.ffnn import QuaLiKizNDNN
def load_data(id):
store = pd.HDFStore('../7D_nions0_flat.h5')
input = store['megarun1/input']
data = store['megarun1/flattened']
root_name = '/megarun1/nndb_nn/'
query = (Network.select(Network.target_names).where(Network.id == id).tuples()).get()
target_names = query[0]
if len(target_names) == 1:
target_name = target_names[0]
else:
NotImplementedError('Multiple targets not implemented yet')
print(target_name)
parent_name = root_name + target_name + '/'
network_name = parent_name + str(id)
network_name += '_noclip'
nn = load_nn(id)
df = data[target_name].to_frame('target')
df['prediction'] = store[network_name].iloc[:, 0]
df = df.astype('float64')
df['residuals'] = df['target'] - df['prediction']
df['maxgam'] = pd.DataFrame({'leq': data['gam_leq_GB'],
'less': data['gam_less_GB']}).max(axis=1)
return input, df, nn
def load_nn(id):
subquery = (Network.select(NetworkJSON.network_json)
.where(Network.id == id)
.join(NetworkJSON)
.tuples()).get()
json_dict = subquery[0]
nn = QuaLiKizNDNN(json_dict)
return nn
shortname = {'Ate': '$R/L_{T_e}$',
'Ati': '$R/L_{T_i}$'}
longname = {
'Ate': 'Normalized electron temperature gradient $R/L_{T_e}$',
'Ati': 'Normalized ion temperature gradient $R/L_{T_i}$'}
nameconvert = {
'An': '$R/L_n$',
#'Nustar': '$\\nu^*$',
'Nustar': '$log_{10}(\\nu^*)$',
'logNustar': '$log_{10}(\\nu^*)$',
'Ti_Te': 'Relative temperature $T_i/T_e$',
'Zeff': '$Z_{eff}$',
'q': '$q$',
'smag': 'Magnetic shear $\hat{s}$',
'x': '$\\varepsilon\,(r/R)$',
'efe_GB': '$q_e\,[GB]$',
'efi_GB': '$q_i\,[GB]$',
'efiITG_GB': '$q_{ITG, i}\,[GB]$',
'efeITG_GB': '$q_{ITG, e}\,[GB]$',
'efiTEM_GB': '$q_{TEM, i}\,[GB]$',
'efeTEM_GB': '$q_{TEM, e}\,[GB]$',
'efeETG_GB': 'Normalized heat flux $q$',
'pfe_GB': '$\Gamma_e\,[GB]$',
'pfi_GB': '$\Gamma_i\,[GB]$',
'pfeITG_GB': '$\Gamma_{ITG, i}\,[GB]$',
'pfeTEM_GB': '$\Gamma_{TEM, i}\,[GB]$',
'gam_leq_GB': '$\gamma_{max, \leq 2}\,[GB]$'
}
comboname = {
'efiTEM_GB_div_efeTEM_GB': nameconvert['efiTEM_GB'] + '/' + nameconvert['efeTEM_GB'],
'pfeTEM_GB_div_efeTEM_GB': nameconvert['pfeTEM_GB'] + '/' + nameconvert['efeTEM_GB'],
'efeITG_GB_div_efiITG_GB': nameconvert['efeITG_GB'] + '/' + nameconvert['efiITG_GB'],
'pfeITG_GB_div_efiITG_GB': nameconvert['pfeITG_GB'] + '/' + nameconvert['efiITG_GB']
}
nameconvert.update(shortname)
nameconvert.update(comboname)
def prettify_df(input, data):
try:
del input['nions']
except KeyError:
pass
for ii, col in enumerate(input):
if col == u'Nustar':
input[col] = input[col].apply(np.log10)
#se = input[col]
#se.name = nameconvert[se.name]
input['x'] = (input['x'] / 3)
input.rename(columns=nameconvert, inplace=True)
data.rename(columns=nameconvert, inplace=True)
#for ii, col in enumerate(data):
# se = data[col]
# try:
# se.name = nameconvert[se.name]
# except KeyError:
# warn('Did not translate name for ' + se.name)
return input, data
| 1,801
| 0
| 68
|
2bcc82625a9e7a68e52a4eca462930e5ea09cbb5
| 4,424
|
py
|
Python
|
src/arclet/alconna/builtin/actions.py
|
ArcletProject/Alconna
|
e7532fe04a425ac2a1f64a0604017194f7cdc535
|
[
"MIT"
] | 13
|
2021-12-14T05:47:03.000Z
|
2022-03-10T15:52:27.000Z
|
src/arclet/alconna/builtin/actions.py
|
ArcletProject/Alconna
|
e7532fe04a425ac2a1f64a0604017194f7cdc535
|
[
"MIT"
] | 31
|
2021-12-14T15:16:01.000Z
|
2022-03-27T16:51:33.000Z
|
src/arclet/alconna/builtin/actions.py
|
ArcletProject/Alconna
|
e7532fe04a425ac2a1f64a0604017194f7cdc535
|
[
"MIT"
] | 1
|
2022-03-22T13:33:04.000Z
|
2022-03-22T13:33:04.000Z
|
"""Alconna ArgAction相关"""
from datetime import datetime
from typing import Any, Optional, TYPE_CHECKING, Literal
from arclet.alconna.components.action import ArgAction
from arclet.alconna.components.behavior import ArpamarBehavior
from arclet.alconna.exceptions import BehaveCancelled, OutBoundsBehavior
from arclet.alconna.config import config
class _StoreValue(ArgAction):
"""针对特定值的类"""
def store_value(value: Any):
"""存储一个值"""
return _StoreValue(value)
if TYPE_CHECKING:
from arclet.alconna import alconna_version
from arclet.alconna.arpamar import Arpamar
def version(value: Optional[tuple]):
"""返回一个以元组形式存储的版本信息"""
return _StoreValue(value) if value else _StoreValue(alconna_version)
def set_default(value: Any, option: Optional[str] = None, subcommand: Optional[str] = None):
"""
    Set a default value for an option; it is applied when that option is absent.
    If both option and subcommand are given, the value becomes the default of that option inside the subcommand.
    Args:
        value: default value
        option: option name
        subcommand: subcommand name
"""
return _SetDefault()
def exclusion(target_path: str, other_path: str):
"""
    Raise an exception when both of the configured paths are present at the same time.
    Args:
        target_path: target path
        other_path: the other path
"""
return _EXCLUSION()
def cool_down(seconds: float):
"""
    Raise an exception when invoked again within the configured time interval.
    Args:
        seconds: time interval in seconds
"""
return _CoolDown()
def inclusion(*targets: str, flag: Literal["any", "all"] = "any"):
"""
    Raise an exception when the configured paths are not present.
    Args:
        targets: list of paths
        flag: matching mode, either "any" or "all"; defaults to "any"
"""
return _Inclusion()
| 32.291971
| 105
| 0.618445
|
"""Alconna ArgAction相关"""
from datetime import datetime
from typing import Any, Optional, TYPE_CHECKING, Literal
from arclet.alconna.components.action import ArgAction
from arclet.alconna.components.behavior import ArpamarBehavior
from arclet.alconna.exceptions import BehaveCancelled, OutBoundsBehavior
from arclet.alconna.config import config
class _StoreValue(ArgAction):
"""针对特定值的类"""
def __init__(self, value: Any):
super().__init__(lambda: value)
def handle(self, option_dict, varargs=None, kwargs=None, is_raise_exception=False):
return self.action()
def store_value(value: Any):
"""存储一个值"""
return _StoreValue(value)
if TYPE_CHECKING:
from arclet.alconna import alconna_version
from arclet.alconna.arpamar import Arpamar
def version(value: Optional[tuple]):
"""返回一个以元组形式存储的版本信息"""
return _StoreValue(value) if value else _StoreValue(alconna_version)
def set_default(value: Any, option: Optional[str] = None, subcommand: Optional[str] = None):
"""
    Set a default value for an option; it is applied when that option is absent.
    If both option and subcommand are given, the value becomes the default of that option inside the subcommand.
    Args:
        value: default value
        option: option name
        subcommand: subcommand name
"""
class _SetDefault(ArpamarBehavior):
def operate(self, interface: "Arpamar"):
if not option and not subcommand:
raise BehaveCancelled
if option and subcommand is None:
options = interface.query("options", {})
options.setdefault(option, {"value": value, "args": {}})
if subcommand and option is None:
subcommands = interface.query("subcommands", {})
subcommands.setdefault(subcommand, {"value": value, "args": {}, "options": {}})
if option and subcommand:
sub_options = interface.query(f"{subcommand}.options", {})
sub_options.setdefault(option, {"value": value, "args": {}})
return _SetDefault()
def exclusion(target_path: str, other_path: str):
"""
    Raise an exception when both of the configured paths are present at the same time.
    Args:
        target_path: target path
        other_path: the other path
"""
class _EXCLUSION(ArpamarBehavior):
def operate(self, interface: "Arpamar"):
if interface.query(target_path) and interface.query(other_path):
interface.matched = False
if interface.source.is_raise_exception:
raise OutBoundsBehavior(
config.lang.behavior_exclude_matched.format(target=target_path, other=other_path)
)
interface.error_info = OutBoundsBehavior(
config.lang.behavior_exclude_matched.format(target=target_path, other=other_path)
)
return _EXCLUSION()
def cool_down(seconds: float):
"""
    Raise an exception when invoked again within the configured time interval.
    Args:
        seconds: time interval in seconds
"""
class _CoolDown(ArpamarBehavior):
def __init__(self):
self.last_time = datetime.now()
def operate(self, interface: "Arpamar"):
current_time = datetime.now()
if (current_time - self.last_time).total_seconds() < seconds:
if interface.source.is_raise_exception:
raise OutBoundsBehavior(config.lang.behavior_cooldown_matched)
interface.matched = False
interface.error_info = OutBoundsBehavior(config.lang.behavior_cooldown_matched)
else:
self.last_time = current_time
return _CoolDown()
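# Generic sketch of the cool-down pattern implemented above (illustration only,
# not part of the Alconna API): reject a call that arrives within `seconds` of
# the previously accepted one.
def _cool_down_sketch(seconds: float):
    state = {"last": datetime.now()}
    def guard() -> bool:
        now = datetime.now()
        if (now - state["last"]).total_seconds() < seconds:
            return False  # still cooling down; the caller should treat this as a miss
        state["last"] = now
        return True
    return guard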
def inclusion(*targets: str, flag: Literal["any", "all"] = "any"):
"""
    Raise an exception when the configured paths are not present.
    Args:
        targets: list of paths
        flag: matching mode, either "any" or "all"; defaults to "any"
"""
class _Inclusion(ArpamarBehavior):
def operate(self, interface: "Arpamar"):
if flag == "all":
for target in targets:
if not interface.query(target):
interface.matched = False
interface.error_info = OutBoundsBehavior(config.lang.behavior_inclusion_matched)
break
else:
all_count = len(targets) - sum(1 for target in targets if interface.require(target))
if all_count > 0:
interface.matched = False
interface.error_info = OutBoundsBehavior(config.lang.behavior_inclusion_matched)
return _Inclusion()
| 2,505
| 52
| 313
|
a49de0f42e9244dddb80c5007220619b2b81057e
| 142
|
py
|
Python
|
notification/admin.py
|
tiagocdr/twitter-clone
|
53737774f2f7766c69f9f9d96458a630124fa6d8
|
[
"MIT"
] | null | null | null |
notification/admin.py
|
tiagocdr/twitter-clone
|
53737774f2f7766c69f9f9d96458a630124fa6d8
|
[
"MIT"
] | null | null | null |
notification/admin.py
|
tiagocdr/twitter-clone
|
53737774f2f7766c69f9f9d96458a630124fa6d8
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from notification.models import Notifications
# Register your models here.
admin.site.register(Notifications)
| 35.5
| 45
| 0.852113
|
from django.contrib import admin
from notification.models import Notifications
# Register your models here.
admin.site.register(Notifications)
| 0
| 0
| 0
|
3eb8467a6d27f9eb162881b64089f508aede1b60
| 5,742
|
py
|
Python
|
src/main.py
|
westernmagic/outer_ear
|
a2d193c6c2ddb22f0aee8ad6971b019d1096c114
|
[
"Apache-2.0"
] | null | null | null |
src/main.py
|
westernmagic/outer_ear
|
a2d193c6c2ddb22f0aee8ad6971b019d1096c114
|
[
"Apache-2.0"
] | null | null | null |
src/main.py
|
westernmagic/outer_ear
|
a2d193c6c2ddb22f0aee8ad6971b019d1096c114
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
'''
Outer ear simulator
Author: Michal Sudwoj <msudwoj@student.ethz.ch>
Version: 1.0.0
Date: 2019-09-09
'''
from typing import Tuple
import numpy as np
import scipy.io.wavfile as wav
import scipy.signal as ss
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from pysofaconventions import SOFAFile
def head(data : np.ndarray, sofa : SOFAFile, azimuth : float, elevation : float):
'''
Apply effects of the head (HRTF)
'''
from scipy.spatial import KDTree
s = get_sofa(sofa)
pos = s.getVariableValue('SourcePosition')
# find closest position to requested azimuth and elevation
# TODO: consider normalizing position units to eg. degrees
index = KDTree(pos).query([azimuth, elevation, 1])[1]
hrir = s.getDataIR()[index, :, :]
data = data.T
left = ss.fftconvolve(data, hrir[0])
right = ss.fftconvolve(data, hrir[1])
output = np.asarray([left, right]).swapaxes(-1, 0)
return output
def canal(input : np.ndarray, f_s: int, l : float, d : float):
'''
Apply effects of the ear canal
Modeled as a bandpass filter, as in 'Matlab Auditory Periphery (MAP)'
'''
assert f_s > 0
assert l >= 0
assert d >= 0
v = 343
gain = 10
order = 1
f_nyq = f_s / 2
for n in [1, 3, 5]:
# 'Stopped pipe' resonator; resonating frequency
f_r = (n * v) / (4 * l / 1000 + 0.4 * d / 1000)
# bandpass cut offsets somewhat chosen s.t. for the first mode, they coincide with the parameters from MAP
lowcut = f_r - 1500 # Hz
highcut = f_r + 500 # Hz
low = lowcut / f_nyq
high = highcut / f_nyq
b, a = ss.butter(order, [low, high], btype = 'band')
input += gain * ss.lfilter(b, a, input)
return input
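# Worked example with the script's default canal geometry (l = 22 mm, d = 7 mm,
# v = 343 m/s): the first stopped-pipe mode is
# f_r = 1 * 343 / (4 * 0.022 + 0.4 * 0.007) ≈ 343 / 0.0908 ≈ 3.8 kHz,
# i.e. the familiar 3-4 kHz ear-canal resonance; the n = 3 and n = 5 modes
# land near 11.3 kHz and 18.9 kHz respectively.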
def middle(input):
'''
Apply the effects of the middle ear
    Modelled solely as an impedance mismatch and an ossicular lever
'''
z_air = 414 # kg m^-2 s^-1
z_water = 1.48e6 # kg m^-2 s^-1
A_eardrum = 60 # mm^2
A_oval = 3.2 # mm^2
lever_malleus = 1.3
reflected = ((z_air - z_water) / (z_air + z_water)) ** 2
transmitted = 1 - reflected
return input * transmitted * (A_eardrum / A_oval) * lever_malleus
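# Worked numbers for the constants above: the reflection coefficient
# ((414 - 1.48e6) / (414 + 1.48e6))**2 ≈ 0.9989, so the transmitted fraction
# is ≈ 0.0011; multiplying by the area ratio 60 / 3.2 = 18.75 and the lever
# factor 1.3 gives an overall scaling of roughly 0.027 applied to the signal.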
def read(filename : str) -> Tuple[np.ndarray, float]:
'''
Read WAV file and normalize to float array
'''
f_s, data = wav.read(filename)
if data.dtype == 'uint8':
data = data / 255 - 0.5
elif data.dtype == 'int16':
data = data / 32767
elif data.dtype == 'int32':
data = data / 2147483647
elif data.dtype == 'float32':
data = 1.0 * data
else:
eprint(f'Input error: data.dtype = {data.dtype}')
exit(1)
if data.ndim == 1:
# mono
pass
elif data.ndim == 2:
data = data[:, 0]
else:
eprint(f'Input error: data.ndim = {data.ndim}')
exit(1)
return data, f_s
if __name__ == "__main__":
main()
| 24.965217
| 114
| 0.574713
|
#!/usr/bin/env python
'''
Outer ear simulator
Author: Michal Sudwoj <msudwoj@student.ethz.ch>
Version: 1.0.0
Date: 2019-09-09
'''
from typing import Tuple
import numpy as np
import scipy.io.wavfile as wav
import scipy.signal as ss
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from pysofaconventions import SOFAFile
def main() -> None:
args = arg_parser().parse_args()
data, f_s = read(args.input_file)
if args.head:
data = head(data, args.sofa, args.azimuth, args.elevation)
if args.canal:
data = canal(data, f_s, args.l, args.d)
if args.middle:
data = middle(data)
wav.write(args.output_file, f_s, data)
def head(data : np.ndarray, sofa : SOFAFile, azimuth : float, elevation : float):
'''
Apply effects of the head (HRTF)
'''
from scipy.spatial import KDTree
s = get_sofa(sofa)
pos = s.getVariableValue('SourcePosition')
# find closest position to requested azimuth and elevation
# TODO: consider normalizing position units to eg. degrees
index = KDTree(pos).query([azimuth, elevation, 1])[1]
hrir = s.getDataIR()[index, :, :]
data = data.T
left = ss.fftconvolve(data, hrir[0])
right = ss.fftconvolve(data, hrir[1])
output = np.asarray([left, right]).swapaxes(-1, 0)
return output
def canal(input : np.ndarray, f_s: int, l : float, d : float):
'''
Apply effects of the ear canal
Modeled as a bandpass filter, as in 'Matlab Auditory Periphery (MAP)'
'''
assert f_s > 0
assert l >= 0
assert d >= 0
v = 343
gain = 10
order = 1
f_nyq = f_s / 2
for n in [1, 3, 5]:
# 'Stopped pipe' resonator; resonating frequency
f_r = (n * v) / (4 * l / 1000 + 0.4 * d / 1000)
# bandpass cut offsets somewhat chosen s.t. for the first mode, they coincide with the parameters from MAP
lowcut = f_r - 1500 # Hz
highcut = f_r + 500 # Hz
low = lowcut / f_nyq
high = highcut / f_nyq
b, a = ss.butter(order, [low, high], btype = 'band')
input += gain * ss.lfilter(b, a, input)
return input
def middle(input):
'''
Apply the effects of the middle ear
    Modelled solely as an impedance mismatch and an ossicular lever
'''
z_air = 414 # kg m^-2 s^-1
z_water = 1.48e6 # kg m^-2 s^-1
A_eardrum = 60 # mm^2
A_oval = 3.2 # mm^2
lever_malleus = 1.3
reflected = ((z_air - z_water) / (z_air + z_water)) ** 2
transmitted = 1 - reflected
return input * transmitted * (A_eardrum / A_oval) * lever_malleus
def arg_parser() -> ArgumentParser:
parser = ArgumentParser(
formatter_class = ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--head',
help = 'Consider head effects',
dest = 'head',
action = 'store_true'
)
parser.add_argument(
'--no-head',
dest = 'head',
action = 'store_false'
)
parser.set_defaults(head = True)
parser.add_argument(
'--canal',
help = 'Consider ear canal effects',
dest = 'canal',
action = 'store_true'
)
parser.add_argument(
'--no-canal',
dest = 'canal',
action = 'store_false'
)
parser.set_defaults(canal = True)
parser.add_argument(
'--middle',
help = 'Consider middle ear effects',
dest = 'middle',
action = 'store_true'
)
parser.add_argument(
'--no-middle',
dest = 'middle',
action = 'store_false'
)
parser.set_defaults(middle = True)
parser.add_argument(
'--sofa',
help = 'HTRF Sofa file',
default = 'http://sofacoustics.org/data/database/cipic/subject_003.sofa'
)
parser.add_argument(
'-a', '--azimuth',
help = 'Azimuth of source in SOFA file units',
default = 0,
type = float
)
parser.add_argument(
'-e', '--elevation',
help = 'Elevation of source in SOFA file units',
default = 0,
type = float
)
parser.add_argument(
'-l',
help = 'Ear canal length in mm',
default = 22,
type = float
)
parser.add_argument(
'-d',
help = 'Ear canal diameter in mm',
default = 7,
type = float
)
parser.add_argument(
'input_file',
help = 'Input file'
)
parser.add_argument(
'output_file',
help = 'Output file'
)
return parser
def read(filename : str) -> Tuple[np.ndarray, float]:
'''
Read WAV file and normalize to float array
'''
f_s, data = wav.read(filename)
if data.dtype == 'uint8':
data = data / 255 - 0.5
elif data.dtype == 'int16':
data = data / 32767
elif data.dtype == 'int32':
data = data / 2147483647
elif data.dtype == 'float32':
data = 1.0 * data
else:
eprint(f'Input error: data.dtype = {data.dtype}')
exit(1)
if data.ndim == 1:
# mono
pass
elif data.ndim == 2:
data = data[:, 0]
else:
eprint(f'Input error: data.ndim = {data.ndim}')
exit(1)
return data, f_s
def get_sofa(url : str) -> SOFAFile:
import requests
from tempfile import NamedTemporaryFile
if url.startswith(('http://', 'https://')):
r = requests.get(url)
r.raise_for_status()
with NamedTemporaryFile() as f:
f.write(r.content)
return SOFAFile(f.name, 'r')
elif url.startswith('file://'):
url = url[7:]
return SOFAFile(url, 'r')
def eprint(*args, **kwargs):
from sys import stderr
print(*args, file = stderr, **kwargs)
if __name__ == "__main__":
main()
| 2,691
| 0
| 92
|
149445d25276388416e37946f3f84ca77c5a5c53
| 238
|
py
|
Python
|
geometry.py
|
kareltucek/digraph_browser
|
c5358aa0c6ff71e6959211770390b1467915807f
|
[
"MIT"
] | 3
|
2018-06-25T13:52:51.000Z
|
2021-12-01T09:51:56.000Z
|
geometry.py
|
kareltucek/digraph_browser
|
c5358aa0c6ff71e6959211770390b1467915807f
|
[
"MIT"
] | null | null | null |
geometry.py
|
kareltucek/digraph_browser
|
c5358aa0c6ff71e6959211770390b1467915807f
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
| 14
| 43
| 0.613445
|
import numpy as np
import math
def vector(x, y):
return np.array([x,y])
def zerovector():
return vector(0.0, 0.0)
def length(v):
return math.sqrt(v[0]*v[0] + v[1]*v[1])
def scale_to(vec, l):
return vec/length(vec)*l
| 113
| 0
| 92
|
03ee90dbd506d67950bf67048c286a932430249f
| 2,549
|
py
|
Python
|
auth.py
|
Yodart/banky
|
604c37ab80d95bb9f81d91534df512b20df5cd10
|
[
"MIT"
] | 1
|
2021-04-23T10:51:26.000Z
|
2021-04-23T10:51:26.000Z
|
auth.py
|
Yodart/banky
|
604c37ab80d95bb9f81d91534df512b20df5cd10
|
[
"MIT"
] | null | null | null |
auth.py
|
Yodart/banky
|
604c37ab80d95bb9f81d91534df512b20df5cd10
|
[
"MIT"
] | null | null | null |
from flask import Flask, Blueprint, request, jsonify, make_response, redirect, url_for
from werkzeug.security import generate_password_hash, check_password_hash
from functools import wraps
from db import db_connect
import datetime
import jwt
import sys
auth = Blueprint('auth', __name__)
@auth.route('/login')
@db_connect
| 39.828125
| 137
| 0.608474
|
from flask import Flask, Blueprint, request, jsonify, make_response, redirect, url_for
from werkzeug.security import generate_password_hash, check_password_hash
from functools import wraps
from db import db_connect
import datetime
import jwt
import sys
auth = Blueprint('auth', __name__)
def require_auth_token(f):
@wraps(f)
@db_connect
def decorated(db_cursor, db_connection, *args, **kwargs):
token = None
account = None
if 'x-access-token' in request.headers:
token = request.headers['x-access-token']
if not token:
return jsonify({'error': 'Missing auth token'}), 401
try:
data = jwt.decode(token, 'secret')
db_cursor.execute(
"SELECT id,name,last_name,account_number,balance FROM accounts WHERE account_number=%s", ([data['account_number']]))
account_data = db_cursor.fetchall()[0]
account = {'id': account_data[0],
'name': account_data[1],
'last_name': account_data[2],
'account_number': account_data[3],
'balance': account_data[4]}
except:
return jsonify({'error': 'Invalid auth token.'}), 401
return f(account, *args, **kwargs)
return decorated
@auth.route('/login')
@db_connect
def login(db_cursor, db_connection):
auth = request.authorization
if not auth or not auth.username or not auth.password:
return make_response('Could not verify', 401, {'WWW-Authenticate': 'Basic realm="Login required!"'})
try:
db_cursor.execute(
"SELECT id,name,last_name,account_number,balance,password FROM accounts WHERE account_number=%s", ([auth.username]))
account_data = db_cursor.fetchall()[0]
account = {'id': account_data[0],
'name': account_data[1],
'last_name': account_data[2],
'account_number': account_data[3],
'balance': account_data[4],
'password': account_data[5]}
if check_password_hash(account['password'], auth.password):
token = jwt.encode(
{'account_number': account['account_number'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=30)}, 'secret')
return jsonify({'token': token.decode('UTF-8')}), 200
return {'message': "Wrong Password"}, 401
except:
return {'error': "Unable to find account", "traceback": str(sys.exc_info())}, 401
| 2,178
| 0
| 45
|
bd3f48038a738c477c55e470123e442f51b07416
| 361
|
py
|
Python
|
yelp/urls.py
|
elizabethts/tally_ai_ds
|
20c63420a532d277e8832a11af75d5c4ffa9215c
|
[
"MIT"
] | 3
|
2020-04-01T22:17:48.000Z
|
2021-01-24T19:04:19.000Z
|
yelp/urls.py
|
elizabethts/tally_ai_ds
|
20c63420a532d277e8832a11af75d5c4ffa9215c
|
[
"MIT"
] | 8
|
2020-06-05T21:05:14.000Z
|
2021-12-13T20:43:05.000Z
|
yelp/urls.py
|
elizabethts/tally_ai_ds
|
20c63420a532d277e8832a11af75d5c4ffa9215c
|
[
"MIT"
] | 2
|
2020-04-11T20:14:13.000Z
|
2021-01-06T01:06:10.000Z
|
# yelp/urls.py
from django.urls import path
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
# view functions
from .views import hello
from .views import home
urlpatterns = [  # a list, not a set: URL ordering must be deterministic
    path('', hello, name='hello'),
    path('<slug:business_id>', home, name='home'),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| 24.066667
| 61
| 0.759003
|
# yelp/urls.py
from django.urls import path
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
# view functions
from .views import hello
from .views import home
urlpatterns = [  # a list, not a set: URL ordering must be deterministic
    path('', hello, name='hello'),
    path('<slug:business_id>', home, name='home'),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| 0
| 0
| 0
|
1e220db752c0243838f7243836c9a10b0fa287c8
| 3,661
|
py
|
Python
|
mini-projects/aic-15-image-search-engine/utils/model.py
|
elbertsoftware/SpringboardAIC
|
54278548c94d1a7a61ab977ecb47d96f73f94060
|
[
"Unlicense"
] | 3
|
2019-04-06T12:37:55.000Z
|
2021-01-28T01:38:45.000Z
|
mini-projects/aic-15-image-search-engine/utils/model.py
|
elbertsoftware/SpringboardAIC
|
54278548c94d1a7a61ab977ecb47d96f73f94060
|
[
"Unlicense"
] | 16
|
2020-03-24T18:22:38.000Z
|
2022-01-13T02:24:27.000Z
|
mini-projects/aic-15-image-search-engine/utils/model.py
|
elbertsoftware/SpringboardAIC
|
54278548c94d1a7a61ab977ecb47d96f73f94060
|
[
"Unlicense"
] | 1
|
2019-11-30T09:06:46.000Z
|
2019-11-30T09:06:46.000Z
|
import tensorflow as tf
def model_inputs(image_size):
'''
Defines CNN inputs (placeholders).
:param image_size: tuple, (height, width) of an image
'''
#-> [Batch_size, image_size[0], image_size[1], 3]
inputs = tf.placeholder(dtype=tf.float32, shape=[None, image_size[0], image_size[1], 3], name='images')
targets = tf.placeholder(dtype=tf.int32, shape=[None,], name='targets')
dropout_prob = tf.placeholder(dtype=tf.float32, name='dropout_probs')
return inputs, targets, dropout_prob
def conv_block(inputs,
number_of_filters,
kernel_size,
strides=(1, 1),
padding='SAME',
activation=tf.nn.relu,
max_pool=True,
batch_norm=True):
'''
Defines convolutional block layer.
:param inputs: data from a previous layer
:param number_of_filters: integer, number of conv filters
:param kernel_size: tuple, size of conv layer kernel
:param padding: string, type of padding technique: SAME or VALID
:param activation: tf.object, activation function used on the layer
:param max_pool: boolean, if true the conv block will use max_pool
:param batch_norm: boolean, if true the conv block will use batch normalization
'''
conv_features = layer = tf.layers.conv2d(inputs=inputs,
filters=number_of_filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
activation=activation)
if max_pool:
layer = tf.layers.max_pooling2d(layer,
pool_size=(2, 2),
strides=(2, 2),
padding='SAME')
if batch_norm:
layer = tf.layers.batch_normalization(layer)
return layer, conv_features
def dense_block(inputs,
units,
activation=tf.nn.relu,
dropout_rate=None,
batch_norm=True):
'''
Defines dense block layer.
:param inputs: data from a previous layer
:param units: integer, number of neurons/units for a dense layer
:param activation: tf.object, activation function used on the layer
:param dropout_rate: dropout rate used in this dense block
:param batch_norm: boolean, if true the conv block will use batch normalization
'''
dense_features = layer = tf.layers.dense(inputs,
units=units,
activation=activation)
if dropout_rate is not None:
layer = tf.layers.dropout(layer, rate=dropout_rate)
if batch_norm:
layer = tf.layers.batch_normalization(layer)
return layer, dense_features
def opt_loss(logits,
targets,
learning_rate):
'''
Defines model's optimizer and loss functions.
:param logits: pre-activated model outputs
:param targets: true labels for each input sample
:param learning_rate: learning_rate
'''
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=logits))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
return loss, optimizer
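# Minimal wiring sketch (not from the original repository; the flatten/readout
# layers, filter counts and unit sizes below are illustrative assumptions):
def build_toy_graph(image_size=(64, 64), num_classes=10, learning_rate=1e-3):
    # placeholders for images, labels and the dropout rate
    inputs, targets, dropout_prob = model_inputs(image_size)
    # one conv block followed by one dense block, then a linear readout
    layer, _ = conv_block(inputs, number_of_filters=32, kernel_size=(3, 3))
    layer = tf.layers.flatten(layer)
    layer, _ = dense_block(layer, units=128, dropout_rate=dropout_prob)
    logits = tf.layers.dense(layer, units=num_classes)
    loss, optimizer = opt_loss(logits, targets, learning_rate)
    return loss, optimizer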
| 35.543689
| 108
| 0.556405
|
import tensorflow as tf
def model_inputs(image_size):
'''
Defines CNN inputs (placeholders).
:param image_size: tuple, (height, width) of an image
'''
#-> [Batch_size, image_size[0], image_size[1], 3]
inputs = tf.placeholder(dtype=tf.float32, shape=[None, image_size[0], image_size[1], 3], name='images')
targets = tf.placeholder(dtype=tf.int32, shape=[None,], name='targets')
dropout_prob = tf.placeholder(dtype=tf.float32, name='dropout_probs')
return inputs, targets, dropout_prob
def conv_block(inputs,
number_of_filters,
kernel_size,
strides=(1, 1),
padding='SAME',
activation=tf.nn.relu,
max_pool=True,
batch_norm=True):
'''
Defines convolutional block layer.
:param inputs: data from a previous layer
:param number_of_filters: integer, number of conv filters
:param kernel_size: tuple, size of conv layer kernel
:param padding: string, type of padding technique: SAME or VALID
:param activation: tf.object, activation function used on the layer
:param max_pool: boolean, if true the conv block will use max_pool
:param batch_norm: boolean, if true the conv block will use batch normalization
'''
conv_features = layer = tf.layers.conv2d(inputs=inputs,
filters=number_of_filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
activation=activation)
if max_pool:
layer = tf.layers.max_pooling2d(layer,
pool_size=(2, 2),
strides=(2, 2),
padding='SAME')
if batch_norm:
layer = tf.layers.batch_normalization(layer)
return layer, conv_features
def dense_block(inputs,
units,
activation=tf.nn.relu,
dropout_rate=None,
batch_norm=True):
'''
Defines dense block layer.
:param inputs: data from a previous layer
:param units: integer, number of neurons/units for a dense layer
:param activation: tf.object, activation function used on the layer
:param dropout_rate: dropout rate used in this dense block
:param batch_norm: boolean, if true the conv block will use batch normalization
'''
dense_features = layer = tf.layers.dense(inputs,
units=units,
activation=activation)
if dropout_rate is not None:
layer = tf.layers.dropout(layer, rate=dropout_rate)
if batch_norm:
layer = tf.layers.batch_normalization(layer)
return layer, dense_features
def opt_loss(logits,
targets,
learning_rate):
'''
Defines model's optimizer and loss functions.
:param logits: pre-activated model outputs
:param targets: true labels for each input sample
:param learning_rate: learning_rate
'''
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=logits))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
return loss, optimizer
| 0
| 0
| 0
|
49d827b18611982ce6bdabdd882d794ee52b953a
| 12,001
|
py
|
Python
|
QC/datasets/utils.py
|
phcavelar/graph-odenet
|
cba1224c041e53ea221e31bf9103ef950b8bd460
|
[
"MIT"
] | 4
|
2019-12-10T18:49:03.000Z
|
2022-02-16T03:21:30.000Z
|
QC/datasets/utils.py
|
phcavelar/graph-odenet
|
cba1224c041e53ea221e31bf9103ef950b8bd460
|
[
"MIT"
] | 1
|
2020-11-04T04:41:09.000Z
|
2021-01-07T18:52:37.000Z
|
QC/datasets/utils.py
|
phcavelar/graph-odenet
|
cba1224c041e53ea221e31bf9103ef950b8bd460
|
[
"MIT"
] | 2
|
2020-04-03T12:05:33.000Z
|
2020-10-10T11:57:48.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Adapted from https://github.com/priba/nmp_qc
"""
utils.py: Functions to process dataset graphs.
Usage:
"""
from __future__ import print_function
import rdkit
import torch
from joblib import Parallel, delayed
import multiprocessing
import networkx as nx
import numpy as np
import shutil
import os
__author__ = "Pedro HC Avelar, Pau Riba, Anjan Dutta"
__email__ = "phcavelar@inf.ufrgs.br, priba@cvc.uab.cat, adutta@cvc.uab.cat"
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
pred = pred.type_as(target)
target = target.type_as(pred)
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
#end collate_g_concat
#end collate_g_concat
#end collate_g_concat_dict
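# Usage sketch for accuracy() above (shapes and k values are illustrative
# assumptions, not taken from the original training code):
def _demo_accuracy():
    output = torch.randn(8, 5)          # (batch, num_classes) scores
    target = torch.randint(0, 5, (8,))  # ground-truth class indices
    top1, top3 = accuracy(output, target, topk=(1, 3))
    return top1, top3                   # precision@1 and precision@3, in percent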
| 31.664908
| 177
| 0.548371
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Adapted from https://github.com/priba/nmp_qc
"""
utils.py: Functions to process dataset graphs.
Usage:
"""
from __future__ import print_function
import rdkit
import torch
from joblib import Parallel, delayed
import multiprocessing
import networkx as nx
import numpy as np
import shutil
import os
__author__ = "Pedro HC Avelar, Pau Riba, Anjan Dutta"
__email__ = "phcavelar@inf.ufrgs.br, priba@cvc.uab.cat, adutta@cvc.uab.cat"
def qm9_nodes(g, hydrogen=False):
h = []
for n, d in g.nodes(data=True):
h_t = []
# Atom type (One-hot H, C, N, O F)
h_t += [int(d['a_type'] == x) for x in ['H', 'C', 'N', 'O', 'F']]
# Atomic number
h_t.append(d['a_num'])
# Partial Charge
h_t.append(d['pc'])
# Acceptor
h_t.append(d['acceptor'])
# Donor
h_t.append(d['donor'])
# Aromatic
h_t.append(int(d['aromatic']))
        # Hybridization
h_t += [int(d['hybridization'] == x) for x in [rdkit.Chem.rdchem.HybridizationType.SP, rdkit.Chem.rdchem.HybridizationType.SP2, rdkit.Chem.rdchem.HybridizationType.SP3]]
        # Number of hydrogens (optional extra node feature)
if hydrogen:
h_t.append(d['num_h'])
h.append(h_t)
return h
def qm9_edges(g, e_representation='raw_distance'):
remove_edges = []
e={}
for n1, n2, d in g.edges(data=True):
e_t = []
# Raw distance function
if e_representation == 'chem_graph':
if d['b_type'] is None:
remove_edges += [(n1, n2)]
else:
e_t += [i+1 for i, x in enumerate([rdkit.Chem.rdchem.BondType.SINGLE, rdkit.Chem.rdchem.BondType.DOUBLE,
rdkit.Chem.rdchem.BondType.TRIPLE, rdkit.Chem.rdchem.BondType.AROMATIC])
if x == d['b_type']]
elif e_representation == 'distance_bin':
if d['b_type'] is None:
step = (6-2)/8.0
start = 2
b = 9
for i in range(0, 9):
if d['distance'] < (start+i*step):
b = i
break
e_t.append(b+5)
else:
e_t += [i+1 for i, x in enumerate([rdkit.Chem.rdchem.BondType.SINGLE, rdkit.Chem.rdchem.BondType.DOUBLE,
rdkit.Chem.rdchem.BondType.TRIPLE, rdkit.Chem.rdchem.BondType.AROMATIC])
if x == d['b_type']]
elif e_representation == 'raw_distance':
if d['b_type'] is None:
remove_edges += [(n1, n2)]
else:
e_t.append(d['distance'])
e_t += [int(d['b_type'] == x) for x in [rdkit.Chem.rdchem.BondType.SINGLE, rdkit.Chem.rdchem.BondType.DOUBLE,
rdkit.Chem.rdchem.BondType.TRIPLE, rdkit.Chem.rdchem.BondType.AROMATIC]]
else:
print('Incorrect Edge representation transform')
quit()
if e_t:
e[(n1, n2)] = e_t
for edg in remove_edges:
g.remove_edge(*edg)
return nx.to_numpy_matrix(g), e
def normalize_data(data, mean, std):
data_norm = (data-mean)/std
return data_norm
def get_values(obj, start, end, prop):
vals = []
for i in range(start, end):
v = {}
if 'degrees' in prop:
v['degrees'] = set(sum(obj[i][0][0].sum(axis=0, dtype='int').tolist(), []))
if 'edge_labels' in prop:
v['edge_labels'] = set(sum(list(obj[i][0][2].values()), []))
if 'target_mean' in prop or 'target_std' in prop:
v['params'] = obj[i][1]
vals.append(v)
return vals
def get_graph_stats(graph_obj_handle, prop='degrees'):
# if prop == 'degrees':
num_cores = multiprocessing.cpu_count()
inputs = [int(i*len(graph_obj_handle)/num_cores) for i in range(num_cores)] + [len(graph_obj_handle)]
res = Parallel(n_jobs=num_cores)(delayed(get_values)(graph_obj_handle, inputs[i], inputs[i+1], prop) for i in range(num_cores))
stat_dict = {}
if 'degrees' in prop:
stat_dict['degrees'] = list(set([d for core_res in res for file_res in core_res for d in file_res['degrees']]))
if 'edge_labels' in prop:
stat_dict['edge_labels'] = list(set([d for core_res in res for file_res in core_res for d in file_res['edge_labels']]))
if 'target_mean' in prop or 'target_std' in prop:
param = np.array([file_res['params'] for core_res in res for file_res in core_res])
if 'target_mean' in prop:
stat_dict['target_mean'] = np.mean(param, axis=0)
if 'target_std' in prop:
stat_dict['target_std'] = np.std(param, axis=0)
return stat_dict
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
pred = pred.type_as(target)
target = target.type_as(pred)
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def collate_g_concat_edge_data(batch):
g_M = lambda g: g[0][0]
g_n = lambda g: g_M(g).shape[0]
g_x = lambda g: g[0][1]
g_x_d = lambda g: len(g_x(g)[0])
g_e = lambda g: g[0][2]
g_m = lambda g: len(g_e(g))
g_e_keys = lambda g: list(g_e(g).keys())
g_e_values = lambda g: list(g_e(g).values())
g_e_d = lambda g: len(g_e_values(g)[0])
g_o = lambda g: g[1]
g_o_d = lambda g: len(g[1])
n_d, e_d, o_d = g_x_d(batch[0]), g_e_d(batch[0]), g_o_d(batch[0])
N = 0
M = 0
batch_size = len(batch)
for g in batch:
n = g_n(g)
m = g_m(g)
N += n
M += m
#end for
G = np.zeros([N, N])
B = np.zeros([N], dtype=np.int64)
X = np.zeros([N, n_d])
E_d = np.zeros([2*M, e_d])
E_src = np.zeros([2*M], dtype=np.int64)
E_tgt = np.zeros([2*M,2], dtype=np.int64)
Y = np.zeros([batch_size, o_d])
n_acc = 0
m_acc = 0
for b, g in enumerate(batch):
n = g_n(g)
G[n_acc:n_acc+n,n_acc:n_acc+n] = g_M(g)
B[n_acc:n_acc+n] = b
X[n_acc:n_acc+n,:] = g_x(g)
for edge_id, edge in enumerate(sorted(g_e_keys(g))):
src, tgt = edge
src_edge_id = m_acc+edge_id
E_d[src_edge_id,:] = g_e(g)[edge]
E_src[src_edge_id] = src
E_tgt[src_edge_id,:] = [tgt,src_edge_id]
tgt_edge_id = M+m_acc+edge_id
E_d[tgt_edge_id,:] = g_e(g)[edge]
E_src[tgt_edge_id] = tgt
E_tgt[tgt_edge_id] = [src,tgt_edge_id]
#end for
Y[b] = g_o(g)
n_acc+=n
m_acc+=g_m(g)
#end for
G = torch.FloatTensor(G)
B = torch.LongTensor(B)
X = torch.FloatTensor(X)
E_d = torch.FloatTensor(E_d)
E_src = torch.LongTensor(E_src)
E_tgt = torch.sparse.FloatTensor(torch.LongTensor(E_tgt.transpose()),torch.FloatTensor(np.ones(2*M)),torch.Size([N,2*M])).to_dense()
Y = torch.FloatTensor(Y)
return batch_size,G,B,X,E_d,E_src,E_tgt,Y
#end collate_g_concat
def collate_g_concat(batch):
g_M = lambda g: g[0][0]
g_n = lambda g: g_M(g).shape[0]
g_x = lambda g: g[0][1]
g_x_d = lambda g: len(g_x(g)[0])
g_e = lambda g: g[0][2]
g_m = lambda g: len(g_e(g))
g_e_keys = lambda g: list(g_e(g).keys())
g_e_values = lambda g: list(g_e(g).values())
g_e_d = lambda g: len(g_e_values(g)[0])
g_o = lambda g: g[1]
g_o_d = lambda g: len(g[1])
n_d, e_d, o_d = g_x_d(batch[0]), g_e_d(batch[0]), g_o_d(batch[0])
N = 0
M = 0
batch_size = len(batch)
for g in batch:
n = g_n(g)
m = g_m(g)
N += n
M += m
#end for
G = np.zeros([N, N])
B = np.zeros([N], dtype=np.int64)
X = np.zeros([N, n_d])
E_d = np.zeros([N, N, e_d])
E_i = np.zeros([M, 2], dtype=np.int64)
Y = np.zeros([batch_size, o_d])
n_acc = 0
for b, g in enumerate(batch):
n = g_n(g)
G[n_acc:n_acc+n,n_acc:n_acc+n] = g_M(g)
B[n_acc:n_acc+n] = b
X[n_acc:n_acc+n,:] = g_x(g)
for edge_id, edge in enumerate(sorted(g_e_keys(g))):
src, tgt = edge
E_i[edge_id,:] = [min(src,tgt),max(src,tgt)]
E_d[n_acc+src,n_acc+tgt,:] = g_e(g)[edge]
E_d[n_acc+tgt,n_acc+src,:] = g_e(g)[edge]
#end for
Y[b] = g_o(g)
n_acc+=n
#end for
G = torch.FloatTensor(G)
B = torch.LongTensor(B)
X = torch.FloatTensor(X)
E_d = torch.FloatTensor(E_d)
E_i = torch.LongTensor(E_i)
Y = torch.FloatTensor(Y)
return G,B,X,E_d,E_i,Y
#end collate_g_concat
def collate_g_concat_dict(batch):
g_M = lambda g: g[0][0]
g_n = lambda g: g_M(g).shape[0]
g_x = lambda g: g[0][1]
g_x_d = lambda g: len(g_x(g)[0])
g_e = lambda g: g[0][2]
g_e_keys = lambda g: list(g_e(g).keys())
g_e_values = lambda g: list(g_e(g).values())
g_e_d = lambda g: len(g_e_values(g)[0])
g_o = lambda g: g[1]
g_o_d = lambda g: len(g[1])
n_d, e_d, o_d = g_x_d(batch[0]), g_e_d(batch[0]), g_o_d(batch[0])
N = 0
batch_size = len(batch)
for g in batch:
M = g_M(g)
n = g_n(g)
N += n
#end for
G = np.zeros([N, N])
B = np.zeros([N], dtype=np.int64)
X = np.zeros([N, n_d])
E = {}
Y = np.zeros([batch_size, o_d])
n_acc = 0
print( "bla" )
for b, g in enumerate(batch):
n = g_n(g)
G[n_acc:n_acc+n,n_acc:n_acc+n] = g_M(g)
B[n_acc:n_acc+n] = b
X[n_acc:n_acc+n,:] = g_x(g)
for edge in g_e_keys(g):
src,tgt=edge
E[n_acc+src,n_acc+tgt] = g_e(g)[edge]
E[n_acc+tgt,n_acc+src] = g_e(g)[edge]
#end for
Y[b] = g_o(g)
n_acc+=n
#end for
G = torch.FloatTensor(G)
B = torch.LongTensor(B)
X = torch.FloatTensor(X)
print( "ble" )
for k in E.keys():
E[k] = torch.FloatTensor(E[k])
#end for
Y = torch.FloatTensor(Y)
return G,B,X,E,Y
#end collate_g_concat_dict
def collate_g(batch):
batch_sizes = np.max(np.array([[len(input_b[1]), len(input_b[1][0]), len(input_b[2]),
len(list(input_b[2].values())[0])]
if input_b[2] else
[len(input_b[1]), len(input_b[1][0]), 0,0]
for (input_b, target_b) in batch]), axis=0)
g = np.zeros((len(batch), batch_sizes[0], batch_sizes[0]))
h = np.zeros((len(batch), batch_sizes[0], batch_sizes[1]))
e = np.zeros((len(batch), batch_sizes[0], batch_sizes[0], batch_sizes[3]))
target = np.zeros((len(batch), len(batch[0][1])))
for i in range(len(batch)):
num_nodes = len(batch[i][0][1])
# Adjacency matrix
g[i, 0:num_nodes, 0:num_nodes] = batch[i][0][0]
# Node features
h[i, 0:num_nodes, :] = batch[i][0][1]
# Edges
for edge in batch[i][0][2].keys():
e[i, edge[0], edge[1], :] = batch[i][0][2][edge]
e[i, edge[1], edge[0], :] = batch[i][0][2][edge]
# Target
target[i, :] = batch[i][1]
g = torch.FloatTensor(g)
h = torch.FloatTensor(h)
e = torch.FloatTensor(e)
target = torch.FloatTensor(target)
return g, h, e, target
def save_checkpoint(state, is_best, directory):
if not os.path.isdir(directory):
os.makedirs(directory)
checkpoint_file = os.path.join(directory, 'checkpoint.pth')
best_model_file = os.path.join(directory, 'model_best.pth')
torch.save(state, checkpoint_file)
if is_best:
shutil.copyfile(checkpoint_file, best_model_file)
| 10,692
| 0
| 230
|
50691cf6b0a343a60746294d17b9d62fdcf4fa2d
| 675
|
py
|
Python
|
__init__.py
|
SachaSchwarz/whitepeaks
|
fd6d14b47f6fb80516d9a06941d79b23d8314e50
|
[
"MIT"
] | null | null | null |
__init__.py
|
SachaSchwarz/whitepeaks
|
fd6d14b47f6fb80516d9a06941d79b23d8314e50
|
[
"MIT"
] | null | null | null |
__init__.py
|
SachaSchwarz/whitepeaks
|
fd6d14b47f6fb80516d9a06941d79b23d8314e50
|
[
"MIT"
] | null | null | null |
'''
###############################################################################
Ultrafast Quantum Optics Package
###############################################################################
Quantum Optics and Quantum Information Group
Written by
> Jean-Philippe MacLean: jpmaclean@uwaterloo.ca
> Sacha Schwarz sacha.schwarz@uwaterloo.ca
'''
#Initialize modules
from .whitepeaks.analytics import *
from .whitepeaks.interface import *
from .whitepeaks.methods import *
from .whitepeaks.states import *
#Define constants
c=0.299792458 #Speed of light in um/fs or mm/ps
import warnings
warnings.filterwarnings("ignore")
from timeit import default_timer as time
| 27
| 79
| 0.601481
|
'''
###############################################################################
Ultrafast Quantum Optics Package
###############################################################################
Quantum Optics and Quantum Information Group
Written by
> Jean-Philippe MacLean: jpmaclean@uwaterloo.ca
> Sacha Schwarz sacha.schwarz@uwaterloo.ca
'''
#Initialize modules
from .whitepeaks.analytics import *
from .whitepeaks.interface import *
from .whitepeaks.methods import *
from .whitepeaks.states import *
#Define constants
c=0.299792458 #Speed of light in um/fs or mm/ps
import warnings
warnings.filterwarnings("ignore")
from timeit import default_timer as time
| 0
| 0
| 0
|
840d8ae323c0daf347b4937320a2c80c2d7c1d9f
| 7,645
|
py
|
Python
|
app/switch/controller.py
|
conatel-i-d/sm-api
|
1a57e8303ae5f33ae4c8ac8247449fac5b0c848d
|
[
"MIT"
] | 1
|
2020-09-20T07:44:33.000Z
|
2020-09-20T07:44:33.000Z
|
app/switch/controller.py
|
conatel-i-d/sm-api
|
1a57e8303ae5f33ae4c8ac8247449fac5b0c848d
|
[
"MIT"
] | 2
|
2019-12-10T13:00:36.000Z
|
2021-04-30T21:04:42.000Z
|
app/switch/controller.py
|
conatel-i-d/sm-api
|
1a57e8303ae5f33ae4c8ac8247449fac5b0c848d
|
[
"MIT"
] | null | null | null |
import os, sys
from flask import request
from flask_restplus import Namespace, Resource, fields
from flask.wrappers import Response
from app.utils.async_action import async_action
from app.api_response import ApiResponse
from app.errors import ApiException, JobTemplateNotFound, PlaybookFailure, PlaybookTimeout, SwitchNotFound
from .service import SwitchService
from .model import Switch
from .interfaces import SwitchInterfaces
from app.utils.authorization import authorize
from app.utils.logger import log
from app.utils.b64 import decode
from app.macs.service import MacService
api_description = """
Representación de los switches de la empresa.
"""
api = Namespace('Switch', description=api_description)
interfaces = SwitchInterfaces(api)
@api.route("/")
@api.response(400, 'Bad Request', interfaces.error_response_model)
@api.doc(responses={
401: 'Unauthorized',
403: 'Forbidden',
500: 'Internal server error',
502: 'Bad Gateway',
503: 'Service Unavailable',
})
class SwitchResource(Resource):
"""
Switch Resource
"""
@api.response(200, 'Lista de Switches', interfaces.many_response_model)
@log
@async_action
@authorize
async def get(self):
"""
Devuelve la lista de Switches
"""
try:
entities = await SwitchService.get_all()
return ApiResponse(interfaces.many_schema.dump(entities).data)
except JobTemplateNotFound:
            raise ApiException('No existe un playbook para obtener la información de las interfaces')
except PlaybookTimeout:
raise ApiException('La ejecución de la tarea supero el tiempo del timeout')
except PlaybookFailure:
raise ApiException('Fallo la ejecución de la tarea')
@api.expect(interfaces.create_model)
@api.response(200, 'Nuevo Switch', interfaces.single_response_model)
@log
@authorize
def post(self):
"""
Crea un nuevo Switch.
"""
json_data = request.get_json()
if json_data is None:
raise ApiException('JSON body is undefined')
body = interfaces.single_schema.load(json_data).data
Switch = SwitchService.create(body)
return ApiResponse(interfaces.single_schema.dump(Switch).data)
@api.expect(interfaces.update_model)
@api.response(200, 'Switches Actualizados', interfaces.many_response_model)
@log
@authorize
def put(self, id: int):
"""
Actualiza un batch de Switches por su ID.
"""
json_data = request.get_json()
sw_updated = []
for item in json_data:
sw = interfaces.single_schema.load(request.json).data
sw_updated.append(SwitchService.update(id, sw))
return ApiResponse(interfaces.many_schema.dump(sw_updated).data)
@api.route("/<int:id>")
@api.param("id", "Identificador único del Switch")
@api.response(400, 'Bad Request', interfaces.error_response_model)
@api.doc(responses={
401: 'Unauthorized',
403: 'Forbidden',
500: 'Internal server error',
502: 'Bad Gateway',
503: 'Service Unavailable',
})
@api.route("/inventory")
@api.response(400, 'Bad Request', interfaces.error_response_model)
@api.doc(responses={
401: 'Unauthorized',
403: 'Forbidden',
500: 'Internal server error',
502: 'Bad Gateway',
503: 'Service Unavailable',
})
class SwitchInventoryResource(Resource):
"""
Inventory switch Resource
"""
    @api.response(200, 'Inventario con lista de switches')
@async_action
async def get(self):
"""
Devuelve la lista de Switches
"""
try:
entities = await SwitchService.get_all()
ansible_switches_vars = {}
for x in entities:
ansible_switches_vars[x.name] = {
"ansible_host": x.ip,
"ansible_become": True,
"ansible_become_method": "enable",
"ansible_connection": "network_cli",
"ansible_network_os": "ios",
"ansible_port": x.ansible_ssh_port or 22,
"ansible_user": decode(x.ansible_user),
"ansible_ssh_pass": decode(x.ansible_ssh_pass)
}
ansible_switches_hostnames = map(lambda x : x.name, entities)
sw_inv = {
'group': {
'hosts': list(ansible_switches_hostnames),
},
'_meta': {
'hostvars': ansible_switches_vars
}
}
return ApiResponse(sw_inv)
except JobTemplateNotFound:
            raise ApiException('No existe un playbook para obtener la información de las interfaces')
except PlaybookTimeout:
raise ApiException('La ejecución de la tarea supero el tiempo del timeout')
except PlaybookFailure:
raise ApiException('Fallo la ejecución de la tarea')
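    # Editor's note (illustration, not part of the original source): the payload
    # built above follows the Ansible dynamic-inventory convention, roughly
    #   {"group": {"hosts": ["sw-core-01", ...]},
    #    "_meta": {"hostvars": {"sw-core-01": {"ansible_host": "10.0.0.1", ...}}}}
    # Host names and addresses here are made-up examples.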
@api.route("/<int:id>/macs")
@api.param("id", "Identificador único del Switch")
class SwitchMacResource(Resource):
"""
Mac Resource
"""
@api.response(200, 'Lista de Interfaces con sus respectivas macs', interfaces.many_response_model)
@log
@async_action
@authorize
async def get(self, switch_id: int):
"""
        Devuelve la lista de todas las macs del switch
"""
try:
resp = await MacService.get(switch_id)
return ApiResponse(resp)
except SwitchNotFound:
raise ApiException(f'No se encuentra un switch con el id:{switch_id}')
except JobTemplateNotFound:
            raise ApiException('No existe un playbook para obtener la información de las interfaces')
except PlaybookTimeout:
raise ApiException('La ejecución de la tarea supero el tiempo del timeout')
except PlaybookFailure:
raise ApiException('Fallo la ejecución de la tarea')
| 35.230415
| 106
| 0.63898
|
import os, sys
from flask import request
from flask_restplus import Namespace, Resource, fields
from flask.wrappers import Response
from app.utils.async_action import async_action
from app.api_response import ApiResponse
from app.errors import ApiException, JobTemplateNotFound, PlaybookFailure, PlaybookTimeout, SwitchNotFound
from .service import SwitchService
from .model import Switch
from .interfaces import SwitchInterfaces
from app.utils.authorization import authorize
from app.utils.logger import log
from app.utils.b64 import decode
from app.macs.service import MacService
api_description = """
Representación de los switches de la empresa.
"""
api = Namespace('Switch', description=api_description)
interfaces = SwitchInterfaces(api)
@api.route("/")
@api.response(400, 'Bad Request', interfaces.error_response_model)
@api.doc(responses={
401: 'Unauthorized',
403: 'Forbidden',
500: 'Internal server error',
502: 'Bad Gateway',
503: 'Service Unavailable',
})
class SwitchResource(Resource):
"""
Switch Resource
"""
@api.response(200, 'Lista de Switches', interfaces.many_response_model)
@log
@async_action
@authorize
async def get(self):
"""
Devuelve la lista de Switches
"""
try:
entities = await SwitchService.get_all()
return ApiResponse(interfaces.many_schema.dump(entities).data)
except JobTemplateNotFound:
            raise ApiException('No existe un playbook para obtener la información de las interfaces')
except PlaybookTimeout:
raise ApiException('La ejecución de la tarea supero el tiempo del timeout')
except PlaybookFailure:
raise ApiException('Fallo la ejecución de la tarea')
@api.expect(interfaces.create_model)
@api.response(200, 'Nuevo Switch', interfaces.single_response_model)
@log
@authorize
def post(self):
"""
Crea un nuevo Switch.
"""
json_data = request.get_json()
if json_data is None:
raise ApiException('JSON body is undefined')
body = interfaces.single_schema.load(json_data).data
Switch = SwitchService.create(body)
return ApiResponse(interfaces.single_schema.dump(Switch).data)
@api.expect(interfaces.update_model)
@api.response(200, 'Switches Actualizados', interfaces.many_response_model)
@log
@authorize
def put(self, id: int):
"""
Actualiza un batch de Switches por su ID.
"""
json_data = request.get_json()
sw_updated = []
for item in json_data:
sw = interfaces.single_schema.load(request.json).data
sw_updated.append(SwitchService.update(id, sw))
return ApiResponse(interfaces.many_schema.dump(sw_updated).data)
@api.route("/<int:id>")
@api.param("id", "Identificador único del Switch")
@api.response(400, 'Bad Request', interfaces.error_response_model)
@api.doc(responses={
401: 'Unauthorized',
403: 'Forbidden',
500: 'Internal server error',
502: 'Bad Gateway',
503: 'Service Unavailable',
})
class SwitchIdResource(Resource):
@api.response(200, 'Switch', interfaces.single_response_model)
@log
@async_action
@authorize
async def get(self, id: int):
"""
Obtiene un único Switch por ID.
"""
try:
switch = await SwitchService.get_by_id(id)
return ApiResponse(interfaces.single_schema.dump(switch).data)
except SwitchNotFound:
raise ApiException(f'No se encuentra un switch con el id:{id}')
except JobTemplateNotFound:
            raise ApiException('No existe un playbook para obtener la información de las interfaces')
except PlaybookTimeout:
raise ApiException('La ejecución de la tarea supero el tiempo del timeout')
except PlaybookFailure:
raise ApiException('Fallo la ejecución de la tarea')
@api.response(204, 'No Content')
@log
@authorize
def delete(self, id: int) -> Response:
"""
Elimina un único Switch por ID.
"""
from flask import jsonify
id = SwitchService.delete_by_id(id)
return ApiResponse(None, 204)
@api.expect(interfaces.update_model)
@api.response(200, 'Switch Actualizado', interfaces.single_response_model)
@log
@authorize
def put(self, id: int):
"""
Actualiza un único Switch por ID.
"""
try:
body = interfaces.single_schema.load(request.json).data
Switch = SwitchService.update(id, body)
return ApiResponse(interfaces.single_schema.dump(Switch).data)
except SwitchNotFound:
raise ApiException(f'No se encuentra un switch con el id:{id}')
@api.route("/inventory")
@api.response(400, 'Bad Request', interfaces.error_response_model)
@api.doc(responses={
401: 'Unauthorized',
403: 'Forbidden',
500: 'Internal server error',
502: 'Bad Gateway',
503: 'Service Unavailable',
})
class SwitchInventoryResource(Resource):
"""
Inventory switch Resource
"""
    @api.response(200, 'Inventario con lista de switches')
@async_action
async def get(self):
"""
Devuelve la lista de Switches
"""
try:
entities = await SwitchService.get_all()
ansible_switches_vars = {}
for x in entities:
ansible_switches_vars[x.name] = {
"ansible_host": x.ip,
"ansible_become": True,
"ansible_become_method": "enable",
"ansible_connection": "network_cli",
"ansible_network_os": "ios",
"ansible_port": x.ansible_ssh_port or 22,
"ansible_user": decode(x.ansible_user),
"ansible_ssh_pass": decode(x.ansible_ssh_pass)
}
ansible_switches_hostnames = map(lambda x : x.name, entities)
sw_inv = {
'group': {
'hosts': list(ansible_switches_hostnames),
},
'_meta': {
'hostvars': ansible_switches_vars
}
}
return ApiResponse(sw_inv)
except JobTemplateNotFound:
            raise ApiException('No existe un playbook para obtener la información de las interfaces')
except PlaybookTimeout:
raise ApiException('La ejecución de la tarea supero el tiempo del timeout')
except PlaybookFailure:
raise ApiException('Fallo la ejecución de la tarea')
@api.route("/<int:id>/macs")
@api.param("id", "Identificador único del Switch")
class SwitchMacResource(Resource):
"""
Mac Resource
"""
@api.response(200, 'Lista de Interfaces con sus respectivas macs', interfaces.many_response_model)
@log
@async_action
@authorize
async def get(self, switch_id: int):
"""
        Devuelve la lista de todas las macs del switch
"""
try:
resp = await MacService.get(switch_id)
return ApiResponse(resp)
except SwitchNotFound:
raise ApiException(f'No se encuentra un switch con el id:{switch_id}')
except JobTemplateNotFound:
            raise ApiException('No existe un playbook para obtener la información de las interfaces')
except PlaybookTimeout:
raise ApiException('La ejecución de la tarea supero el tiempo del timeout')
except PlaybookFailure:
raise ApiException('Fallo la ejecución de la tarea')
| 0
| 1,669
| 22
|
5ccb6a24d0e76921694aa036b37017f07029dff1
| 7,057
|
py
|
Python
|
linlearn/estimator/tmean.py
|
LinLearn/linlearn
|
de5752d47bbe8e2fb62d41b0dcf2526f87545e1c
|
[
"BSD-3-Clause"
] | null | null | null |
linlearn/estimator/tmean.py
|
LinLearn/linlearn
|
de5752d47bbe8e2fb62d41b0dcf2526f87545e1c
|
[
"BSD-3-Clause"
] | null | null | null |
linlearn/estimator/tmean.py
|
LinLearn/linlearn
|
de5752d47bbe8e2fb62d41b0dcf2526f87545e1c
|
[
"BSD-3-Clause"
] | null | null | null |
# Authors: Stephane Gaiffas <stephane.gaiffas@gmail.com>
# Ibrahim Merad <imerad7@gmail.com>
# License: BSD 3 clause
"""
This module implements the ``TMean`` class for the trimmed-means robust estimator.
`StateTMean` is a placeholder for the TMean estimator state, containing the fields
deriv_samples, deriv_samples_outer_prods, gradient, loss_derivative and
partial_derivative (see the namedtuple definition below).
"""
from collections import namedtuple
import numpy as np
from numba import jit
from ._base import Estimator, jit_kwargs
from .._utils import np_float, trimmed_mean, fast_trimmed_mean
StateTMean = namedtuple(
"StateTMean",
[
"deriv_samples",
"deriv_samples_outer_prods",
"gradient",
"loss_derivative",
"partial_derivative",
],
)
class TMean(Estimator):
"""Trimmed-mean estimator"""
| 37.94086
| 128
| 0.54655
|
# Authors: Stephane Gaiffas <stephane.gaiffas@gmail.com>
# Ibrahim Merad <imerad7@gmail.com>
# License: BSD 3 clause
"""
This module implements the ``TMean`` class for the trimmed-means robust estimator.
`StateTMean` is a placeholder for the TMean estimator state, containing the fields
deriv_samples, deriv_samples_outer_prods, gradient, loss_derivative and
partial_derivative (see the namedtuple definition below).
"""
from collections import namedtuple
import numpy as np
from numba import jit
from ._base import Estimator, jit_kwargs
from .._utils import np_float, trimmed_mean, fast_trimmed_mean
StateTMean = namedtuple(
"StateTMean",
[
"deriv_samples",
"deriv_samples_outer_prods",
"gradient",
"loss_derivative",
"partial_derivative",
],
)
class TMean(Estimator):
"""Trimmed-mean estimator"""
def __init__(self, X, y, loss, n_classes, fit_intercept, percentage):
Estimator.__init__(self, X, y, loss, n_classes, fit_intercept)
self.percentage = percentage
# Number of samples excluded from both tails (left and right)
self.n_excluded_tails = max(1, int(len(X) * percentage))
self.one_hot_cols = np.sum(X == 0.0, axis=0) > X.shape[0]/20
if fit_intercept:
self.one_hot_cols = np.insert(self.one_hot_cols, 0, False)
def get_state(self):
return StateTMean(
deriv_samples=np.empty(
(self.n_samples, self.n_classes), dtype=np_float, order="F"
),
deriv_samples_outer_prods=np.empty(
(self.n_samples, self.n_classes), dtype=np_float, order="F"
),
gradient=np.empty(
(self.n_features + int(self.fit_intercept), self.n_classes),
dtype=np_float,
order="F",
),
loss_derivative=np.empty(self.n_classes, dtype=np_float),
partial_derivative=np.empty(self.n_classes, dtype=np_float),
)
def partial_deriv_factory(self):
X = self.X
y = self.y
loss = self.loss
deriv_loss = loss.deriv_factory()
n_samples = self.n_samples
n_classes = self.n_classes
one_hot_cols = self.one_hot_cols
n_excluded_tails = self.n_excluded_tails
if self.fit_intercept:
@jit(**jit_kwargs)
def partial_deriv(j, inner_products, state):
deriv_samples = state.deriv_samples
partial_derivative = state.partial_derivative
if j == 0:
for i in range(n_samples):
deriv_loss(y[i], inner_products[i], deriv_samples[i])
else:
for i in range(n_samples):
deriv_loss(y[i], inner_products[i], deriv_samples[i])
for k in range(n_classes):
deriv_samples[i, k] *= X[i, j - 1]
# TODO: Hand-made mean ?
# TODO: Try out different sorting mechanisms, since at some point the
# sorting order won't change much...
if one_hot_cols[j]:
for k in range(n_classes):
partial_derivative[k] = trimmed_mean(deriv_samples[:, k], n_samples, n_excluded_tails)
else:
for k in range(n_classes):
partial_derivative[k] = fast_trimmed_mean(deriv_samples[:, k], n_samples, n_excluded_tails)
return partial_deriv
else:
@jit(**jit_kwargs)
def partial_deriv(j, inner_products, state):
deriv_samples = state.deriv_samples
partial_derivative = state.partial_derivative
for i in range(n_samples):
deriv_loss(y[i], inner_products[i], deriv_samples[i])
for k in range(n_classes):
deriv_samples[i, k] *= X[i, j]
if one_hot_cols[j]:
for k in range(n_classes):
partial_derivative[k] = trimmed_mean(deriv_samples[:, k], n_samples, n_excluded_tails)
else:
for k in range(n_classes):
partial_derivative[k] = fast_trimmed_mean(deriv_samples[:, k], n_samples, n_excluded_tails)
return partial_deriv
def grad_factory(self):
X = self.X
y = self.y
loss = self.loss
deriv_loss = loss.deriv_factory()
n_samples = self.n_samples
n_features = self.n_features
n_classes = self.n_classes
one_hot_cols = self.one_hot_cols
n_excluded_tails = self.n_excluded_tails
if self.fit_intercept:
@jit(**jit_kwargs)
def grad(inner_products, state):
deriv_samples = state.deriv_samples
deriv_samples_outer_prods = state.deriv_samples_outer_prods
gradient = state.gradient
for i in range(n_samples):
deriv_loss(y[i], inner_products[i], deriv_samples[i])
for k in range(n_classes):
deriv_samples_outer_prods[i, k] = deriv_samples[i, k]
for k in range(n_classes):
gradient[0, k] = fast_trimmed_mean(deriv_samples_outer_prods[:, k], n_samples, n_excluded_tails)
# gradient[0, k] = trimmed_mean(deriv_samples_outer_prods[:, k], n_samples, n_excluded_tails)
for k in range(n_classes):
for j in range(n_features):
for i in range(n_samples):
deriv_samples_outer_prods[i, k] = (
deriv_samples[i, k] * X[i, j]
)
if one_hot_cols[j]:
gradient[j + 1, k] = trimmed_mean(deriv_samples_outer_prods[:, k], n_samples, n_excluded_tails)
else:
gradient[j + 1, k] = fast_trimmed_mean(deriv_samples_outer_prods[:, k], n_samples, n_excluded_tails)
return grad
else:
@jit(**jit_kwargs)
def grad(inner_products, state):
deriv_samples = state.deriv_samples
deriv_samples_outer_prods = state.deriv_samples_outer_prods
gradient = state.gradient
for i in range(n_samples):
deriv_loss(y[i], inner_products[i], deriv_samples[i])
for j in range(n_features):
if one_hot_cols[j]:
tmean_fct = trimmed_mean
else:
tmean_fct = fast_trimmed_mean
for k in range(n_classes):
for i in range(n_samples):
deriv_samples_outer_prods[i, k] = (
deriv_samples[i, k] * X[i, j]
)
gradient[j, k] = tmean_fct(deriv_samples_outer_prods[:, k], n_samples, n_excluded_tails)
return 0
return grad
| 6,224
| 0
| 108
|
9c53f7fb441dbb00045923c0e4b9ace6c6b0a5b3
| 2,427
|
py
|
Python
|
bauh/gems/arch/confirmation.py
|
octopusSD/bauh
|
c1f210fef87ddb4614b201ec2030330b71e43fe4
|
[
"Zlib"
] | 1
|
2020-02-20T05:08:46.000Z
|
2020-02-20T05:08:46.000Z
|
bauh/gems/arch/confirmation.py
|
octopusSD/bauh
|
c1f210fef87ddb4614b201ec2030330b71e43fe4
|
[
"Zlib"
] | null | null | null |
bauh/gems/arch/confirmation.py
|
octopusSD/bauh
|
c1f210fef87ddb4614b201ec2030330b71e43fe4
|
[
"Zlib"
] | null | null | null |
from typing import Set, List, Tuple
from bauh.api.abstract.handler import ProcessWatcher
from bauh.api.abstract.view import MultipleSelectComponent, InputOption
from bauh.commons import resource
from bauh.commons.html import bold
from bauh.gems.arch import ROOT_DIR
from bauh.view.util.translation import I18n
| 44.944444
| 186
| 0.619695
|
from typing import Set, List, Tuple
from bauh.api.abstract.handler import ProcessWatcher
from bauh.api.abstract.view import MultipleSelectComponent, InputOption
from bauh.commons import resource
from bauh.commons.html import bold
from bauh.gems.arch import ROOT_DIR
from bauh.view.util.translation import I18n
def _get_mirror_icon(mirror: str):
return resource.get_path('img/{}.svg'.format('arch' if mirror == 'aur' else 'mirror'), ROOT_DIR)
def request_optional_deps(pkgname: str, pkg_mirrors: dict, watcher: ProcessWatcher, i18n: I18n) -> Set[str]:
opts = []
for p, d in pkg_mirrors.items():
op = InputOption('{}{} ( {}: {} )'.format(p, ': ' + d['desc'] if d['desc'] else '', i18n['repository'], d['mirror'].upper()), p)
op.icon_path = _get_mirror_icon(d['mirror'])
opts.append(op)
view_opts = MultipleSelectComponent(label='',
options=opts,
default_options=None)
install = watcher.request_confirmation(title=i18n['arch.install.optdeps.request.title'],
body='<p>{}.</p><p>{}:</p>'.format(i18n['arch.install.optdeps.request.body'].format(bold(pkgname)), i18n['arch.install.optdeps.request.help']),
components=[view_opts],
confirmation_label=i18n['install'].capitalize(),
deny_label=i18n['do_not.install'].capitalize())
if install:
return {o.value for o in view_opts.values}
def request_install_missing_deps(pkgname: str, deps: List[Tuple[str, str]], watcher: ProcessWatcher, i18n: I18n) -> bool:
msg = '<p>{}</p>'.format(i18n['arch.missing_deps.body'].format(name=bold(pkgname) if pkgname else '', deps=bold(str(len(deps)))))
opts = []
sorted_deps = [*deps]
sorted_deps.sort(key=lambda e: e[0])
for dep in sorted_deps:
op = InputOption('{} ( {}: {} )'.format(dep[0], i18n['repository'], dep[1].upper()), dep[0])
op.read_only = True
op.icon_path = _get_mirror_icon(dep[1])
opts.append(op)
comp = MultipleSelectComponent(label='', options=opts, default_options=set(opts))
return watcher.request_confirmation(i18n['arch.missing_deps.title'], msg, [comp], confirmation_label=i18n['continue'].capitalize(), deny_label=i18n['cancel'].capitalize())
| 2,044
| 0
| 69
|
8c8b782e7893e9058796384c46f91ed54d54153c
| 154
|
py
|
Python
|
tests/unit/equality/test_effective_action.py
|
etta-trust/PolicyGlass
|
72157189a9af3172e6efbdcc2050969796cfa99f
|
[
"MIT"
] | 49
|
2021-12-21T23:15:55.000Z
|
2022-03-28T09:38:30.000Z
|
tests/unit/equality/test_effective_action.py
|
etta-trust/PolicyGlass
|
72157189a9af3172e6efbdcc2050969796cfa99f
|
[
"MIT"
] | 3
|
2021-12-23T22:02:02.000Z
|
2022-01-10T14:16:24.000Z
|
tests/unit/equality/test_effective_action.py
|
etta-trust/PolicyGlass
|
72157189a9af3172e6efbdcc2050969796cfa99f
|
[
"MIT"
] | 1
|
2022-02-22T11:03:27.000Z
|
2022-02-22T11:03:27.000Z
|
from policyglass import Action, EffectiveAction
| 25.666667
| 77
| 0.746753
|
from policyglass import Action, EffectiveAction
def test_equality_true():
assert EffectiveAction(Action("s3:*")) == EffectiveAction(Action("s3:*"))
| 82
| 0
| 23
|
f370feeb4f230cf0228e838425c9a052204e009d
| 447
|
py
|
Python
|
tests/test_model.py
|
rychallener/TauREx3_public
|
eb0eeeeca8f47e5e7d64d8d70b43a3af370b7677
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_model.py
|
rychallener/TauREx3_public
|
eb0eeeeca8f47e5e7d64d8d70b43a3af370b7677
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_model.py
|
rychallener/TauREx3_public
|
eb0eeeeca8f47e5e7d64d8d70b43a3af370b7677
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import shutil
import tempfile
from os import path
from unittest.mock import patch, mock_open
from taurex.model.model import ForwardModel
from taurex.model.simplemodel import SimpleForwardModel
import numpy as np
import pickle
| 19.434783
| 55
| 0.782998
|
import unittest
import shutil
import tempfile
from os import path
from unittest.mock import patch, mock_open
from taurex.model.model import ForwardModel
from taurex.model.simplemodel import SimpleForwardModel
import numpy as np
import pickle
class ForwardModelTest(unittest.TestCase):
def test_init(self):
pass
class SimpleForwardModelTest(unittest.TestCase):
def test_init(self):
model = SimpleForwardModel('test')
| 54
| 48
| 100
|
76070ea40a45d68adecb44f6e49e28cbf613905d
| 610
|
py
|
Python
|
LeetCode_589.py
|
xulu199705/LeetCode
|
9a654a10117a93f9ad9728d6b86eb3713185545e
|
[
"MIT"
] | null | null | null |
LeetCode_589.py
|
xulu199705/LeetCode
|
9a654a10117a93f9ad9728d6b86eb3713185545e
|
[
"MIT"
] | null | null | null |
LeetCode_589.py
|
xulu199705/LeetCode
|
9a654a10117a93f9ad9728d6b86eb3713185545e
|
[
"MIT"
] | null | null | null |
from typing import List
# 迭代先序遍历 (iterative preorder traversal)
| 22.592593
| 58
| 0.503279
|
from typing import List
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
# 迭代先序遍历 (iterative preorder traversal)
class Solution:
def preorder(self, root: 'Node') -> List[int]:
if root == None:
return []
ans = []
stack = []
stack.append(root)
while stack:
tmp = stack.pop()
ans.append(tmp.val)
# print(ans)
for i in range(len(tmp.children) - 1, -1, -1):
stack.append(tmp.children[i])
# print([it.val for it in stack])
return ans
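# Editor's illustration (not part of the original submission): a quick check of
# the iterative preorder above on a small 3-ary tree.
if __name__ == "__main__":
    root = Node(1, [Node(2, []), Node(3, []), Node(4, [])])
    print(Solution().preorder(root))  # expected: [1, 2, 3, 4]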
| 495
| -16
| 97
|
81c9590de251767964ac11b32b67667e2d773702
| 448
|
py
|
Python
|
Examples/Session09/yield_example.py
|
Sharmila8/intropython2016
|
a69aa6f6d0cd28c6a29d0b8adb9ef1ff9e2e8479
|
[
"Unlicense"
] | null | null | null |
Examples/Session09/yield_example.py
|
Sharmila8/intropython2016
|
a69aa6f6d0cd28c6a29d0b8adb9ef1ff9e2e8479
|
[
"Unlicense"
] | null | null | null |
Examples/Session09/yield_example.py
|
Sharmila8/intropython2016
|
a69aa6f6d0cd28c6a29d0b8adb9ef1ff9e2e8479
|
[
"Unlicense"
] | null | null | null |
# if __name__ == '__main__':
# print "the generator function:"
# print repr(counter)
# print "call generator function"
# c = counter()
# print "the generator:"
# print repr(c)
# print 'iterate'
# for item in c:
# print 'received:', item
| 19.478261
| 38
| 0.540179
|
def counter():
print('counter: starting counter')
i = -3
while i < 3:
i = i + 1
print('counter: yield', i)
yield i
return None
# if __name__ == '__main__':
# print "the generator function:"
# print repr(counter)
# print "call generator function"
# c = counter()
# print "the generator:"
# print repr(c)
# print 'iterate'
# for item in c:
# print 'received:', item
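# Editor's note: a Python 3 rendering of the commented-out Python 2 driver above
# (illustration only, not part of the original exercise file).
if __name__ == '__main__':
    print("the generator function:", repr(counter))
    c = counter()
    print("the generator:", repr(c))
    print("iterate")
    for item in c:
        print("received:", item)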
| 145
| 0
| 22
|
ea7cde7eec2b5e8c99aacc733efb17738221bb81
| 4,318
|
py
|
Python
|
startrek-py/startrek/net/hub.py
|
moky/wormhole
|
bbfcfce13eb0ee86f8bb006deb7a3881173352c2
|
[
"MIT"
] | 5
|
2020-05-24T03:35:00.000Z
|
2021-06-05T00:27:54.000Z
|
startrek-py/startrek/net/hub.py
|
moky/wormhole
|
bbfcfce13eb0ee86f8bb006deb7a3881173352c2
|
[
"MIT"
] | null | null | null |
startrek-py/startrek/net/hub.py
|
moky/wormhole
|
bbfcfce13eb0ee86f8bb006deb7a3881173352c2
|
[
"MIT"
] | 2
|
2020-09-11T05:29:11.000Z
|
2022-03-13T15:45:22.000Z
|
# -*- coding: utf-8 -*-
#
# Star Trek: Interstellar Transport
#
# Written in 2021 by Moky <albert.moky@gmail.com>
#
# ==============================================================================
# MIT License
#
# Copyright (c) 2021 Albert Moky
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import socket
from abc import ABC, abstractmethod
from typing import Optional, Set
from ..fsm import Processor
from .channel import Channel
from .connection import Connection
class Hub(Processor, ABC):
""" Connections & Channels Container """
@abstractmethod
def open_channel(self, remote: Optional[tuple], local: Optional[tuple]) -> Optional[Channel]:
"""
Open a channel with direction (remote, local)
:param remote: remote address to connected
:param local: local address to bound
:return: None on socket error
"""
raise NotImplemented
@abstractmethod
def close_channel(self, channel: Channel):
"""
Close socket channel
:param channel: socket channel
:return:
"""
raise NotImplemented
@abstractmethod
def connect(self, remote: tuple, local: Optional[tuple] = None) -> Optional[Connection]:
"""
Get connection with direction (remote, local)
:param remote: remote address
:param local: local address
:return: None on channel not opened
"""
raise NotImplemented
@abstractmethod
def disconnect(self, remote: tuple = None, local: Optional[tuple] = None,
connection: Connection = None) -> Optional[Connection]:
"""
Close connection
:param remote: remote address
:param local: local address
:param connection: closing connection
:return: closed connection
"""
raise NotImplemented
#
# Local Address
#
@classmethod
@classmethod
@classmethod
@classmethod
| 32.223881
| 119
| 0.603057
|
# -*- coding: utf-8 -*-
#
# Star Trek: Interstellar Transport
#
# Written in 2021 by Moky <albert.moky@gmail.com>
#
# ==============================================================================
# MIT License
#
# Copyright (c) 2021 Albert Moky
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import socket
from abc import ABC, abstractmethod
from typing import Optional, Set
from ..fsm import Processor
from .channel import Channel
from .connection import Connection
class Hub(Processor, ABC):
""" Connections & Channels Container """
@abstractmethod
def open_channel(self, remote: Optional[tuple], local: Optional[tuple]) -> Optional[Channel]:
"""
Open a channel with direction (remote, local)
:param remote: remote address to connected
:param local: local address to bound
:return: None on socket error
"""
raise NotImplemented
@abstractmethod
def close_channel(self, channel: Channel):
"""
Close socket channel
:param channel: socket channel
:return:
"""
raise NotImplemented
@abstractmethod
def connect(self, remote: tuple, local: Optional[tuple] = None) -> Optional[Connection]:
"""
Get connection with direction (remote, local)
:param remote: remote address
:param local: local address
:return: None on channel not opened
"""
raise NotImplemented
@abstractmethod
def disconnect(self, remote: tuple = None, local: Optional[tuple] = None,
connection: Connection = None) -> Optional[Connection]:
"""
Close connection
:param remote: remote address
:param local: local address
:param connection: closing connection
:return: closed connection
"""
raise NotImplemented
#
# Local Address
#
@classmethod
def host_name(cls) -> str:
return socket.gethostname()
@classmethod
def addr_info(cls): # -> List[Tuple[Union[AddressFamily, int], Union[SocketKind, int], int, str, Tuple[Any, ...]]]
host = socket.gethostname()
if host is not None:
try:
return socket.getaddrinfo(host, None)
except socket.error as error:
print('[NET] failed to get address info: %s' % error)
return []
@classmethod
def inet_addresses(cls) -> Set[str]:
addresses = set()
info = cls.addr_info()
for item in info:
addresses.add(item[4][0])
return addresses
@classmethod
def inet_address(cls) -> Optional[str]:
# get from addr info
info = cls.addr_info()
for item in info:
ip = item[4][0]
if ':' not in ip and '127.0.0.1' != ip:
return ip
# get from UDP socket
sock = None
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
remote = ('8.8.8.8', 8888)
sock.connect(remote)
ip = sock.getsockname()[0]
finally:
if sock is not None:
sock.close()
return ip
| 1,119
| 0
| 104
|
2b3dc140b83bd17428d4540aee0588d643a72af6
| 2,405
|
py
|
Python
|
Python_Misc/TMWP_OO_CustDataStructures.py
|
TheMitchWorksPro/DataTech_Playground
|
d62266d21762315f431fb4f707940095901b85e6
|
[
"MIT"
] | null | null | null |
Python_Misc/TMWP_OO_CustDataStructures.py
|
TheMitchWorksPro/DataTech_Playground
|
d62266d21762315f431fb4f707940095901b85e6
|
[
"MIT"
] | null | null | null |
Python_Misc/TMWP_OO_CustDataStructures.py
|
TheMitchWorksPro/DataTech_Playground
|
d62266d21762315f431fb4f707940095901b85e6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Filename: TMWP_OO_CustDataStructures.py
# TMWP = TheMitchWorksPro
# Functions and/or Objects for smarter handling of common data structures
# required imports are noted in the code where used/required
# from ... import ...
version = '0.1'
python_version_support = 'code should be compatible w/ Python 2.7 and Python 3.x'
import collections
# source: https://code.activestate.com/recipes/576694/
# added to collection for testing but may or may not be tested yet
# notes say this was created for Python 2.6 but should be compatible w/ 2.7 and 3.x
# Alternatives: from sortedcontainers import SortedSet
# pip install sortedcontainers library to use it
# libraries needed: import collections
| 30.833333
| 92
| 0.575468
|
#!/usr/bin/python
# Filename: TMWP_OO_CustDataStructures.py
# TMWP = TheMitchWorksPro
# Functions and/or Objects for smarter handling of common data structures
# required imports are noted in the code where used/required
# from ... import ...
version = '0.1'
python_version_support = 'code should be compatible w/ Python 2.7 and Python 3.x'
import collections
class OrderedSet(collections.MutableSet):
# source: https://code.activestate.com/recipes/576694/
# added to collection for testing but may or may not be tested yet
# notes say this was created for Python 2.6 but should be compatible w/ 2.7 and 3.x
# Alternatives: from sortedcontainers import SortedSet
# pip install sortedcontainers library to use it
# libraries needed: import collections
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
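# Editor's illustration (not part of the original recipe): basic behaviour of the
# OrderedSet above -- insertion order is preserved across set operations.
if __name__ == '__main__':
    s = OrderedSet('abracadabra')
    t = OrderedSet('simsalabim')
    print(s | t)   # OrderedSet(['a', 'b', 'r', 'c', 'd', 's', 'i', 'm', 'l'])
    print(s & t)   # OrderedSet(['a', 'b'])
    print(s - t)   # OrderedSet(['r', 'c', 'd'])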
| 1,325
| 20
| 294
|
afc7055446a0082505c7e85721cf070a71325344
| 30,535
|
py
|
Python
|
py/instalog/plugins/buffer_file_common.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | 3
|
2022-01-06T16:52:52.000Z
|
2022-03-07T11:30:47.000Z
|
py/instalog/plugins/buffer_file_common.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | null | null | null |
py/instalog/plugins/buffer_file_common.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | 1
|
2021-10-24T01:47:22.000Z
|
2021-10-24T01:47:22.000Z
|
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""File-based buffer common.
A file-based buffer which writes its events to a single file on disk, and
separately maintains metadata.
There are three files maintained, plus one for each consumer created:
data.json:
Stores actual data. Each line corresponds to one event. As events are
written to disk, each one is given a sequence number. Format of each line:
[SEQ_NUM, {EVENT_DATA}, CRC_SUM]
Writing SEQ_NUM to data.json is not strictly necessary since we keep track
of sequence numbers in metadata, but it is useful for debugging, and could
also help when restoring a corrupt database.
metadata.json:
Stores current sequence numbers and cursor positions. The "first seq" and
"start pos" are taken to be absolute to the original untruncated data file,
and refer to the beginning of data currently stored on disk.
So if seq=1 was consumed by all Consumers, and Truncate removed it from
disk, first_seq would be set to 2.
Note that since the cursor positions are absolute, start_pos must be
subtracted to get the actual position in the file on disk, e.g.:
f.seek(current_pos - start_pos)
consumers.json:
Stores a list of all active Consumers. If a Consumer is removed, it will be
removed from this list, but its metadata file will continue to exist. If it
is ever re-created, the existing metadata will be used. If this is
undesired behaviour, the metadata file for that Consumer should be manually
deleted.
consumer_X.json:
Stores the sequence number and cursor position of a particular Consumer.
Versioning:
Another concept that is worth explaining separately is "versioning". We want
to support truncating, that is, when our file contains N records which have
already been consumed by all Consumers, and M remaining records, remove the
first N records from the main data file in order to save disk space. After
rewriting the data file, update metadata accordingly.
But what happens if a failure occurs in between these two steps? Our "old"
metadata is now paired with a "new" data file, which means we will likely
be unable to read anything properly.
To solve this problem, before re-writing the main data file, we save a
metadata file to disk with both "old" and "new" metadata versions *before*
performing a truncate on the main data file. The key is a CRC hash of the
first line of the main data file. When the buffer first starts, it will check
to see which key matches the first line, and it will use this metadata
version.
Thus, if a failure occurs *before* writing the main data file, the "old"
metadata can be used. If a failure occurs *after* writing the main data file,
the "new" metadata can be used.
"""
import copy
import json
import logging
import os
import shutil
import zlib
from cros.factory.instalog import datatypes
from cros.factory.instalog import lock_utils
from cros.factory.instalog import log_utils
from cros.factory.instalog import plugin_base
from cros.factory.instalog.utils import file_utils
# The number of bytes to buffer when retrieving events from a file.
_BUFFER_SIZE_BYTES = 4 * 1024 # 4kb
class SimpleFileException(Exception):
"""General exception type for this plugin."""
def GetChecksumLegacy(data):
"""Generates an 8-character CRC32 checksum of given string."""
# TODO(chuntsen): Remove this legacy function.
# The function crc32() returns a signed 32-bit integer in Python2, but it
# returns an unsigned 32-bit integer in Python3. To generate the same value
# across all Python versions, we convert unsigned integer to signed integer.
checksum = zlib.crc32(data.encode('utf-8'))
if checksum >= 2**31:
checksum -= 2**32
return '{:08x}'.format(abs(checksum))
def GetChecksum(data):
"""Generates an 8-character CRC32 checksum of given string."""
# The function crc32() returns a signed 32-bit integer in Python2, but it
# returns an unsigned 32-bit integer in Python3. To generate the same value
# across all Python versions, we use "crc32() & 0xffffffff".
return '{:08x}'.format(zlib.crc32(data.encode('utf-8')) & 0xffffffff)
def FormatRecord(seq, record):
"""Returns a record formatted as a line to be written to disk."""
data = '%d, %s' % (seq, record)
checksum = GetChecksum(data)
return '[%s, "%s"]\n' % (data, checksum)
def ParseRecord(line, logger_name=None):
"""Parses and returns a line from disk as a record.
Returns:
A tuple of (seq_number, record), or None on failure.
"""
logger = logging.getLogger(logger_name)
line_inner = line.rstrip()[1:-1] # Strip [] and newline
data, _, checksum = line_inner.rpartition(', ')
# TODO(chuntsen): Change this method after a long time.
checksum = checksum.strip('"')
seq, _, record = data.partition(', ')
if not seq or not record:
logger.warning('Parsing error for record %s', line.rstrip())
return None, None
if checksum != GetChecksum(data) and checksum != GetChecksumLegacy(data):
logger.warning('Checksum error for record %s', line.rstrip())
return None, None
return int(seq), record
def TryLoadJSON(path, logger_name=None):
"""Attempts to load JSON from the given file.
Returns:
Parsed data from the file. None if the file does not exist.
Raises:
Exception if there was some other problem reading the file, or if something
went wrong parsing the data.
"""
logger = logging.getLogger(logger_name)
if not os.path.isfile(path):
logger.debug('%s: does not exist', path)
return None
try:
with open(path, 'r') as f:
return json.load(f)
except Exception:
logger.exception('%s: Error reading disk or loading JSON', path)
raise
def CopyAttachmentsToTempDir(att_paths, tmp_dir, logger_name=None):
"""Copys attachments to the temporary directory."""
logger = logging.getLogger(logger_name)
try:
for att_path in att_paths:
# Check that the source file exists.
if not os.path.isfile(att_path):
raise ValueError('Attachment path `%s` specified in event does not '
'exist' % att_path)
target_path = os.path.join(tmp_dir, att_path.replace('/', '_'))
logger.debug('Copying attachment: %s --> %s',
att_path, target_path)
with open(target_path, 'w') as dst_f:
with open(att_path, 'r') as src_f:
shutil.copyfileobj(src_f, dst_f)
# Fsync the file and the containing directory to make sure it
# is flushed to disk.
dst_f.flush()
os.fdatasync(dst_f)
# Fsync the containing directory to make sure all attachments are flushed
# to disk.
dirfd = os.open(tmp_dir, os.O_DIRECTORY)
os.fsync(dirfd)
os.close(dirfd)
return True
except Exception:
logger.exception('Exception encountered when copying attachments')
return False
def MoveAndWrite(config_dct, events):
"""Moves the atts, serializes the events and writes them to the data_path."""
logger = logging.getLogger(config_dct['logger_name'])
metadata_dct = RestoreMetadata(config_dct)
cur_seq = metadata_dct['last_seq'] + 1
cur_pos = metadata_dct['end_pos'] - metadata_dct['start_pos']
# Truncate the size of the file in case of a previously unfinished
# transaction.
with open(config_dct['data_path'], 'a') as f:
f.truncate(cur_pos)
with open(config_dct['data_path'], 'a') as f:
# On some machines, the file handle offset isn't set to EOF until
# a write occurs. Thus we must manually seek to the end to ensure
# that f.tell() will return useful results.
f.seek(0, 2) # 2 means use EOF as the reference point.
assert f.tell() == cur_pos
for event in events:
for att_id, att_path in event.attachments.items():
target_name = '%s_%s' % (cur_seq, att_id)
target_path = os.path.join(config_dct['attachments_dir'], target_name)
event.attachments[att_id] = target_name
logger.debug('Relocating attachment %s: %s --> %s',
att_id, att_path, target_path)
# Note: This could potentially overwrite an existing file that got
# written just before Instalog process stopped unexpectedly.
os.rename(att_path, target_path)
logger.debug('Writing event with cur_seq=%d, cur_pos=%d',
cur_seq, cur_pos)
output = FormatRecord(cur_seq, event.Serialize())
# Store the version for SaveMetadata to use.
if cur_pos == 0:
metadata_dct['version'] = GetChecksum(output)
f.write(output)
cur_seq += 1
cur_pos += len(output)
if config_dct['args'].enable_fsync:
# Fsync the file and the containing directory to make sure it
# is flushed to disk.
f.flush()
os.fdatasync(f)
dirfd = os.open(os.path.dirname(config_dct['data_path']),
os.O_DIRECTORY)
os.fsync(dirfd)
os.close(dirfd)
metadata_dct['last_seq'] = cur_seq - 1
metadata_dct['end_pos'] = metadata_dct['start_pos'] + cur_pos
SaveMetadata(config_dct, metadata_dct)
def SaveMetadata(config_dct, metadata_dct, old_metadata_dct=None):
"""Writes metadata of main database to disk."""
if not metadata_dct['version']:
raise SimpleFileException('No `version` available for SaveMetadata')
data = {metadata_dct['version']: metadata_dct}
if old_metadata_dct and old_metadata_dct['version']:
if metadata_dct['version'] == old_metadata_dct['version']:
raise SimpleFileException(
'Same `version` from new metadata and old metadata')
data[old_metadata_dct['version']] = old_metadata_dct
with file_utils.AtomicWrite(config_dct['metadata_path'], fsync=True) as f:
json.dump(data, f)
def RestoreMetadata(config_dct):
"""Restores version from the main data file on disk.
If the metadata file does not exist, will silently return.
"""
logger = logging.getLogger(config_dct['logger_name'])
metadata_dct = {'first_seq': 1, 'last_seq': 0,
'start_pos': 0, 'end_pos': 0,
'version': '00000000'}
data = TryLoadJSON(config_dct['metadata_path'], logger.name)
if data is not None:
try:
with open(config_dct['data_path'], 'r') as f:
metadata_dct['version'] = GetChecksum(f.readline())
except Exception:
logger.error('Data file unexpectedly missing; resetting metadata')
return metadata_dct
if metadata_dct['version'] not in data:
logger.error('Could not find metadata version %s (available: %s); '
'recovering metadata from data file',
metadata_dct['version'], ', '.join(data.keys()))
RecoverMetadata(config_dct, metadata_dct)
return metadata_dct
if len(data) > 1:
logger.info('Metadata contains multiple versions %s; choosing %s',
', '.join(data.keys()), metadata_dct['version'])
metadata_dct.update(data[metadata_dct['version']])
if (metadata_dct['end_pos'] >
metadata_dct['start_pos'] + os.path.getsize(config_dct['data_path'])):
logger.error('end_pos in restored metadata is larger than start_pos + '
'data file; recovering metadata from data file')
RecoverMetadata(config_dct, metadata_dct)
else:
if os.path.isfile(config_dct['data_path']):
logger.error('Could not find metadata file, but we have data file; '
'recovering metadata from data file')
RecoverMetadata(config_dct, metadata_dct)
else:
logger.info('Creating metadata file and data file')
SaveMetadata(config_dct, metadata_dct)
file_utils.TouchFile(config_dct['data_path'])
return metadata_dct
def RecoverMetadata(config_dct, metadata_dct):
"""Recovers metadata from the main data file on disk.
Uses the first valid record for first_seq and start_pos, and the last
valid record for last_seq and end_pos.
"""
logger = logging.getLogger(config_dct['logger_name'])
first_record = True
cur_pos = 0
with open(config_dct['data_path'], 'r') as f:
for line in f:
seq, _unused_record = ParseRecord(line, config_dct['logger_name'])
if first_record and seq:
metadata_dct['first_seq'] = seq
metadata_dct['start_pos'] = cur_pos
first_record = False
cur_pos += len(line)
if seq:
metadata_dct['last_seq'] = seq
metadata_dct['end_pos'] = cur_pos
logger.info('Finished recovering metadata; sequence range found: %d to %d',
metadata_dct['first_seq'], metadata_dct['last_seq'])
SaveMetadata(config_dct, metadata_dct)
def TruncateAttachments(config_dct, metadata_dct):
"""Deletes attachments of events no longer stored within data.json."""
logger = logging.getLogger(config_dct['logger_name'])
for fname in os.listdir(config_dct['attachments_dir']):
fpath = os.path.join(config_dct['attachments_dir'], fname)
if not os.path.isfile(fpath):
continue
seq, unused_underscore, unused_att_id = fname.partition('_')
if not seq.isdigit():
continue
if int(seq) < metadata_dct['first_seq']:
logger.debug('Truncating attachment (<seq=%d): %s',
metadata_dct['first_seq'], fname)
os.unlink(fpath)
def Truncate(config_dct, min_seq, min_pos):
"""Truncates the main data file to only contain unprocessed records.
See file-level docstring for more information about versions.
"""
logger = logging.getLogger(config_dct['logger_name'])
metadata_dct = RestoreMetadata(config_dct)
# Does the buffer already have data in it?
if not metadata_dct['version']:
return
if metadata_dct['first_seq'] == min_seq:
logger.info('No need to truncate')
return
try:
logger.debug('Will truncate up until seq=%d, pos=%d', min_seq, min_pos)
# Prepare the old vs. new metadata to write to disk.
old_metadata_dct = copy.deepcopy(metadata_dct)
metadata_dct['first_seq'] = min_seq
metadata_dct['start_pos'] = min_pos
with file_utils.AtomicWrite(config_dct['data_path'], fsync=True) as new_f:
# AtomicWrite opens a file handle to a temporary file right next to
# the real file (data_path), so we can open a "read" handle on data_path
# without affecting AtomicWrite's handle. Only when AtomicWrite's context
# block ends will the temporary be moved to replace data_path.
with open(config_dct['data_path'], 'r') as old_f:
old_f.seek(min_pos - old_metadata_dct['start_pos'])
# Deal with the first line separately to get the new version.
first_line = old_f.readline()
metadata_dct['version'] = GetChecksum(first_line)
new_f.write(first_line)
shutil.copyfileobj(old_f, new_f)
# Before performing the "replace" step of write-replace (when
# the file_utils.AtomicWrite context ends), save metadata to disk in
# case of disk failure.
SaveMetadata(config_dct, metadata_dct, old_metadata_dct)
# After we use AtomicWrite, we can remove old metadata.
SaveMetadata(config_dct, metadata_dct)
except Exception:
logger.exception('Exception occurred during Truncate operation')
raise
class Consumer(log_utils.LoggerMixin, plugin_base.BufferEventStream):
"""Represents a Consumer and its BufferEventStream.
Since SimpleFile has only a single database file, there can only ever be one
functioning BufferEventStream at any given time. So we bundle the Consumer
and its BufferEventStream into one object. When CreateStream is called, a
lock is acquired and the Consumer object is return. The lock must first be
acquired before any of Next, Commit, or Abort can be used.
"""
def CreateStream(self):
"""Creates a BufferEventStream object to be used by Instalog core.
Since this class doubles as BufferEventStream, we mark that the
BufferEventStream is "unexpired" by setting self._stream_lock,
and return self.
Returns:
`self` if BufferEventStream not already in use, None if busy.
"""
return self if self._stream_lock.acquire(False) else None
def _SaveMetadata(self):
"""Saves metadata for this Consumer to disk (seq and pos)."""
data = {'cur_seq': self.cur_seq,
'cur_pos': self.cur_pos}
with file_utils.AtomicWrite(self.metadata_path, fsync=True) as f:
json.dump(data, f)
def RestoreConsumerMetadata(self):
"""Restores metadata for this Consumer from disk (seq and pos).
On each restore, ensure that the available window of records on disk has
not surpassed our own current record. How would this happen? If the
Consumer is removed, records it still hasn't read are truncated from the
main database, and the Consumer is re-added under the same name.
If the metadata file does not exist, will silently return.
"""
data = TryLoadJSON(self.metadata_path, self.logger.name)
if data is not None:
if 'cur_seq' not in data or 'cur_pos' not in data:
self.error('Consumer %s metadata file invalid; resetting', self.name)
return
# Make sure we are still ahead of simple_file.
with self.read_lock:
metadata_dct = RestoreMetadata(self.simple_file.ConfigToDict())
self.cur_seq = min(max(metadata_dct['first_seq'], data['cur_seq']),
metadata_dct['last_seq'] + 1)
self.cur_pos = min(max(metadata_dct['start_pos'], data['cur_pos']),
metadata_dct['end_pos'])
if (data['cur_seq'] < metadata_dct['first_seq'] or
data['cur_seq'] > (metadata_dct['last_seq'] + 1)):
self.error('Consumer %s cur_seq=%d is out of buffer range %d to %d, '
'correcting to %d', self.name, data['cur_seq'],
metadata_dct['first_seq'], metadata_dct['last_seq'] + 1,
self.cur_seq)
self.new_seq = self.cur_seq
self.new_pos = self.cur_pos
def _Buffer(self):
"""Returns a list of pending records.
Stores the current buffer internally at self.read_buf. If it already has
data in it, self.read_buf will be returned as-is. It will be "refilled"
when it is empty.
Reads up to _BUFFER_SIZE_BYTES from the file on each "refill".
Returns:
A list of records, where each is a three-element tuple:
(record_seq, record_data, line_bytes).
"""
if self.read_buf:
return self.read_buf
# Does the buffer already have data in it?
if not os.path.exists(self.simple_file.data_path):
return self.read_buf
self.debug('_Buffer: waiting for read_lock')
try:
# When the buffer is truncating, we can't get the read_lock.
if not self.read_lock.acquire(timeout=0.5):
return []
metadata_dct = RestoreMetadata(self.simple_file.ConfigToDict())
with open(self.simple_file.data_path, 'r') as f:
cur = self.new_pos - metadata_dct['start_pos']
f.seek(cur)
total_bytes = 0
skipped_bytes = 0
for line in f:
if total_bytes > _BUFFER_SIZE_BYTES:
break
size = len(line)
cur += size
if cur > (metadata_dct['end_pos'] - metadata_dct['start_pos']):
break
seq, record = ParseRecord(line, self.logger.name)
if seq is None:
# Parsing of this line failed for some reason.
skipped_bytes += size
continue
# Only add to total_bytes for a valid line.
total_bytes += size
# Include any skipped bytes from previously skipped records in the
# "size" of this record, in order to allow the consumer to skip to the
# proper offset.
self.read_buf.append((seq, record, size + skipped_bytes))
skipped_bytes = 0
finally:
self.read_lock.CheckAndRelease()
return self.read_buf
def _Next(self):
"""Helper for Next, also used for testing purposes.
Returns:
A tuple of (seq, record), or (None, None) if no records available.
"""
if not self._stream_lock.IsHolder():
raise plugin_base.EventStreamExpired
buf = self._Buffer()
if not buf:
return None, None
seq, record, size = buf.pop(0)
self.new_seq = seq + 1
self.new_pos += size
return seq, record
def Next(self):
"""See BufferEventStream.Next."""
seq, record = self._Next()
if not seq:
return None
event = datatypes.Event.Deserialize(record)
return self.simple_file.ExternalizeEvent(event)
def Commit(self):
"""See BufferEventStream.Commit."""
if not self._stream_lock.IsHolder():
raise plugin_base.EventStreamExpired
self.cur_seq = self.new_seq
self.cur_pos = self.new_pos
# Ensure that regardless of any errors, locks are released.
try:
self._SaveMetadata()
except Exception:
# TODO(kitching): Instalog core or PluginSandbox should catch this
# exception and attempt to safely shut down.
self.exception('Commit: Write exception occurred, Events may be '
'processed by output plugin multiple times')
finally:
try:
self._stream_lock.release()
except Exception:
# TODO(kitching): Instalog core or PluginSandbox should catch this
# exception and attempt to safely shut down.
self.exception('Commit: Internal error occurred')
def Abort(self):
"""See BufferEventStream.Abort."""
if not self._stream_lock.IsHolder():
raise plugin_base.EventStreamExpired
self.new_seq = self.cur_seq
self.new_pos = self.cur_pos
self.read_buf = []
try:
self._stream_lock.release()
except Exception:
# TODO(kitching): Instalog core or PluginSandbox should catch this
# exception and attempt to safely shut down.
self.exception('Abort: Internal error occurred')
| 38.360553
| 80
| 0.684166
|
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""File-based buffer common.
A file-based buffer which writes its events to a single file on disk, and
separately maintains metadata.
There are three files maintained, plus one for each consumer created:
data.json:
Stores actual data. Each line corresponds to one event. As events are
written to disk, each one is given a sequence number. Format of each line:
[SEQ_NUM, {EVENT_DATA}, CRC_SUM]
Writing SEQ_NUM to data.json is not strictly necessary since we keep track
of sequence numbers in metadata, but it is useful for debugging, and could
also help when restoring a corrupt database.
metadata.json:
Stores current sequence numbers and cursor positions. The "first seq" and
"start pos" are taken to be absolute to the original untruncated data file,
and refer to the beginning of data currently stored on disk.
So if seq=1 was consumed by all Consumers, and Truncate removed it from
disk, first_seq would be set to 2.
Note that since the cursor positions are absolute, start_pos must be
subtracted to get the actual position in the file on disk, e.g.:
f.seek(current_pos - start_pos)
consumers.json:
Stores a list of all active Consumers. If a Consumer is removed, it will be
removed from this list, but its metadata file will continue to exist. If it
is ever re-created, the existing metadata will be used. If this is
undesired behaviour, the metadata file for that Consumer should be manually
deleted.
consumer_X.json:
Stores the sequence number and cursor position of a particular Consumer.
Versioning:
Another concept that is worth explaining separately is "versioning". We want
to support truncating, that is, when our file contains N records which have
already been consumed by all Consumers, and M remaining records, remove the
first N records from the main data file in order to save disk space. After
rewriting the data file, update metadata accordingly.
But what happens if a failure occurs in between these two steps? Our "old"
metadata is now paired with a "new" data file, which means we will likely
be unable to read anything properly.
To solve this problem, before re-writing the main data file, we save a
metadata file to disk with both "old" and "new" metadata versions *before*
performing a truncate on the main data file. The key is a CRC hash of the
first line of the main data file. When the buffer first starts, it will check
to see which key matches the first line, and it will use this metadata
version.
Thus, if a failure occurs *before* writing the main data file, the "old"
metadata can be used. If a failure occurs *after* writing the main data file,
the "new" metadata can be used.
"""
import copy
import json
import logging
import os
import shutil
import zlib
from cros.factory.instalog import datatypes
from cros.factory.instalog import lock_utils
from cros.factory.instalog import log_utils
from cros.factory.instalog import plugin_base
from cros.factory.instalog.utils import file_utils
# The number of bytes to buffer when retrieving events from a file.
_BUFFER_SIZE_BYTES = 4 * 1024 # 4kb
class SimpleFileException(Exception):
"""General exception type for this plugin."""
def GetChecksumLegacy(data):
"""Generates an 8-character CRC32 checksum of given string."""
# TODO(chuntsen): Remove this legacy function.
# The function crc32() returns a signed 32-bit integer in Python2, but it
# returns an unsigned 32-bit integer in Python3. To generate the same value
# across all Python versions, we convert unsigned integer to signed integer.
checksum = zlib.crc32(data.encode('utf-8'))
if checksum >= 2**31:
checksum -= 2**32
return '{:08x}'.format(abs(checksum))
def GetChecksum(data):
"""Generates an 8-character CRC32 checksum of given string."""
# The function crc32() returns a signed 32-bit integer in Python2, but it
# returns an unsigned 32-bit integer in Python3. To generate the same value
# across all Python versions, we use "crc32() & 0xffffffff".
return '{:08x}'.format(zlib.crc32(data.encode('utf-8')) & 0xffffffff)
def FormatRecord(seq, record):
"""Returns a record formatted as a line to be written to disk."""
data = '%d, %s' % (seq, record)
checksum = GetChecksum(data)
return '[%s, "%s"]\n' % (data, checksum)
def ParseRecord(line, logger_name=None):
"""Parses and returns a line from disk as a record.
Returns:
A tuple of (seq_number, record), or None on failure.
"""
logger = logging.getLogger(logger_name)
line_inner = line.rstrip()[1:-1] # Strip [] and newline
data, _, checksum = line_inner.rpartition(', ')
# TODO(chuntsen): Change this method after a long time.
checksum = checksum.strip('"')
seq, _, record = data.partition(', ')
if not seq or not record:
logger.warning('Parsing error for record %s', line.rstrip())
return None, None
if checksum != GetChecksum(data) and checksum != GetChecksumLegacy(data):
logger.warning('Checksum error for record %s', line.rstrip())
return None, None
return int(seq), record
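# Illustrative round trip of the two helpers above (the placeholder "xxxxxxxx"
# stands for the real CRC32 that GetChecksum would produce):
#
#   line = FormatRecord(7, '{"type": "test"}')
#   # line == '[7, {"type": "test"}, "xxxxxxxx"]\n'
#   ParseRecord(line)
#   # -> (7, '{"type": "test"}')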
def TryLoadJSON(path, logger_name=None):
"""Attempts to load JSON from the given file.
Returns:
Parsed data from the file. None if the file does not exist.
Raises:
Exception if there was some other problem reading the file, or if something
went wrong parsing the data.
"""
logger = logging.getLogger(logger_name)
if not os.path.isfile(path):
logger.debug('%s: does not exist', path)
return None
try:
with open(path, 'r') as f:
return json.load(f)
except Exception:
logger.exception('%s: Error reading disk or loading JSON', path)
raise
def CopyAttachmentsToTempDir(att_paths, tmp_dir, logger_name=None):
"""Copys attachments to the temporary directory."""
logger = logging.getLogger(logger_name)
try:
for att_path in att_paths:
# Check that the source file exists.
if not os.path.isfile(att_path):
raise ValueError('Attachment path `%s` specified in event does not '
'exist' % att_path)
target_path = os.path.join(tmp_dir, att_path.replace('/', '_'))
logger.debug('Copying attachment: %s --> %s',
att_path, target_path)
with open(target_path, 'w') as dst_f:
with open(att_path, 'r') as src_f:
shutil.copyfileobj(src_f, dst_f)
        # Fsync the file to make sure it is flushed to disk; the containing
        # directory is fsynced once after the loop.
dst_f.flush()
os.fdatasync(dst_f)
# Fsync the containing directory to make sure all attachments are flushed
# to disk.
dirfd = os.open(tmp_dir, os.O_DIRECTORY)
os.fsync(dirfd)
os.close(dirfd)
return True
except Exception:
logger.exception('Exception encountered when copying attachments')
return False
def MoveAndWrite(config_dct, events):
"""Moves the atts, serializes the events and writes them to the data_path."""
logger = logging.getLogger(config_dct['logger_name'])
metadata_dct = RestoreMetadata(config_dct)
cur_seq = metadata_dct['last_seq'] + 1
cur_pos = metadata_dct['end_pos'] - metadata_dct['start_pos']
# Truncate the size of the file in case of a previously unfinished
# transaction.
with open(config_dct['data_path'], 'a') as f:
f.truncate(cur_pos)
with open(config_dct['data_path'], 'a') as f:
# On some machines, the file handle offset isn't set to EOF until
# a write occurs. Thus we must manually seek to the end to ensure
# that f.tell() will return useful results.
f.seek(0, 2) # 2 means use EOF as the reference point.
assert f.tell() == cur_pos
for event in events:
for att_id, att_path in event.attachments.items():
target_name = '%s_%s' % (cur_seq, att_id)
target_path = os.path.join(config_dct['attachments_dir'], target_name)
event.attachments[att_id] = target_name
logger.debug('Relocating attachment %s: %s --> %s',
att_id, att_path, target_path)
# Note: This could potentially overwrite an existing file that got
        # written just before the Instalog process stopped unexpectedly.
os.rename(att_path, target_path)
logger.debug('Writing event with cur_seq=%d, cur_pos=%d',
cur_seq, cur_pos)
output = FormatRecord(cur_seq, event.Serialize())
# Store the version for SaveMetadata to use.
if cur_pos == 0:
metadata_dct['version'] = GetChecksum(output)
f.write(output)
cur_seq += 1
cur_pos += len(output)
if config_dct['args'].enable_fsync:
# Fsync the file and the containing directory to make sure it
# is flushed to disk.
f.flush()
os.fdatasync(f)
dirfd = os.open(os.path.dirname(config_dct['data_path']),
os.O_DIRECTORY)
os.fsync(dirfd)
os.close(dirfd)
metadata_dct['last_seq'] = cur_seq - 1
metadata_dct['end_pos'] = metadata_dct['start_pos'] + cur_pos
SaveMetadata(config_dct, metadata_dct)
def SaveMetadata(config_dct, metadata_dct, old_metadata_dct=None):
"""Writes metadata of main database to disk."""
if not metadata_dct['version']:
raise SimpleFileException('No `version` available for SaveMetadata')
data = {metadata_dct['version']: metadata_dct}
if old_metadata_dct and old_metadata_dct['version']:
if metadata_dct['version'] == old_metadata_dct['version']:
raise SimpleFileException(
'Same `version` from new metadata and old metadata')
data[old_metadata_dct['version']] = old_metadata_dct
with file_utils.AtomicWrite(config_dct['metadata_path'], fsync=True) as f:
json.dump(data, f)
def RestoreMetadata(config_dct):
"""Restores version from the main data file on disk.
If the metadata file does not exist, will silently return.
"""
logger = logging.getLogger(config_dct['logger_name'])
metadata_dct = {'first_seq': 1, 'last_seq': 0,
'start_pos': 0, 'end_pos': 0,
'version': '00000000'}
data = TryLoadJSON(config_dct['metadata_path'], logger.name)
if data is not None:
try:
with open(config_dct['data_path'], 'r') as f:
metadata_dct['version'] = GetChecksum(f.readline())
except Exception:
logger.error('Data file unexpectedly missing; resetting metadata')
return metadata_dct
if metadata_dct['version'] not in data:
logger.error('Could not find metadata version %s (available: %s); '
'recovering metadata from data file',
metadata_dct['version'], ', '.join(data.keys()))
RecoverMetadata(config_dct, metadata_dct)
return metadata_dct
if len(data) > 1:
logger.info('Metadata contains multiple versions %s; choosing %s',
', '.join(data.keys()), metadata_dct['version'])
metadata_dct.update(data[metadata_dct['version']])
if (metadata_dct['end_pos'] >
metadata_dct['start_pos'] + os.path.getsize(config_dct['data_path'])):
logger.error('end_pos in restored metadata is larger than start_pos + '
'data file; recovering metadata from data file')
RecoverMetadata(config_dct, metadata_dct)
else:
if os.path.isfile(config_dct['data_path']):
logger.error('Could not find metadata file, but we have data file; '
'recovering metadata from data file')
RecoverMetadata(config_dct, metadata_dct)
else:
logger.info('Creating metadata file and data file')
SaveMetadata(config_dct, metadata_dct)
file_utils.TouchFile(config_dct['data_path'])
return metadata_dct
def RecoverMetadata(config_dct, metadata_dct):
"""Recovers metadata from the main data file on disk.
Uses the first valid record for first_seq and start_pos, and the last
valid record for last_seq and end_pos.
"""
logger = logging.getLogger(config_dct['logger_name'])
first_record = True
cur_pos = 0
with open(config_dct['data_path'], 'r') as f:
for line in f:
seq, _unused_record = ParseRecord(line, config_dct['logger_name'])
if first_record and seq:
metadata_dct['first_seq'] = seq
metadata_dct['start_pos'] = cur_pos
first_record = False
cur_pos += len(line)
if seq:
metadata_dct['last_seq'] = seq
metadata_dct['end_pos'] = cur_pos
logger.info('Finished recovering metadata; sequence range found: %d to %d',
metadata_dct['first_seq'], metadata_dct['last_seq'])
SaveMetadata(config_dct, metadata_dct)
def TruncateAttachments(config_dct, metadata_dct):
"""Deletes attachments of events no longer stored within data.json."""
logger = logging.getLogger(config_dct['logger_name'])
for fname in os.listdir(config_dct['attachments_dir']):
fpath = os.path.join(config_dct['attachments_dir'], fname)
if not os.path.isfile(fpath):
continue
seq, unused_underscore, unused_att_id = fname.partition('_')
if not seq.isdigit():
continue
if int(seq) < metadata_dct['first_seq']:
logger.debug('Truncating attachment (<seq=%d): %s',
metadata_dct['first_seq'], fname)
os.unlink(fpath)
def Truncate(config_dct, min_seq, min_pos):
"""Truncates the main data file to only contain unprocessed records.
See file-level docstring for more information about versions.
"""
logger = logging.getLogger(config_dct['logger_name'])
metadata_dct = RestoreMetadata(config_dct)
# Does the buffer already have data in it?
if not metadata_dct['version']:
return
if metadata_dct['first_seq'] == min_seq:
logger.info('No need to truncate')
return
try:
logger.debug('Will truncate up until seq=%d, pos=%d', min_seq, min_pos)
# Prepare the old vs. new metadata to write to disk.
old_metadata_dct = copy.deepcopy(metadata_dct)
metadata_dct['first_seq'] = min_seq
metadata_dct['start_pos'] = min_pos
with file_utils.AtomicWrite(config_dct['data_path'], fsync=True) as new_f:
# AtomicWrite opens a file handle to a temporary file right next to
# the real file (data_path), so we can open a "read" handle on data_path
# without affecting AtomicWrite's handle. Only when AtomicWrite's context
# block ends will the temporary be moved to replace data_path.
with open(config_dct['data_path'], 'r') as old_f:
old_f.seek(min_pos - old_metadata_dct['start_pos'])
# Deal with the first line separately to get the new version.
first_line = old_f.readline()
metadata_dct['version'] = GetChecksum(first_line)
new_f.write(first_line)
shutil.copyfileobj(old_f, new_f)
# Before performing the "replace" step of write-replace (when
# the file_utils.AtomicWrite context ends), save metadata to disk in
# case of disk failure.
SaveMetadata(config_dct, metadata_dct, old_metadata_dct)
# After we use AtomicWrite, we can remove old metadata.
SaveMetadata(config_dct, metadata_dct)
except Exception:
logger.exception('Exception occurred during Truncate operation')
raise
class BufferFile(log_utils.LoggerMixin):
def __init__(self, args, logger_name, data_dir):
"""Sets up the plugin."""
self.args = args
self.logger = logging.getLogger(logger_name)
self.data_dir = data_dir
self.data_path = os.path.join(
data_dir, 'data.json')
self.metadata_path = os.path.join(
data_dir, 'metadata.json')
self.consumers_list_path = os.path.join(
data_dir, 'consumers.json')
self.consumer_path_format = os.path.join(
data_dir, 'consumer_%s.json')
self.attachments_dir = os.path.join(
data_dir, 'attachments')
if not os.path.exists(self.attachments_dir):
os.makedirs(self.attachments_dir)
# Lock for writing to the self.data_path file. Used by
# Produce and Truncate.
self.data_write_lock = lock_utils.Lock(logger_name)
# Lock for modifying the self.consumers variable or for
# preventing other threads from changing it.
self._consumer_lock = lock_utils.Lock(logger_name)
self.consumers = {}
self._RestoreConsumers()
@property
def first_seq(self):
return RestoreMetadata(self.ConfigToDict())['first_seq']
@property
def last_seq(self):
return RestoreMetadata(self.ConfigToDict())['last_seq']
@property
def start_pos(self):
return RestoreMetadata(self.ConfigToDict())['start_pos']
@property
def end_pos(self):
return RestoreMetadata(self.ConfigToDict())['end_pos']
@property
def version(self):
return RestoreMetadata(self.ConfigToDict())['version']
def _SaveConsumers(self):
"""Saves the current list of active Consumers to disk."""
with file_utils.AtomicWrite(self.consumers_list_path, fsync=True) as f:
json.dump(list(self.consumers), f)
def _RestoreConsumers(self):
"""Restore Consumers from disk.
Creates a corresponding Consumer object for each Consumer listed on disk.
Only ever called when the buffer first starts up, so we don't need to
check for any existing Consumers in self.consumers.
"""
data = TryLoadJSON(self.consumers_list_path, self.logger.name)
if data:
for name in data:
self.consumers[name] = self._CreateConsumer(name)
def ExternalizeEvent(self, event):
"""Modifies attachment paths of given event to be absolute."""
for att_id in event.attachments.keys():
# Reconstruct the full path to the attachment on disk.
event.attachments[att_id] = os.path.abspath(os.path.join(
self.attachments_dir, event.attachments[att_id]))
return event
def ConfigToDict(self):
return {'logger_name': self.logger.name, 'args': self.args,
'data_dir': self.data_dir, 'data_path': self.data_path,
'metadata_path': self.metadata_path,
'consumers_list_path': self.consumers_list_path,
'consumer_path_format': self.consumer_path_format,
'attachments_dir': self.attachments_dir}
def ProduceEvents(self, events, process_pool=None):
"""Moves attachments, serializes events and writes them to the data_path."""
with self.data_write_lock:
# If we are going to write the first line which will change the version,
# we should prevent the data and metadata from being read by consumers.
metadata_dct = RestoreMetadata(self.ConfigToDict())
first_line = (metadata_dct['start_pos'] == metadata_dct['end_pos'])
try:
if first_line:
self._consumer_lock.acquire()
for consumer in self.consumers.values():
consumer.read_lock.acquire()
if process_pool is None:
MoveAndWrite(self.ConfigToDict(), events)
else:
process_pool.apply(MoveAndWrite, (self.ConfigToDict(), events))
except Exception:
self.exception('Exception occurred during ProduceEvents operation')
raise
finally:
# Ensure that regardless of any errors, locks are released.
if first_line:
for consumer in self.consumers.values():
consumer.read_lock.CheckAndRelease()
self._consumer_lock.CheckAndRelease()
def _GetFirstUnconsumedRecord(self):
"""Returns the seq and pos of the first unprocessed record.
Checks each Consumer to find the earliest unprocessed record, and returns
that record's seq and pos.
"""
metadata_dct = RestoreMetadata(self.ConfigToDict())
min_seq = metadata_dct['last_seq'] + 1
min_pos = metadata_dct['end_pos']
for consumer in self.consumers.values():
min_seq = min(min_seq, consumer.cur_seq)
min_pos = min(min_pos, consumer.cur_pos)
return min_seq, min_pos
def Truncate(self, truncate_attachments=True, process_pool=None):
"""Truncates the main data file to only contain unprocessed records.
See file-level docstring for more information about versions.
Args:
truncate_attachments: Whether or not to truncate attachments.
For testing.
"""
new_metadata_dct = {}
with self.data_write_lock, self._consumer_lock:
min_seq, min_pos = self._GetFirstUnconsumedRecord()
try:
for consumer in self.consumers.values():
consumer.read_lock.acquire()
if process_pool is None:
Truncate(self.ConfigToDict(), min_seq, min_pos)
else:
process_pool.apply(
Truncate,
(self.ConfigToDict(), min_seq, min_pos))
new_metadata_dct = RestoreMetadata(self.ConfigToDict())
except Exception:
self.exception('Exception occurred during Truncate operation')
# If any exceptions occurred, restore metadata, to make sure we are
# using the correct version, since we aren't sure if the write succeeded
# or not.
raise
finally:
# Ensure that regardless of any errors, locks are released.
for consumer in self.consumers.values():
consumer.read_lock.CheckAndRelease()
# Now that we have written the new data and metadata to disk, remove any
# unused attachments.
if truncate_attachments:
TruncateAttachments(self.ConfigToDict(), new_metadata_dct)
def _CreateConsumer(self, name):
"""Returns a new Consumer object with the given name."""
return Consumer(
name, self, self.consumer_path_format % name, self.logger.name)
def AddConsumer(self, name):
"""See BufferPlugin.AddConsumer."""
self.debug('Add consumer %s', name)
with self.data_write_lock, self._consumer_lock:
if name in self.consumers:
raise SimpleFileException('Consumer %s already exists' % name)
self.consumers[name] = self._CreateConsumer(name)
self._SaveConsumers()
def RemoveConsumer(self, name):
"""See BufferPlugin.RemoveConsumer."""
self.debug('Remove consumer %s', name)
with self.data_write_lock, self._consumer_lock:
if name not in self.consumers:
raise SimpleFileException('Consumer %s does not exist' % name)
del self.consumers[name]
self._SaveConsumers()
def ListConsumers(self):
"""See BufferPlugin.ListConsumers."""
with self._consumer_lock:
# cur_seq represents the sequence ID of the consumer's next event. If
# that event doesn't exist yet, it will be set to the next (non-existent)
# sequence ID. We must subtract 1 to get the "last completed" event.
cur_seqs = {key: consumer.cur_seq - 1
for key, consumer in self.consumers.items()}
# Grab last_seq at the end, in order to guarantee that for any consumer,
# last_seq >= cur_seq, and that all last_seq are equal.
last_seq = self.last_seq
return {key: (cur_seq, last_seq)
for key, cur_seq in cur_seqs.items()}
def Consume(self, name):
"""See BufferPlugin.Consume."""
return self.consumers[name].CreateStream()
class Consumer(log_utils.LoggerMixin, plugin_base.BufferEventStream):
"""Represents a Consumer and its BufferEventStream.
Since SimpleFile has only a single database file, there can only ever be one
functioning BufferEventStream at any given time. So we bundle the Consumer
and its BufferEventStream into one object. When CreateStream is called, a
  lock is acquired and the Consumer object is returned. The lock must first be
acquired before any of Next, Commit, or Abort can be used.
"""
def __init__(self, name, simple_file, metadata_path, logger_name):
self.name = name
self.simple_file = simple_file
self.metadata_path = metadata_path
self.logger = logging.getLogger(logger_name)
self._stream_lock = lock_utils.Lock(logger_name)
self.read_lock = lock_utils.Lock(logger_name)
self.read_buf = []
with self.read_lock:
metadata_dct = RestoreMetadata(self.simple_file.ConfigToDict())
self.cur_seq = metadata_dct['first_seq']
self.cur_pos = metadata_dct['start_pos']
self.new_seq = self.cur_seq
self.new_pos = self.cur_pos
# Try restoring metadata, if it exists.
self.RestoreConsumerMetadata()
self._SaveMetadata()
def CreateStream(self):
"""Creates a BufferEventStream object to be used by Instalog core.
Since this class doubles as BufferEventStream, we mark that the
BufferEventStream is "unexpired" by setting self._stream_lock,
and return self.
Returns:
`self` if BufferEventStream not already in use, None if busy.
"""
return self if self._stream_lock.acquire(False) else None
def _SaveMetadata(self):
"""Saves metadata for this Consumer to disk (seq and pos)."""
data = {'cur_seq': self.cur_seq,
'cur_pos': self.cur_pos}
with file_utils.AtomicWrite(self.metadata_path, fsync=True) as f:
json.dump(data, f)
def RestoreConsumerMetadata(self):
"""Restores metadata for this Consumer from disk (seq and pos).
On each restore, ensure that the available window of records on disk has
not surpassed our own current record. How would this happen? If the
Consumer is removed, records it still hasn't read are truncated from the
main database, and the Consumer is re-added under the same name.
If the metadata file does not exist, will silently return.
"""
data = TryLoadJSON(self.metadata_path, self.logger.name)
if data is not None:
if 'cur_seq' not in data or 'cur_pos' not in data:
self.error('Consumer %s metadata file invalid; resetting', self.name)
return
# Make sure we are still ahead of simple_file.
with self.read_lock:
metadata_dct = RestoreMetadata(self.simple_file.ConfigToDict())
self.cur_seq = min(max(metadata_dct['first_seq'], data['cur_seq']),
metadata_dct['last_seq'] + 1)
self.cur_pos = min(max(metadata_dct['start_pos'], data['cur_pos']),
metadata_dct['end_pos'])
if (data['cur_seq'] < metadata_dct['first_seq'] or
data['cur_seq'] > (metadata_dct['last_seq'] + 1)):
self.error('Consumer %s cur_seq=%d is out of buffer range %d to %d, '
'correcting to %d', self.name, data['cur_seq'],
metadata_dct['first_seq'], metadata_dct['last_seq'] + 1,
self.cur_seq)
self.new_seq = self.cur_seq
self.new_pos = self.cur_pos
def _Buffer(self):
"""Returns a list of pending records.
Stores the current buffer internally at self.read_buf. If it already has
data in it, self.read_buf will be returned as-is. It will be "refilled"
when it is empty.
Reads up to _BUFFER_SIZE_BYTES from the file on each "refill".
Returns:
A list of records, where each is a three-element tuple:
(record_seq, record_data, line_bytes).
"""
if self.read_buf:
return self.read_buf
# Does the buffer already have data in it?
if not os.path.exists(self.simple_file.data_path):
return self.read_buf
self.debug('_Buffer: waiting for read_lock')
try:
# When the buffer is truncating, we can't get the read_lock.
if not self.read_lock.acquire(timeout=0.5):
return []
metadata_dct = RestoreMetadata(self.simple_file.ConfigToDict())
with open(self.simple_file.data_path, 'r') as f:
cur = self.new_pos - metadata_dct['start_pos']
f.seek(cur)
total_bytes = 0
skipped_bytes = 0
for line in f:
if total_bytes > _BUFFER_SIZE_BYTES:
break
size = len(line)
cur += size
if cur > (metadata_dct['end_pos'] - metadata_dct['start_pos']):
break
seq, record = ParseRecord(line, self.logger.name)
if seq is None:
# Parsing of this line failed for some reason.
skipped_bytes += size
continue
# Only add to total_bytes for a valid line.
total_bytes += size
# Include any skipped bytes from previously skipped records in the
# "size" of this record, in order to allow the consumer to skip to the
# proper offset.
self.read_buf.append((seq, record, size + skipped_bytes))
skipped_bytes = 0
finally:
self.read_lock.CheckAndRelease()
return self.read_buf
def _Next(self):
"""Helper for Next, also used for testing purposes.
Returns:
A tuple of (seq, record), or (None, None) if no records available.
"""
if not self._stream_lock.IsHolder():
raise plugin_base.EventStreamExpired
buf = self._Buffer()
if not buf:
return None, None
seq, record, size = buf.pop(0)
self.new_seq = seq + 1
self.new_pos += size
return seq, record
def Next(self):
"""See BufferEventStream.Next."""
seq, record = self._Next()
if not seq:
return None
event = datatypes.Event.Deserialize(record)
return self.simple_file.ExternalizeEvent(event)
def Commit(self):
"""See BufferEventStream.Commit."""
if not self._stream_lock.IsHolder():
raise plugin_base.EventStreamExpired
self.cur_seq = self.new_seq
self.cur_pos = self.new_pos
# Ensure that regardless of any errors, locks are released.
try:
self._SaveMetadata()
except Exception:
# TODO(kitching): Instalog core or PluginSandbox should catch this
# exception and attempt to safely shut down.
self.exception('Commit: Write exception occurred, Events may be '
'processed by output plugin multiple times')
finally:
try:
self._stream_lock.release()
except Exception:
# TODO(kitching): Instalog core or PluginSandbox should catch this
# exception and attempt to safely shut down.
self.exception('Commit: Internal error occurred')
def Abort(self):
"""See BufferEventStream.Abort."""
if not self._stream_lock.IsHolder():
raise plugin_base.EventStreamExpired
self.new_seq = self.cur_seq
self.new_pos = self.cur_pos
self.read_buf = []
try:
self._stream_lock.release()
except Exception:
# TODO(kitching): Instalog core or PluginSandbox should catch this
# exception and attempt to safely shut down.
self.exception('Abort: Internal error occurred')
| 1,321
| 7,147
| 48
|
543088b20a8479e7f442e1d23bca724597885950
| 11,927
|
py
|
Python
|
model/trainer.py
|
anonymous165/PGRA
|
e14105ceeb881b3567dc4317171340155be50b0b
|
[
"MIT"
] | null | null | null |
model/trainer.py
|
anonymous165/PGRA
|
e14105ceeb881b3567dc4317171340155be50b0b
|
[
"MIT"
] | null | null | null |
model/trainer.py
|
anonymous165/PGRA
|
e14105ceeb881b3567dc4317171340155be50b0b
|
[
"MIT"
] | null | null | null |
from model.pgra import PGRA
from model.pgra_function.sc import Score
from data_utils.data_gen import LinkGenerator, init_seed_fn
from torch.utils.data import DataLoader
from time import perf_counter
import torch
import numpy as np
import config
from model.modules.regularizer import Regularizer
import tempfile
from collections import Counter
from tqdm import tqdm
import os
from model.tracker import LossTracker, MultiClsTracker
| 42.294326
| 120
| 0.594282
|
from model.pgra import PGRA
from model.pgra_function.sc import Score
from data_utils.data_gen import LinkGenerator, init_seed_fn
from torch.utils.data import DataLoader
from time import perf_counter
import torch
import numpy as np
import config
from model.modules.regularizer import Regularizer
import tempfile
from collections import Counter
from tqdm import tqdm
import os
from model.tracker import LossTracker, MultiClsTracker
class Trainer:
_default_optim = 'Adam'
_default_optim_params = {'lr': 1e-4}
def __init__(self, graph, edge_label, emb_size=128, degree=2, n_neighbor=20, batch_size=100, n_neg=5, num_workers=4,
score='inner', self_loop=True, test_batch_size=None, **kwargs):
super().__init__()
self.dataset_name = graph.name
self.graph = graph
self.edge_label = edge_label
self.n_edge_types = len(edge_label.edge_types)
self.emb_size = emb_size
self._cuda = False
subG = self.graph.G.edge_subgraph(self.edge_label.train_edges)
self.batch_size = batch_size
self.self_loop = self_loop
if self_loop:
subG = subG.copy()
for node in subG.nodes:
subG.add_edge(node, node, key=self.n_edge_types, label=self.n_edge_types)
self.subG = subG
self.n_neighbor = n_neighbor
adj_node, adj_rela = self.create_node_neighbors()
self.model = PGRA(graph.node_size, self.n_edge_types + (1 if self_loop else 0), emb_size, n_hop=degree,
n_neighbor=n_neighbor, **kwargs)
self.model.register_neighbors(adj_node, adj_rela)
self.model.reset()
self.lp_model = Score(score=score)
link_gen_train = LinkGenerator(edge_label, graph.nodes_of_types, n_neg=n_neg)
nt2id = dict(link_gen_train.node_type_to_id)
link_gen_val = LinkGenerator(edge_label, graph.nodes_of_types, n_neg=10, mode=1, node_type_to_id=nt2id)
link_gen_test = LinkGenerator(edge_label, graph.nodes_of_types, n_neg=10, mode=2, node_type_to_id=nt2id)
self.data_loader_val = None
self.data_loader_test = None
lg_collate = LinkGenerator.collate_fn
self.data_loader_train = DataLoader(link_gen_train, batch_size=batch_size, num_workers=num_workers,
shuffle=True, collate_fn=lg_collate, worker_init_fn=init_seed_fn())
self.data_loader_train_iter = iter(self.data_loader_train)
if test_batch_size is None:
test_batch_size = batch_size // 4
if len(link_gen_val) > 0:
self.data_loader_val = DataLoader(link_gen_val, batch_size=test_batch_size, num_workers=num_workers,
shuffle=True, collate_fn=lg_collate, worker_init_fn=init_seed_fn())
if len(link_gen_test) > 0:
self.data_loader_test = DataLoader(link_gen_test, batch_size=test_batch_size, num_workers=num_workers,
collate_fn=lg_collate, worker_init_fn=init_seed_fn())
self.used_keys = ['r', 'h', 't', 'h_neg', 't_neg']
self.optim = self._default_optim
self.optim_params = self._default_optim_params
self.optimizer = None
self.total_epoch = 0
self.total_steps = 0
self.pre_name = 'PGRA'
self.temp_dir = tempfile.mkstemp()[1]
self.best_iter = None
self.best_scores = None
self.test_scores = None
def create_node_neighbors(self):
print('create node neighbors')
adj_entity = np.zeros([self.graph.node_size, self.n_neighbor], dtype=np.int64)
adj_relation = np.zeros([self.graph.node_size, self.n_neighbor], dtype=np.int64)
for i in self.subG.nodes:
nbs = list(self.subG.edges(i, data='label'))
edge_type_ct = Counter()
for x in nbs:
edge_type_ct[x[2]] += 1
sampled = np.random.choice(len(nbs), size=self.n_neighbor, replace=True)
adj_entity[i] = [nbs[x][1] for x in sampled]
adj_relation[i] = [nbs[x][2] for x in sampled]
return adj_entity, adj_relation
def run(self, lr=0.0001, weight_decay=0, max_steps=50000, cuda=True, patience=10, metric='mrr', optim='Adam',
save=True):
if cuda:
self.cuda()
self.create_optimizer(optim, lr=lr, weight_decay=weight_decay)
self.train(max_steps=max_steps, patience=patience, metric=metric, save=save)
def train(self, max_steps=50000, patience=10, metric='mrr', save=True, eval_step=None):
if self.optimizer is None:
self.create_optimizer()
min_loss = float('inf')
t0 = perf_counter()
if eval_step is None:
eval_step = len(self.data_loader_train)
if eval_step > 500:
eval_step = 500
patience *= eval_step
reduce_lr = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=2, verbose=True, cooldown=20,
threshold=1e-3)
best_step = 0
self.best_scores = None
print('Training')
for step in range(0, max_steps, eval_step):
t = perf_counter()
losses = self.train_one_epoch(steps=eval_step)
# scores = self._eval(self.data_loader_train, ('mrr',))
# print('Train:', scores)
is_best = False
if self.data_loader_val is not None:
scores = self.eval(val=True)
print("Epoch:", '%04d' % (self.total_epoch + 1), "Step:", step, "train_loss=", losses,
"Val:", scores, "time=", "{:.5f}".format(perf_counter() - t))
if self.best_scores is None or scores[metric] > self.best_scores[metric]:
self.best_scores, best_step = scores, step
self.best_iter = best_step
is_best = True
else:
loss = losses.value
if min_loss > loss:
min_loss, best_step = loss, step
self.best_iter = step
is_best = True
if is_best and save:
self._save(self.temp_dir)
if step - best_step > patience:
print("Early stopping...")
break
reduce_lr.step(losses.value)
if reduce_lr.cooldown_counter == reduce_lr.cooldown:
reduce_lr.best = reduce_lr.mode_worse
train_time = perf_counter() - t0
print("Train time: {:.4f}s".format(train_time, ))
if self.total_steps > 0 and save:
print("Optimization Finished!")
print('Load model')
self._load(self.temp_dir)
self.save()
if self.data_loader_test is not None:
if self.data_loader_val is not None:
val_scores = self.eval(val=True)
print("Val:", val_scores)
self.test_scores = self.eval(val=False)
print("Test:", self.test_scores)
def cuda(self):
self._cuda = True
self.model.cuda()
def create_optimizer(self, optim=None, **kwargs):
if optim is not None:
self.optim = optim
self.optim_params.update(kwargs)
self.optimizer = getattr(torch.optim, self.optim)(
list(self.model.parameters()), **self.optim_params)
def save(self):
self._save(self.get_model_path())
def _save(self, path):
state = {
'model': self.model.state_dict(),
'best_iter': self.best_iter
}
torch.save(state, path)
def load(self):
self._load(self.get_model_path(build_path=False))
def _load(self, path):
state = torch.load(path, map_location='cpu')
self.model.load_state_dict(state['model'], strict=False)
self.best_iter = state['best_iter']
if self._cuda:
self.cuda()
def get_model_path(self, build_path=True):
folder = config.model_dir / self.dataset_name
if build_path:
folder.mkdir(parents=True, exist_ok=True)
path_model = folder / self._join(self.pre_name, self.emb_size, self.edge_label.code)
return path_model
@staticmethod
def _join(*args, sep='_'):
return sep.join([str(arg) for arg in args])
def get_train_data(self):
try:
data = next(self.data_loader_train_iter)
except StopIteration:
self.data_loader_train_iter = iter(self.data_loader_train)
self.total_epoch += 1
data = next(self.data_loader_train_iter)
return data
def train_one_epoch(self, steps=None):
if steps is None:
steps = len(self.data_loader_train)
pbar = tqdm(range(steps), disable=os.environ.get("DISABLE_TQDM", False))
losses = LossTracker()
self.model.train()
self.lp_model.train()
t = 0
for step in pbar:
_t = perf_counter()
all_data = self.get_train_data()
if self._cuda:
all_data = {k: v.cuda() for k, v in all_data.items()}
data = [all_data[x] for x in self.used_keys]
nodes_vec = [self.get_embedding(x, data[0]) for i, x in enumerate(data[1:])]
train_losses = self.lp_model(data[0], *nodes_vec)
if not isinstance(train_losses, tuple) and not isinstance(train_losses, list):
train_losses = [train_losses]
reg_losses = [m.loss for m in self.model.modules() if isinstance(m, Regularizer) and m.loss is not None]
train_losses = train_losses + reg_losses
losses.update(train_losses)
sum(train_losses).backward()
self.optimizer.step()
for m in self.model.modules():
if isinstance(m, Regularizer):
m.loss = None
self.model.constraint()
self.total_steps += 1
t += perf_counter() - _t
pbar.set_description(
'epoch {} loss:{} time:{:.2f}'.format(self.total_epoch, losses, t * len(pbar) / (step + 1)))
return losses
def eval(self, val=True, metrics=('mrr',)):
data_loader = self.data_loader_val if val else self.data_loader_test
return self._eval(data_loader, metrics, n=60000 if val else -1)
def _eval(self, data_loader, metrics, n=-1):
self.model.eval()
self.lp_model.eval()
lp_tracker = MultiClsTracker(metrics, self.n_edge_types)
with torch.no_grad():
for all_data in data_loader:
data = [all_data[x] for x in self.used_keys]
if self._cuda:
data = [x.cuda() for x in data]
r, p1, p2, n1, n2 = data
pos, neg = (p1, p2), (n1, n2)
for i in (0, 1):
feat1 = self.get_embedding(torch.cat((pos[i].unsqueeze(-1), neg[i]), dim=-1), r)
feat2 = self.get_embedding(pos[1 - i], r)
feat_args = (feat1, feat2) if i == 0 else (feat2, feat1)
ratings = self.lp_model.predict(r, *feat_args)
lp_tracker.update(ratings, r)
if 0 < n < len(lp_tracker):
break
print(lp_tracker.macro)
lp_tracker.summarize()
return lp_tracker
def get_embedding(self, node_idx, target_idx):
old_size = node_idx.size()
if target_idx is not None:
target_idx = target_idx.unsqueeze(1) if len(target_idx.size()) != len(node_idx.size()) else target_idx
target_idx = target_idx.expand_as(node_idx).contiguous().view(-1)
node_idx = node_idx.contiguous().view(-1)
return self.model(node_idx, target_idx).view(*old_size, -1)
| 10,933
| 539
| 23
|
df16ed42b4b9cb418284439c991e0ec166bf5c24
| 5,338
|
py
|
Python
|
autolens/pipeline/phase/imaging/phase.py
|
PyJedi/PyAutoLens
|
bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7
|
[
"MIT"
] | null | null | null |
autolens/pipeline/phase/imaging/phase.py
|
PyJedi/PyAutoLens
|
bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7
|
[
"MIT"
] | null | null | null |
autolens/pipeline/phase/imaging/phase.py
|
PyJedi/PyAutoLens
|
bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7
|
[
"MIT"
] | null | null | null |
from astropy import cosmology as cosmo
import autofit as af
from autolens.pipeline import tagging
from autolens.pipeline.phase import dataset
from autolens.pipeline.phase.imaging.analysis import Analysis
from autolens.pipeline.phase.imaging.meta_imaging import MetaImaging
from autolens.pipeline.phase.imaging.result import Result
| 33.15528
| 113
| 0.652117
|
from astropy import cosmology as cosmo
import autofit as af
from autolens.pipeline import tagging
from autolens.pipeline.phase import dataset
from autolens.pipeline.phase.imaging.analysis import Analysis
from autolens.pipeline.phase.imaging.meta_imaging import MetaImaging
from autolens.pipeline.phase.imaging.result import Result
class PhaseImaging(dataset.PhaseDataset):
galaxies = af.PhaseProperty("galaxies")
hyper_image_sky = af.PhaseProperty("hyper_image_sky")
hyper_background_noise = af.PhaseProperty("hyper_background_noise")
Analysis = Analysis
Result = Result
@af.convert_paths
def __init__(
self,
paths,
*,
galaxies=None,
hyper_image_sky=None,
hyper_background_noise=None,
non_linear_class=af.MultiNest,
cosmology=cosmo.Planck15,
sub_size=2,
signal_to_noise_limit=None,
bin_up_factor=None,
psf_shape_2d=None,
auto_positions_factor=None,
positions_threshold=None,
pixel_scale_interpolation_grid=None,
inversion_uses_border=True,
inversion_pixel_limit=None,
):
"""
        A phase in a lens pipeline. Uses the set non_linear optimizer to try to fit models and hyper_galaxies
passed to it.
Parameters
----------
non_linear_class: class
The class of a non_linear optimizer
sub_size: int
The side length of the subgrid
"""
phase_tag = tagging.phase_tag_from_phase_settings(
sub_size=sub_size,
signal_to_noise_limit=signal_to_noise_limit,
bin_up_factor=bin_up_factor,
psf_shape_2d=psf_shape_2d,
auto_positions_factor=auto_positions_factor,
positions_threshold=positions_threshold,
pixel_scale_interpolation_grid=pixel_scale_interpolation_grid,
)
paths.phase_tag = phase_tag
super().__init__(
paths,
galaxies=galaxies,
non_linear_class=non_linear_class,
cosmology=cosmology,
)
self.hyper_image_sky = hyper_image_sky
self.hyper_background_noise = hyper_background_noise
self.is_hyper_phase = False
self.meta_dataset = MetaImaging(
model=self.model,
bin_up_factor=bin_up_factor,
psf_shape_2d=psf_shape_2d,
sub_size=sub_size,
signal_to_noise_limit=signal_to_noise_limit,
auto_positions_factor=auto_positions_factor,
positions_threshold=positions_threshold,
pixel_scale_interpolation_grid=pixel_scale_interpolation_grid,
inversion_uses_border=inversion_uses_border,
inversion_pixel_limit=inversion_pixel_limit,
)
def make_phase_attributes(self, analysis):
return PhaseAttributes(
cosmology=self.cosmology,
hyper_model_image=analysis.hyper_model_image,
hyper_galaxy_image_path_dict=analysis.hyper_galaxy_image_path_dict,
)
def make_analysis(
self, dataset, mask, results=af.ResultsCollection(), positions=None
):
"""
        Create a lens object. Also calls the prior passing and masked_imaging modifying functions to allow child
classes to change the behaviour of the phase.
Parameters
----------
positions
mask: Mask
The default masks passed in by the pipeline
dataset: im.Imaging
            A masked_imaging that has been masked
results: autofit.tools.pipeline.ResultsCollection
The result from the previous phase
Returns
-------
lens : Analysis
            A lens object that the non-linear optimizer calls to determine the fit of a set of values
"""
self.meta_dataset.model = self.model
masked_imaging = self.meta_dataset.masked_dataset_from(
dataset=dataset, mask=mask, positions=positions, results=results
)
self.output_phase_info()
analysis = self.Analysis(
masked_imaging=masked_imaging,
cosmology=self.cosmology,
image_path=self.optimizer.paths.image_path,
results=results,
)
return analysis
def output_phase_info(self):
file_phase_info = "{}/{}".format(
self.optimizer.paths.phase_output_path, "phase.info"
)
with open(file_phase_info, "w") as phase_info:
phase_info.write("Optimizer = {} \n".format(type(self.optimizer).__name__))
phase_info.write("Sub-grid size = {} \n".format(self.meta_dataset.sub_size))
phase_info.write("PSF shape = {} \n".format(self.meta_dataset.psf_shape_2d))
phase_info.write(
"Positions Threshold = {} \n".format(
self.meta_dataset.positions_threshold
)
)
phase_info.write("Cosmology = {} \n".format(self.cosmology))
phase_info.close()
class PhaseAttributes:
def __init__(self, cosmology, hyper_model_image, hyper_galaxy_image_path_dict):
self.cosmology = cosmology
self.hyper_model_image = hyper_model_image
self.hyper_galaxy_image_path_dict = hyper_galaxy_image_path_dict
| 1,183
| 3,749
| 72
|
b2005ebd2213536fab5e45ed07b20f3bde3d470d
| 3,806
|
py
|
Python
|
utils/base_dataset.py
|
stegmuel/DANN_py3
|
8718fbc9e0e809a4ab4399c55d0df336dd44a092
|
[
"MIT"
] | null | null | null |
utils/base_dataset.py
|
stegmuel/DANN_py3
|
8718fbc9e0e809a4ab4399c55d0df336dd44a092
|
[
"MIT"
] | null | null | null |
utils/base_dataset.py
|
stegmuel/DANN_py3
|
8718fbc9e0e809a4ab4399c55d0df336dd44a092
|
[
"MIT"
] | null | null | null |
from torch.utils.data import Dataset
import h5py
import abc
| 41.824176
| 119
| 0.636626
|
from torch.utils.data import Dataset
import h5py
import abc
class BaseDatasetHDF(Dataset):
def __init__(self, hdf5_filepath, phase, batch_size, use_cache, cache_size=30):
"""
Initializes the class BaseDatasetHDF relying on a .hdf5 file that contains the complete data for all phases
(train, test, validation). It contains the input data as well as the target data to reduce the amount of
        computation done on the fly. The samples are first split w.r.t. the phase (train, test, validation) and then w.r.t.
the status (input, target). A pair of (input, target) samples is accessed with the same index. For a given
phase a pair is accessed as: (hdf[phase]['input'][index], hdf[phase]['target'][index]).
The .hdf5 file is stored on disk and only the queried samples are loaded in RAM. To increase retrieval speed
a small cache in RAM is implemented. When using the cache, one should note the following observations:
- The speed will only improve if the data is not shuffled.
- The cache size must be adapted to the computer used.
- The number of workers of the data loader must be adapted to the computer used and the cache size.
- The cache size must be a multiple of the chunk size that was used when filling the .hdf5 file.
:param hdf5_filepath: location of the .hdf5 file.
:param phase: phase in which the dataset is used ('train'/'valid'/'test').
:param batch_size: size of a single batch.
:param use_cache: boolean indicating if the cache should be used or not.
:param cache_size: size of the cache in number of batches.
"""
self.hdf5_filepath = hdf5_filepath
self.phase = phase
self.batch_size = batch_size
# Initialize cache to store in RAM
self.use_cache = use_cache
if self.use_cache:
self.cache = {'input': None, 'target': None}
self.cache_size = cache_size * batch_size
self.cache_min_index = None
self.cache_max_index = None
self.load_chunk_to_cache(0)
def __len__(self):
"""
Returns the total length of the dataset.
:return: length of the dataset.
"""
with h5py.File(self.hdf5_filepath, 'r') as hdf:
length = hdf[self.phase]['input'].shape[0]
return length
def is_in_cache(self, index):
"""
Checks if the queried data is in cache.
:param index: index of the sample to load.
:return: boolean indicating if the data is available in cache.
"""
return index in set(range(self.cache_min_index, self.cache_max_index))
def load_chunk_to_cache(self, index):
"""
Loads a chunk of data in cache from disk. The chunk of data is the block of size self.size_cache and contains
the samples following the current index. This is only efficient if data is not shuffled.
:param index: index of a single sample that is currently being queried.
:return: None.
"""
with h5py.File(self.hdf5_filepath, 'r') as hdf:
self.cache_min_index = index
self.cache_max_index = min(len(self), index + self.cache_size)
self.cache['input'] = hdf[self.phase]['input'][self.cache_min_index: self.cache_max_index]
self.cache['target'] = hdf[self.phase]['target'][self.cache_min_index: self.cache_max_index]
@abc.abstractmethod
def transform(self, x):
"""
:param x:
:return:
"""
@abc.abstractmethod
def get_image_transformer(self):
"""
:return:
"""
@abc.abstractmethod
def transform(self, x):
"""
:param x:
:return:
"""
| 0
| 3,722
| 23
|
b4d3fe982d7be10dd3d9466b7731e2b90baea895
| 17,861
|
py
|
Python
|
model/utils.py
|
Open-Debin/aCASTLE
|
4c539c53ad35bae4592165ca45488d0de90cbc29
|
[
"MIT"
] | 1
|
2021-04-19T12:59:54.000Z
|
2021-04-19T12:59:54.000Z
|
model/utils.py
|
Open-Debin/aCASTLE
|
4c539c53ad35bae4592165ca45488d0de90cbc29
|
[
"MIT"
] | null | null | null |
model/utils.py
|
Open-Debin/aCASTLE
|
4c539c53ad35bae4592165ca45488d0de90cbc29
|
[
"MIT"
] | null | null | null |
import os
import shutil
import time
import pprint
import torch
import argparse
import numpy as np
## ------------------------ Basic Functions ------------------------
def one_hot(indices, depth):
"""
Returns a one-hot tensor.
This is a PyTorch equivalent of Tensorflow's tf.one_hot.
Parameters:
indices: a (n_batch, m) Tensor or (m) Tensor.
depth: a scalar. Represents the depth of the one hot dimension.
Returns: a (n_batch, m, depth) Tensor or (m, depth) Tensor.
"""
encoded_indicies = torch.zeros(indices.size() + torch.Size([depth]))
if indices.is_cuda:
encoded_indicies = encoded_indicies.cuda()
index = indices.view(indices.size()+torch.Size([1]))
encoded_indicies = encoded_indicies.scatter_(1,index,1)
return encoded_indicies
_utils_pp = pprint.PrettyPrinter()
def compute_confidence_interval(data):
"""
Compute 95% confidence interval
:param data: An array of mean accuracy (or mAP) across a number of sampled episodes.
:return: the 95% confidence interval for this data.
"""
a = 1.0 * np.array(data)
m = np.mean(a)
std = np.std(a)
pm = 1.96 * (std / np.sqrt(len(a)))
return m, pm
## ------------------------ GFSL Measures ------------------------
# the method to count harmonic mean in low-shot learning paper
# based on the seen-joint and unseen_joint performance
from sklearn.metrics import average_precision_score
# based on the seen-joint and unseen_joint performance, as measured by mAP
# change recall = tps / tps[-1] in sklearn/metrics/ranking.py to recall = np.ones(tps.size) if tps[-1] == 0 else tps / tps[-1]
## ------------------------ GFSL Training Arguments Related ------------------------
## ------------------------ GFSL Evaluation Arguments Related ------------------------
| 43.038554
| 167
| 0.641621
|
import os
import shutil
import time
import pprint
import torch
import argparse
import numpy as np
## ------------------------ Basic Functions ------------------------
def one_hot(indices, depth):
"""
Returns a one-hot tensor.
This is a PyTorch equivalent of Tensorflow's tf.one_hot.
Parameters:
indices: a (n_batch, m) Tensor or (m) Tensor.
depth: a scalar. Represents the depth of the one hot dimension.
Returns: a (n_batch, m, depth) Tensor or (m, depth) Tensor.
"""
encoded_indicies = torch.zeros(indices.size() + torch.Size([depth]))
if indices.is_cuda:
encoded_indicies = encoded_indicies.cuda()
index = indices.view(indices.size()+torch.Size([1]))
encoded_indicies = encoded_indicies.scatter_(1,index,1)
return encoded_indicies
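# Usage sketch (illustrative, not part of the original file):
#   one_hot(torch.tensor([0, 2, 1]), depth=3)
# returns a (3, 3) tensor whose rows are [1, 0, 0], [0, 0, 1] and [0, 1, 0].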
def set_gpu(x):
os.environ['CUDA_VISIBLE_DEVICES'] = x
print('using gpu:', x)
def ensure_path(dir_path, scripts_to_save=None):
if os.path.exists(dir_path):
if input('{} exists, remove? ([y]/n)'.format(dir_path)) != 'n':
shutil.rmtree(dir_path)
os.mkdir(dir_path)
else:
os.mkdir(dir_path)
print('Experiment dir : {}'.format(dir_path))
if scripts_to_save is not None:
script_path = os.path.join(dir_path, 'scripts')
if not os.path.exists(script_path):
os.makedirs(script_path)
for src_file in scripts_to_save:
dst_file = os.path.join(dir_path, 'scripts', os.path.basename(src_file))
print('copy {} to {}'.format(src_file, dst_file))
if os.path.isdir(src_file):
shutil.copytree(src_file, dst_file)
else:
shutil.copyfile(src_file, dst_file)
class Averager():
def __init__(self):
self.n = 0
self.v = 0
def add(self, x):
self.v = (self.v * self.n + x) / (self.n + 1)
self.n += 1
def item(self):
return self.v
def count_acc(logits, label):
pred = torch.argmax(logits, dim=1)
if torch.cuda.is_available():
return (pred == label).type(torch.cuda.FloatTensor).mean().item()
else:
return (pred == label).type(torch.FloatTensor).mean().item()
def count_acc2(logits, label, mask1, mask2):
pred = torch.argmax(logits, dim=1)
if torch.cuda.is_available():
temp = (pred == label).type(torch.cuda.FloatTensor)
acc = (torch.mean(temp.masked_select(mask1)) + torch.mean(temp.masked_select(mask2))) / 2
else:
temp = (pred == label).type(torch.FloatTensor)
acc = (torch.mean(temp.masked_select(mask1)) + torch.mean(temp.masked_select(mask2))) / 2
return acc.item()
def euclidean_metric(a, b):
n = a.shape[0]
m = b.shape[0]
a = a.unsqueeze(1).expand(n, m, -1)
b = b.unsqueeze(0).expand(n, m, -1)
logits = -((a - b)**2).sum(dim=2)
return logits
class Timer():
def __init__(self):
self.o = time.time()
def measure(self, p=1):
x = (time.time() - self.o) / p
x = int(x)
if x >= 3600:
return '{:.1f}h'.format(x / 3600)
if x >= 60:
return '{}m'.format(round(x / 60))
return '{}s'.format(x)
_utils_pp = pprint.PrettyPrinter()
def pprint(x):
_utils_pp.pprint(x)
def compute_confidence_interval(data):
"""
Compute 95% confidence interval
:param data: An array of mean accuracy (or mAP) across a number of sampled episodes.
:return: the 95% confidence interval for this data.
"""
a = 1.0 * np.array(data)
m = np.mean(a)
std = np.std(a)
pm = 1.96 * (std / np.sqrt(len(a)))
return m, pm
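# Example (illustrative): compute_confidence_interval([0.5, 0.6, 0.7]) returns
# roughly (0.6, 0.09), i.e. a mean accuracy of 60% with a ~9-point half-width
# at the 95% level (1.96 * std / sqrt(n)).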
def top1accuracy(output, target):
_, pred = output.max(dim=1)
pred = pred.view(-1)
target = target.view(-1)
accuracy = 100 * pred.eq(target).float().mean()
return accuracy
## ------------------------ GFSL Measures ------------------------
def count_acc_harmonic(logits, label, th):
pred = torch.argmax(logits, dim=1)
if torch.cuda.is_available():
result = (pred == label).type(torch.cuda.FloatTensor)
else:
result = (pred == label).type(torch.FloatTensor)
seen_acc = result[:th].mean().item()
unseen_acc = result[th:].mean().item()
return 2 * (seen_acc * unseen_acc) / (seen_acc + unseen_acc + 1e-12)
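# Example: with seen accuracy 0.8 and unseen accuracy 0.4 the harmonic mean is
# 2 * 0.8 * 0.4 / (0.8 + 0.4) ~= 0.533, so the weaker of the two sides
# dominates the reported score.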
# the method to count harmonic mean in low-shot learning paper
def count_acc_harmonic_low_shot(logits, label, th, nKbase):
seen_acc = top1accuracy(logits[:th,:nKbase], label[:th])
unseen_acc = top1accuracy(logits[th:,nKbase:], (label[th:]-nKbase))
return 2 * (seen_acc * unseen_acc) / (seen_acc + unseen_acc + 1e-12), seen_acc, unseen_acc
# based on the seen-joint and unseen-joint performance
def count_acc_harmonic_low_shot_joint(logits, label, th):
seen_acc = top1accuracy(logits[:th, :], label[:th])
unseen_acc = top1accuracy(logits[th:, :], label[th:])
return 2 * (seen_acc * unseen_acc) / (seen_acc + unseen_acc + 1e-12), seen_acc, unseen_acc
def count_delta_value(logits, label, th, nKbase):
seen_acc = top1accuracy(logits[:th,:nKbase], label[:th])
unseen_acc = top1accuracy(logits[th:,nKbase:], (label[th:]-nKbase))
joint_acc = top1accuracy(logits, label)
delta1 = 0.5 * (seen_acc + unseen_acc - 2 * joint_acc)
seen_acc_joint = top1accuracy(logits[:th, :], label[:th])
unseen_acc_joint = top1accuracy(logits[th:, :], label[th:])
delta2 = 0.5 * ((seen_acc - seen_acc_joint) + (unseen_acc - unseen_acc_joint))
return delta1, delta2
from sklearn.metrics import average_precision_score
def compute_map(output, target, average_mode):
num_class = output.shape[1]
target = one_hot(target, num_class).cpu().numpy()
output = output.detach().cpu().numpy()
return average_precision_score(target, output, average = average_mode)
def compute_weight_map(output, target, th2=64):
num_class = output.shape[1]
target = one_hot(target, num_class).cpu().numpy()
output = output.cpu().numpy()
map_list1 = []
map_list2 = []
selected_mask1 = np.where(np.sum(target[:,:th2], 0) > 0)[0]
selected_mask2 = np.where(np.sum(target[:,th2:], 0) > 0)[0] + th2
for i in selected_mask1:
map_list1.append(average_precision_score(target[:,i], output[:,i]))
for i in selected_mask2:
map_list2.append(average_precision_score(target[:,i], output[:,i]))
map_seen = np.array(map_list1)
map_unseen = np.array(map_list2)
return np.mean(map_seen), np.mean(map_unseen)
# based on the seen-joint and unseen-joint performance based on MAP
# change recall = tps / tps[-1] in sklearn/metrics/ranking.py to recall = np.ones(tps.size) if tps[-1] == 0 else tps / tps[-1]
def count_acc_harmonic_MAP(logits, label, th, average_mode = 'macro'):
if average_mode == 'weighted':
seen_map, unseen_map = compute_weight_map(logits, label)
else:
seen_map = compute_map(logits[:th, :], label[:th], average_mode)
unseen_map = compute_map(logits[th:, :], label[th:], average_mode)
return 2 * (seen_map * unseen_map) / (seen_map + unseen_map + 1e-12), seen_map, unseen_map
def AUC_eval_class_count(Ypred_S, Ypred_U, label_S, label_U, Ytrue):
# get number counts for AUC evaluation
L_S = label_S.shape[0]
L_U = label_U.shape[0]
class_count_S = np.zeros((L_S, 1))
class_count_U = np.zeros((L_U, 1))
class_correct_S = np.zeros((L_S, 1))
class_correct_U = np.zeros((L_U, 1))
class_count_S = [sum(Ytrue == label_S[i]) for i in range(L_S)]
class_correct_S = [sum((Ytrue == label_S[i]) & (Ypred_S == label_S[i])) for i in range(L_S)]
class_count_U = [sum(Ytrue == label_U[i]) for i in range(L_U)]
class_correct_U = [sum((Ytrue == label_U[i]) & (Ypred_U == label_U[i])) for i in range(L_U)]
class_count_S = np.array(class_count_S)
class_correct_S = np.array(class_correct_S)
class_count_U = np.array(class_count_U)
class_correct_U = np.array(class_correct_U)
    class_count_S[class_count_S == 0] = 10 ** 10  # large sentinel so classes absent from Ytrue contribute ~0 accuracy instead of dividing by zero
    class_count_U[class_count_U == 0] = 10 ** 10
return class_correct_S, class_correct_U, class_count_S, class_count_U
def Compute_HM(acc):
    HM = 2 * acc[0] * acc[1] / (acc[0] + acc[1] + 1e-12)
return HM
def Compute_AUSUC(score_S, score_U, Y, label_S, label_U):
Y = Y.reshape(-1)
label_S = label_S.reshape(-1)
label_U = label_U.reshape(-1)
AUC_record = np.zeros((Y.shape[0] + 1, 2))
label_S = np.unique(label_S)
label_U = np.unique(label_U)
L_S = label_S.shape[0]
L_U = label_U.shape[0]
num_inst = score_S.shape[0]
# effective bias searching
loc_S = np.argmax(score_S, axis=1)
max_S = score_S[np.arange(num_inst), loc_S]
Ypred_S = label_S[loc_S]
loc_U = np.argmax(score_U, axis=1)
max_U = score_U[np.arange(num_inst), loc_U]
Ypred_U = label_U[loc_U]
class_correct_S, class_correct_U, class_count_S, class_count_U = AUC_eval_class_count(Ypred_S, Ypred_U, label_S, label_U, Y)
Y_correct_S = (Ypred_S == Y).astype(float)
Y_correct_U = (Ypred_U == Y).astype(float)
bias = max_S - max_U
loc_B = np.argsort(bias)
_, unique_bias_loc = np.unique(sorted(bias), return_index=True)
unique_bias_loc = unique_bias_loc[1:]
unique_bias_loc = np.append(unique_bias_loc, num_inst) - 1
bias = np.array(sorted(set(bias)))
# efficient evaluation
acc_change_S = np.divide(Y_correct_S[loc_B], class_count_S[loc_S[loc_B]] + 1e-12) / L_S
acc_change_U = np.divide(Y_correct_U[loc_B], class_count_U[loc_U[loc_B]] + 1e-12) / L_U
AUC_record[:, 0] = np.concatenate([np.array([0]), np.cumsum(-acc_change_S)]) + np.mean(class_correct_S / (class_count_S + 1e-12))
AUC_record[:, 1] = np.concatenate([np.array([0]), np.cumsum(acc_change_U)])
AUC_record = AUC_record[np.concatenate([np.array([0]), unique_bias_loc.reshape(-1)+ 1]), :]
AUC_record[AUC_record < 0] = 0
# Compute AUC
AUC_val = np.trapz(AUC_record[:, 0], AUC_record[:, 1])
return AUC_val, AUC_record
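# Shape sketch (illustrative): Compute_AUSUC(score_S, score_U, Y, label_S, label_U)
# expects score_S of shape (N, |S|), score_U of shape (N, |U|), true labels Y of
# shape (N,), and the arrays of seen/unseen class ids; it returns the scalar area
# under the seen-unseen accuracy curve together with AUC_record, the curve itself,
# traced out by sweeping the calibration bias between the two score matrices.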
def Compute_biasedHM(score_S, score_U, Y, label_S, label_U, fixed_bias):
# fixed_bias is a list input
Y = Y.reshape(-1)
label_S = label_S.reshape(-1)
label_U = label_U.reshape(-1)
AUC_record = np.zeros((Y.shape[0] + 1, 2))
label_S = np.unique(label_S)
label_U = np.unique(label_U)
L_S = label_S.shape[0]
L_U = label_U.shape[0]
num_inst = score_S.shape[0]
# effective bias searching
loc_S = np.argmax(score_S, axis=1)
max_S = score_S[np.arange(num_inst), loc_S]
Ypred_S = label_S[loc_S]
loc_U = np.argmax(score_U, axis=1)
max_U = score_U[np.arange(num_inst), loc_U]
Ypred_U = label_U[loc_U]
class_correct_S, class_correct_U, class_count_S, class_count_U = AUC_eval_class_count(Ypred_S, Ypred_U, label_S, label_U, Y)
Y_correct_S = (Ypred_S == Y).astype(float)
Y_correct_U = (Ypred_U == Y).astype(float)
bias = max_S - max_U
loc_B = np.argsort(bias)
_, unique_bias_loc = np.unique(sorted(bias), return_index=True)
unique_bias_loc = unique_bias_loc[1:]
unique_bias_loc = np.append(unique_bias_loc, num_inst) - 1
bias = np.array(sorted(set(bias)))
# efficient evaluation
acc_change_S = np.divide(Y_correct_S[loc_B], class_count_S[loc_S[loc_B]] + 1e-12) / L_S
acc_change_U = np.divide(Y_correct_U[loc_B], class_count_U[loc_U[loc_B]] + 1e-12) / L_U
AUC_record[:, 0] = np.concatenate([np.array([0]), np.cumsum(-acc_change_S)]) + np.mean(class_correct_S / (class_count_S + 1e-12))
AUC_record[:, 1] = np.concatenate([np.array([0]), np.cumsum(acc_change_U)])
AUC_record = AUC_record[np.concatenate([np.array([0]), unique_bias_loc.reshape(-1)+ 1]), :]
AUC_record[AUC_record < 0] = 0
acc_noBias = AUC_record[sum(bias <= 0), :]
# Compute Harmonic mean
HM_nobias = Compute_HM(acc_noBias)
accs = [AUC_record[sum(bias <= f_bias), :] for f_bias in fixed_bias]
HM = [Compute_HM(acc) for acc in accs]
return HM, HM_nobias, accs, acc_noBias
## ------------------------GFSL Training Arguments Related ------------------------
def postprocess_args(args):
save_path1 = args.test_mode + '-' + '-'.join([args.dataset, args.model_class, args.backbone_class, '{:02d}w{:02d}s{:02}q'.format(args.way, args.shot, args.query)])
save_path2 = '_'.join([str('_'.join(args.step_size.split(','))), str(args.gamma),
'lr{:.2g}'.format(args.lr),
'btz{}'.format(args.batch_size),
str(args.lr_scheduler), str(args.temperature),
'ntask{}nclass{}T{}'.format(args.num_tasks, args.sample_class, args.temperature),
#'gpu{}'.format(args.gpu) if args.multi_gpu else 'gpu0',
# str(time.strftime('%Y%m%d_%H%M%S'))
])
if args.init_weights is not None:
save_path1 += '-Pre'
if args.augment:
save_path2 += '-Aug'
if args.lr_mul > 1.0:
save_path2 += 'lrmul{:.2g}'.format(args.lr_mul)
if args.model_class in ['Castle', 'ACastle']:
save_path2 += 'dp{}-h{}'.format(args.dp_rate, args.head)
if not os.path.exists(args.save_dir):
os.mkdir(args.save_dir)
if not os.path.exists(os.path.join(args.save_dir, save_path1)):
os.mkdir(os.path.join(args.save_dir, save_path1))
args.save_path = os.path.join(args.save_dir, save_path1, save_path2)
return args
def get_command_line_parser():
parser = argparse.ArgumentParser()
# task configurations
parser.add_argument('--sample_class', type=int, default=8)
parser.add_argument('--way', type=int, default=5)
parser.add_argument('--eval_way', type=int, default=5)
parser.add_argument('--shot', type=int, default=1)
parser.add_argument('--eval_shot', type=int, default=1)
parser.add_argument('--query', type=int, default=15)
parser.add_argument('--eval_query', type=int, default=15)
parser.add_argument('--num_tasks', type=int, default=3)
parser.add_argument('--test_mode', type=str, default='FSL', choices=['FSL', 'GFSL']) # important
# optimization parameters
parser.add_argument('--max_epoch', type=int, default=200)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--lr_mul', type=float, default=1)
parser.add_argument('--lr_scheduler', type=str, default='cosine', choices=['multistep', 'step', 'cosine'])
parser.add_argument('--step_size', type=str, default='2,4,6,8')
parser.add_argument('--gamma', type=float, default=0.5)
parser.add_argument('--augment', action='store_true', default=False)
parser.add_argument('--gpu', default='0')
parser.add_argument('--init_weights', type=str, default=None)
parser.add_argument('--temperature', type=float, default=1)
# model parameters
parser.add_argument('--model_class', type=str, default='Castle', choices=['Castle', 'ACastle'])
parser.add_argument('--backbone_class', type=str, default='Res12', choices=['Res12'])
parser.add_argument('--dataset', type=str, default='MiniImageNet', choices=['MiniImageNet', 'TieredImageNet'])
# Castle-relate model parameters
parser.add_argument('--head', type=int, default=1)
parser.add_argument('--dp_rate', type=float, default=0.1)
# usually untouched parameters
parser.add_argument('--orig_imsize', type=int, default=-1) # -1 for no cache, and -2 for no resize
parser.add_argument('--mom', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=0.0005)
parser.add_argument('--num_workers', default=2, type=int)
parser.add_argument('--log_interval', type=int, default=100)
parser.add_argument('--eval_interval', type=int, default=200)
parser.add_argument('--save_dir', type=str, default='./checkpoints')
parser.add_argument('--num_eval_episodes', type=int, default=10000)
return parser
## ------------------------GFSL Evaluation Arguments Related ------------------------
def postprocess_eval_args(args):
assert(args.model_path is not None)
return args
def get_eval_command_line_parser():
parser = argparse.ArgumentParser()
# basic configurations
parser.add_argument('--eval_way', type=int, default=5)
parser.add_argument('--eval_shot', type=int, default=1)
parser.add_argument('--eval_query', type=int, default=15)
parser.add_argument('--gpu', default='0')
parser.add_argument('--model_path', type=str, default=None)
# criteria related
parser.add_argument('--criteria', type=str, help='A list contains criteria from [Acc, HMeanAcc, HMeanMAP, Delta, AUSUC]',
default='Acc, HMeanAcc, HMeanMAP, Delta, AUSUC') #
# model parameters
parser.add_argument('--model_class', type=str, default='Castle', choices=['CLS', 'ProtoNet', 'Castle', 'ACastle'])
parser.add_argument('--backbone_class', type=str, default='Res12', choices=['Res12'])
parser.add_argument('--dataset', type=str, default='MiniImageNet', choices=['MiniImageNet', 'TieredImageNet'])
# Castle-relate model parameters
parser.add_argument('--head', type=int, default=1)
parser.add_argument('--dp_rate', type=float, default=0.1)
# usually untouched parameters
parser.add_argument('--orig_imsize', type=int, default=-1) # -1 for no cache, and -2 for no resize
parser.add_argument('--num_workers', type=int, default=2)
parser.add_argument('--num_eval_episodes', type=int, default=500)
parser.add_argument('--temperature', type=float, default=1)
return parser
| 15,373
| -11
| 679
|
3908e95eda71412d07981a24d2f8a254c1d195e7
| 920
|
py
|
Python
|
tests/test_align.py
|
Wytamma/boiga
|
0efa7f910b090c7ef716501bff9ae9a753f3943e
|
[
"MIT"
] | null | null | null |
tests/test_align.py
|
Wytamma/boiga
|
0efa7f910b090c7ef716501bff9ae9a753f3943e
|
[
"MIT"
] | 2
|
2020-06-26T13:17:22.000Z
|
2020-06-27T04:57:37.000Z
|
tests/test_align.py
|
Wytamma/boiga
|
0efa7f910b090c7ef716501bff9ae9a753f3943e
|
[
"MIT"
] | null | null | null |
from pyoinformatics.align import lcs, format_matrix
from pyoinformatics.seq import Seq
| 24.864865
| 66
| 0.401087
|
from pyoinformatics.align import lcs, format_matrix
from pyoinformatics.seq import Seq
def test_lcs():
assert lcs(Seq("AACCTTGG"), Seq("ACACTGTGA")) == Seq("AACTTG")
def test_format_matrix():
Seq1 = Seq("AACCTTGG")
Seq2 = Seq("ACACTGTGA")
M = [
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 2, 2, 2, 2, 2, 2],
[1, 2, 2, 2, 2, 2, 2, 2],
[1, 2, 3, 3, 3, 3, 3, 3],
[1, 2, 3, 3, 4, 4, 4, 4],
[1, 2, 3, 3, 4, 4, 5, 5],
[1, 2, 3, 3, 4, 5, 5, 5],
[1, 2, 3, 3, 4, 5, 6, 6],
[1, 2, 3, 3, 4, 5, 6, 6],
]
assert format_matrix(Seq1, Seq2, M) == [
" A A C C T T G G",
"A 1 1 1 1 1 1 1 1",
"C 1 1 2 2 2 2 2 2",
"A 1 2 2 2 2 2 2 2",
"C 1 2 3 3 3 3 3 3",
"T 1 2 3 3 4 4 4 4",
"G 1 2 3 3 4 4 5 5",
"T 1 2 3 3 4 5 5 5",
"G 1 2 3 3 4 5 6 6",
"A 1 2 3 3 4 5 6 6",
]
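    # The matrix M above is the standard longest-common-subsequence dynamic-programming
    # table: M[i][j] holds the LCS length of Seq2[:i+1] and Seq1[:j+1], so the
    # bottom-right cell, 6, matches len("AACTTG") from test_lcs.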
| 785
| 0
| 46
|
1ac556c1e79ea42c23fedfe570a7165833c427cc
| 1,569
|
py
|
Python
|
openhab_creator/output/items/__init__.py
|
DerOetzi/openhab_creator
|
197876df5aae84192c34418f6b9a7cfcee23b195
|
[
"MIT"
] | 1
|
2021-11-16T22:48:26.000Z
|
2021-11-16T22:48:26.000Z
|
openhab_creator/output/items/__init__.py
|
DerOetzi/openhab_creator
|
197876df5aae84192c34418f6b9a7cfcee23b195
|
[
"MIT"
] | null | null | null |
openhab_creator/output/items/__init__.py
|
DerOetzi/openhab_creator
|
197876df5aae84192c34418f6b9a7cfcee23b195
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import os
from importlib import import_module
from typing import TYPE_CHECKING, List, Type, Dict, Union
from openhab_creator import logger
if TYPE_CHECKING:
from openhab_creator.models.configuration import Configuration
from openhab_creator.output.items.baseitemscreator import BaseItemsCreator
| 30.173077
| 82
| 0.67304
|
from __future__ import annotations
import os
from importlib import import_module
from typing import TYPE_CHECKING, List, Type, Dict, Union
from openhab_creator import logger
if TYPE_CHECKING:
from openhab_creator.models.configuration import Configuration
from openhab_creator.output.items.baseitemscreator import BaseItemsCreator
class ItemsCreator(object):
def __init__(self, outputdir: str):
self.outputdir: str = outputdir
def build(self, configuration: Configuration) -> None:
ItemsCreatorPipeline.build(self.outputdir, configuration)
class ItemsCreatorPipeline(object):
pipeline: List[Dict[str, Union[int, Type[BaseItemsCreator]]]] = []
initialized: bool = False
def __init__(self, order_id: int):
self.order_id: int = order_id
def __call__(self, itemscreator_cls: Type[BaseItemsCreator]):
ItemsCreatorPipeline.pipeline.insert(self.order_id, {
'order': self.order_id,
'class': itemscreator_cls
})
@classmethod
def _init(cls):
if not cls.initialized:
import_module(
'openhab_creator.output.items.creators')
cls.initialized = True
@classmethod
def build(cls, outputdir: str, configuration: Configuration) -> None:
cls._init()
for creator in sorted(cls.pipeline, key=lambda x: x['order']):
logger.info(
f'Item creator: {creator["class"].__name__} ({creator["order"]})')
c = creator['class'](outputdir)
c.build(configuration)
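# Registration sketch (SensorItemsCreator is a hypothetical example class):
#   @ItemsCreatorPipeline(10)
#   class SensorItemsCreator(BaseItemsCreator):
#       def build(self, configuration):
#           ...
# The decorator only records the class in ItemsCreatorPipeline.pipeline (it does not
# return it); registered creators are then run in ascending order of their order_id
# by ItemsCreator.build().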
| 864
| 263
| 99
|
283f91dc3ccaf741fa166962553da42322f2d25f
| 1,736
|
py
|
Python
|
clean.py
|
SatvikVejendla/Titanic-Survival-AI
|
bff812d1753de361a60202e9e729b3780b8a50ac
|
[
"MIT"
] | null | null | null |
clean.py
|
SatvikVejendla/Titanic-Survival-AI
|
bff812d1753de361a60202e9e729b3780b8a50ac
|
[
"MIT"
] | null | null | null |
clean.py
|
SatvikVejendla/Titanic-Survival-AI
|
bff812d1753de361a60202e9e729b3780b8a50ac
|
[
"MIT"
] | null | null | null |
import pandas as pd
from imblearn.over_sampling import RandomOverSampler
import math
#Training Data
re = RandomOverSampler()
df = pd.read_csv("data/raw/train.csv")
y = df["Survived"]
x = df.drop(["Survived", "Cabin", "Name", "PassengerId", "Ticket"], axis=1)
embark = ["C", "Q", "S"]
genders = ["male", "female"]
for i, v in enumerate(x["Embarked"]):
try:
x.at[i, "Embarked"] = embark.index(v) + 1
except ValueError as n:
x.at[i, "Embarked"] = 0
for i, v in enumerate(x["Sex"]):
x.at[i, "Sex"] = genders.index(v)
mean_age = df.describe()["Age"]["mean"]
for i, v in enumerate(x["Age"]):
if(math.isnan(v)):
x.at[i, "Age"] = mean_age
x,y = re.fit_resample(x,y)
df = pd.concat([x,y], axis=1)
df.to_csv("data/processed/train.csv", index=False)
#Test Data
test_df = pd.read_csv("data/raw/test.csv")
test_df = test_df.drop(["Cabin", "Name", "PassengerId", "Ticket"], axis=1)
for i, v in enumerate(test_df["Embarked"]):
try:
test_df.at[i, "Embarked"] = embark.index(v) + 1
except ValueError as n:
test_df.at[i, "Embarked"] = 0
for i, v in enumerate(test_df["Sex"]):
test_df.at[i, "Sex"] = genders.index(v)
for i, v in enumerate(test_df["Age"]):
if(math.isnan(v)):
test_df.at[i, "Age"] = mean_age
for i, v in enumerate(test_df["Age"]):
if(math.isnan(v)):
test_df.at[i, "Age"] = mean_age
mean_fare = df.describe()["Fare"]["mean"]
for i, v in enumerate(test_df["Fare"]):
if(math.isnan(v)):
test_df.at[i, "Fare"] = mean_fare
test_y = pd.read_csv("data/raw/gender.csv")
test_y = test_y.drop(["PassengerId"], axis=1)
test_df = pd.concat([test_df, test_y], axis=1)
test_df.to_csv("data/processed/test.csv", index=False)
| 22.842105
| 75
| 0.619816
|
import pandas as pd
from imblearn.over_sampling import RandomOverSampler
import math
#Training Data
re = RandomOverSampler()
df = pd.read_csv("data/raw/train.csv")
y = df["Survived"]
x = df.drop(["Survived", "Cabin", "Name", "PassengerId", "Ticket"], axis=1)
embark = ["C", "Q", "S"]
genders = ["male", "female"]
for i, v in enumerate(x["Embarked"]):
try:
x.at[i, "Embarked"] = embark.index(v) + 1
except ValueError as n:
x.at[i, "Embarked"] = 0
for i, v in enumerate(x["Sex"]):
x.at[i, "Sex"] = genders.index(v)
mean_age = df.describe()["Age"]["mean"]
for i, v in enumerate(x["Age"]):
if(math.isnan(v)):
x.at[i, "Age"] = mean_age
x,y = re.fit_resample(x,y)
df = pd.concat([x,y], axis=1)
df.to_csv("data/processed/train.csv", index=False)
#Test Data
test_df = pd.read_csv("data/raw/test.csv")
test_df = test_df.drop(["Cabin", "Name", "PassengerId", "Ticket"], axis=1)
for i, v in enumerate(test_df["Embarked"]):
try:
test_df.at[i, "Embarked"] = embark.index(v) + 1
except ValueError as n:
test_df.at[i, "Embarked"] = 0
for i, v in enumerate(test_df["Sex"]):
test_df.at[i, "Sex"] = genders.index(v)
for i, v in enumerate(test_df["Age"]):
if(math.isnan(v)):
test_df.at[i, "Age"] = mean_age
for i, v in enumerate(test_df["Age"]):
if(math.isnan(v)):
test_df.at[i, "Age"] = mean_age
mean_fare = df.describe()["Fare"]["mean"]
for i, v in enumerate(test_df["Fare"]):
if(math.isnan(v)):
test_df.at[i, "Fare"] = mean_fare
test_y = pd.read_csv("data/raw/gender.csv")
test_y = test_y.drop(["PassengerId"], axis=1)
test_df = pd.concat([test_df, test_y], axis=1)
test_df.to_csv("data/processed/test.csv", index=False)
| 0
| 0
| 0
|
641b121922a6092e913d2f84335b0aad5dc05398
| 2,145
|
py
|
Python
|
src/harness/testcases/cu_pass/dpa_calculator/features/steps/dpa_neighborhood/environment/contexts/context_docker.py
|
NSF-Swift/Spectrum-Access-System
|
02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf
|
[
"Apache-2.0"
] | null | null | null |
src/harness/testcases/cu_pass/dpa_calculator/features/steps/dpa_neighborhood/environment/contexts/context_docker.py
|
NSF-Swift/Spectrum-Access-System
|
02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf
|
[
"Apache-2.0"
] | null | null | null |
src/harness/testcases/cu_pass/dpa_calculator/features/steps/dpa_neighborhood/environment/contexts/context_docker.py
|
NSF-Swift/Spectrum-Access-System
|
02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
from cu_pass.dpa_calculator.aggregate_interference_calculator.configuration.support.eirps import \
EIRP_DISTRIBUTION_MAP_TYPE
from cu_pass.dpa_calculator.cbsd.cbsd import CbsdCategories
from cu_pass.dpa_calculator.dpa.builder import RadioAstronomyFacilityNames
from cu_pass.dpa_calculator.dpa.dpa import Dpa
from testcases.cu_pass.dpa_calculator.features.environment.hooks import ContextSas
from testcases.cu_pass.dpa_calculator.features.steps.dpa_neighborhood.environment.contexts.context_cbsd_deployment_options import \
ContextCbsdDeploymentOptions
from testcases.cu_pass.dpa_calculator.features.steps.dpa_neighborhood.environment.contexts.context_monte_carlo_iterations import \
ContextMonteCarloIterations
from testcases.cu_pass.dpa_calculator.features.steps.dpa_neighborhood.environment.parsers.parse_dpa import parse_dpa
ARBITRARY_BUCKET_NAME = 'arbitrary_bucket_name'
ARBITRARY_DPA_NAME = RadioAstronomyFacilityNames.HatCreek.value
ARBITRARY_NUMBER_OF_ITERATIONS = 1
ARBITRARY_RADIUS_IN_KILOMETERS = 2
ARBITRARY_OUTPUT_DIRECTORY = 'arbitrary_output_directory'
| 44.6875
| 131
| 0.844289
|
from typing import List
from cu_pass.dpa_calculator.aggregate_interference_calculator.configuration.support.eirps import \
EIRP_DISTRIBUTION_MAP_TYPE
from cu_pass.dpa_calculator.cbsd.cbsd import CbsdCategories
from cu_pass.dpa_calculator.dpa.builder import RadioAstronomyFacilityNames
from cu_pass.dpa_calculator.dpa.dpa import Dpa
from testcases.cu_pass.dpa_calculator.features.environment.hooks import ContextSas
from testcases.cu_pass.dpa_calculator.features.steps.dpa_neighborhood.environment.contexts.context_cbsd_deployment_options import \
ContextCbsdDeploymentOptions
from testcases.cu_pass.dpa_calculator.features.steps.dpa_neighborhood.environment.contexts.context_monte_carlo_iterations import \
ContextMonteCarloIterations
from testcases.cu_pass.dpa_calculator.features.steps.dpa_neighborhood.environment.parsers.parse_dpa import parse_dpa
ARBITRARY_BUCKET_NAME = 'arbitrary_bucket_name'
ARBITRARY_DPA_NAME = RadioAstronomyFacilityNames.HatCreek.value
ARBITRARY_NUMBER_OF_ITERATIONS = 1
ARBITRARY_RADIUS_IN_KILOMETERS = 2
ARBITRARY_OUTPUT_DIRECTORY = 'arbitrary_output_directory'
class ContextDocker(ContextCbsdDeploymentOptions, ContextMonteCarloIterations, ContextSas):
beamwidth: float
dpa: Dpa
eirp_distribution: EIRP_DISTRIBUTION_MAP_TYPE
local_output_directory: str
include_ue_runs: bool
interference_threshold: int
neighborhood_categories: List[CbsdCategories]
precreate_bucket: bool
s3_bucket: str
s3_output_directory: str
def set_docker_context_defaults(context: ContextDocker) -> None:
context.beamwidth = None
context.dpa = parse_dpa(text=ARBITRARY_DPA_NAME)
context.eirp_distribution = None
context.include_ue_runs = False
context.local_output_directory = ARBITRARY_OUTPUT_DIRECTORY
context.interference_threshold = None
context.neighborhood_categories = []
context.precreate_bucket = True
context.number_of_iterations = ARBITRARY_NUMBER_OF_ITERATIONS
context.simulation_area_radius = ARBITRARY_RADIUS_IN_KILOMETERS
context.s3_bucket = ARBITRARY_BUCKET_NAME
context.s3_output_directory = ARBITRARY_OUTPUT_DIRECTORY
| 622
| 369
| 46
|
bf6189581c930d132f1857c28d3c0be6972362de
| 6,966
|
py
|
Python
|
tftpy/context/client.py
|
jcarswell/tftpy
|
9c171c7e969b80f2c00728df21d5534b3191620a
|
[
"MIT"
] | null | null | null |
tftpy/context/client.py
|
jcarswell/tftpy
|
9c171c7e969b80f2c00728df21d5534b3191620a
|
[
"MIT"
] | null | null | null |
tftpy/context/client.py
|
jcarswell/tftpy
|
9c171c7e969b80f2c00728df21d5534b3191620a
|
[
"MIT"
] | null | null | null |
import logging
import os
import sys
import time
from typing import Union
from io import IOBase
from .base import Client
from tftpy.shared import TIMEOUT_RETRIES
from tftpy.packet import types
from tftpy.exceptions import TftpException,TftpTimeout,TftpFileNotFoundError
from tftpy.states import SentReadRQ,SentWriteRQ
logger = logging.getLogger('tftpy.context.client')
class Upload(Client):
"""The upload context for the client during an upload.
Note: If input is a hyphen, then we will use stdin."""
def __init__(self, host: str, port: int, timeout: int,
input: Union[IOBase,str], **kwargs) -> None:
"""Upload context for uploading data to a server.
Args:
host (str): Server Address
port (int): Server Port
timeout (int): socket timeout
input ([IOBase,str]): Input data, can be one of
- An open file object
- A path to a file
- a '-' indicating read from STDIN
"""
super().__init__(host, port, timeout, **kwargs)
# If the input object has a read() function, assume it is file-like.
if hasattr(input, 'read'):
self.fileobj = input
elif input == '-':
self.fileobj = sys.stdin
else:
self.fileobj = open(input, "rb")
logger.debug("tftpy.context.client.upload.__init__()")
logger.debug(f" file_to_transfer = {self.file_to_transfer}, options = {self.options}")
def start(self) -> None:
"""Main loop to read data in and send file to the server."""
logger.info(f"Sending tftp upload request to {self.host}")
logger.info(f" filename -> {self.file_to_transfer}")
logger.info(f" options -> {self.options}")
self.metrics.start_time = time.time()
logger.debug(f"Set metrics.start_time to {self.metrics.start_time}")
pkt = types.WriteRQ()
pkt.filename = self.file_to_transfer
pkt.mode = self.mode
pkt.options = self.options
self.send(pkt)
self.state = SentWriteRQ(self)
while self.state:
try:
logger.debug(f"State is {self.state}")
self.cycle()
except TftpTimeout as err:
logger.error(str(err))
self.retry_count += 1
if self.retry_count >= TIMEOUT_RETRIES:
logger.debug("hit max retries, giving up")
raise
else:
logger.warning("resending last packet")
self.state.resend_last()
def end(self, *args):
"""Finish up the context."""
super().end()
self.metrics.end_time = time.time()
logger.debug(f"Set metrics.end_time to {self.metrics.end_time}")
self.metrics.compute()
class Download(Client):
"""The download context for the client during a download.
Note: If output is a hyphen, then the output will be sent to stdout."""
def __init__(self, host: str, port: int, timeout: int,
output: Union[IOBase,str], **kwargs) -> None:
"""Initalize the Download context with the server and
where to save the data
Args:
host (str): Server Address
port (int): Server port
timeout (int): Socket Timeout
output (Union[IOBase,str]): Output data, can be one of
- An open file object
- A path to a file
- '-' indicating write to STDOUT
Raises:
            TftpException: unable to open the destination file for writing
"""
super().__init__(host, port, timeout, **kwargs)
self.filelike_fileobj = False
# If the output object has a write() function, assume it is file-like.
if hasattr(output, 'write'):
self.fileobj = output
self.filelike_fileobj = True
# If the output filename is -, then use stdout
elif output == '-':
self.fileobj = sys.stdout
self.filelike_fileobj = True
else:
try:
self.fileobj = open(output, "wb")
except OSError as err:
raise TftpException("Could not open output file", err)
logger.debug("tftpy.context.client.Download.__init__()")
logger.debug(f" file_to_transfer = {self.file_to_transfer}, options = {self.options}")
def start(self) -> None:
"""Initiate the download.
Raises:
TftpTimeout: Failed to connect to the server
            TftpFileNotFoundError: received a file-not-found error from the server
"""
logger.info(f"Sending tftp download request to {self.host}")
logger.info(f" filename -> {self.file_to_transfer}")
logger.info(f" options -> {self.options}")
self.metrics.start_time = time.time()
logger.debug(f"Set metrics.start_time to {self.metrics.start_time}")
pkt = types.ReadRQ()
pkt.filename = self.file_to_transfer
pkt.mode = self.mode
pkt.options = self.options
self.send(pkt)
self.state = SentReadRQ(self)
while self.state:
try:
logger.debug(f"State is {self.state}")
self.cycle()
except TftpTimeout as err:
logger.error(str(err))
self.retry_count += 1
if self.retry_count >= TIMEOUT_RETRIES:
logger.debug("hit max retries, giving up")
raise TftpTimeout("Max retries reached")
else:
logger.warning("resending last packet")
self.state.resend_last()
except TftpFileNotFoundError as err:
# If we received file not found, then we should not save the open
# output file or we'll be left with a size zero file. Delete it,
# if it exists.
logger.error("Received File not found error")
if self.fileobj is not None and not self.filelike_fileobj and os.path.exists(self.fileobj.name):
logger.debug(f"unlinking output file of {self.fileobj.name}")
os.unlink(self.fileobj.name)
raise TftpFileNotFoundError(err)
def end(self) -> None:
"""Finish up the context."""
super().end(not self.filelike_fileobj)
self.metrics.end_time = time.time()
logger.debug(f"Set metrics.end_time to {self.metrics.end_time}")
self.metrics.compute()
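# Usage sketch (host, port and filenames are illustrative; file_to_transfer, mode
# and options are handled by the Client base class, which is not shown here):
#   ctx = Download('192.0.2.1', 69, timeout=5, output='local.bin', ...)
#   ctx.start()
#   ctx.end()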
| 36.663158
| 113
| 0.548665
|
import logging
import os
import sys
import time
from typing import Union
from io import IOBase
from .base import Client
from tftpy.shared import TIMEOUT_RETRIES
from tftpy.packet import types
from tftpy.exceptions import TftpException,TftpTimeout,TftpFileNotFoundError
from tftpy.states import SentReadRQ,SentWriteRQ
logger = logging.getLogger('tftpy.context.client')
class Upload(Client):
"""The upload context for the client during an upload.
Note: If input is a hyphen, then we will use stdin."""
def __init__(self, host: str, port: int, timeout: int,
input: Union[IOBase,str], **kwargs) -> None:
"""Upload context for uploading data to a server.
Args:
host (str): Server Address
port (int): Server Port
timeout (int): socket timeout
input ([IOBase,str]): Input data, can be one of
- An open file object
- A path to a file
- a '-' indicating read from STDIN
"""
super().__init__(host, port, timeout, **kwargs)
# If the input object has a read() function, assume it is file-like.
if hasattr(input, 'read'):
self.fileobj = input
elif input == '-':
self.fileobj = sys.stdin
else:
self.fileobj = open(input, "rb")
logger.debug("tftpy.context.client.upload.__init__()")
logger.debug(f" file_to_transfer = {self.file_to_transfer}, options = {self.options}")
def start(self) -> None:
"""Main loop to read data in and send file to the server."""
logger.info(f"Sending tftp upload request to {self.host}")
logger.info(f" filename -> {self.file_to_transfer}")
logger.info(f" options -> {self.options}")
self.metrics.start_time = time.time()
logger.debug(f"Set metrics.start_time to {self.metrics.start_time}")
pkt = types.WriteRQ()
pkt.filename = self.file_to_transfer
pkt.mode = self.mode
pkt.options = self.options
self.send(pkt)
self.state = SentWriteRQ(self)
while self.state:
try:
logger.debug(f"State is {self.state}")
self.cycle()
except TftpTimeout as err:
logger.error(str(err))
self.retry_count += 1
if self.retry_count >= TIMEOUT_RETRIES:
logger.debug("hit max retries, giving up")
raise
else:
logger.warning("resending last packet")
self.state.resend_last()
def end(self, *args):
"""Finish up the context."""
super().end()
self.metrics.end_time = time.time()
logger.debug(f"Set metrics.end_time to {self.metrics.end_time}")
self.metrics.compute()
class Download(Client):
"""The download context for the client during a download.
Note: If output is a hyphen, then the output will be sent to stdout."""
def __init__(self, host: str, port: int, timeout: int,
output: Union[IOBase,str], **kwargs) -> None:
"""Initalize the Download context with the server and
where to save the data
Args:
host (str): Server Address
port (int): Server port
timeout (int): Socket Timeout
output (Union[IOBase,str]): Output data, can be one of
- An open file object
- A path to a file
- '-' indicating write to STDOUT
Raises:
            TftpException: unable to open the destination file for writing
"""
super().__init__(host, port, timeout, **kwargs)
self.filelike_fileobj = False
# If the output object has a write() function, assume it is file-like.
if hasattr(output, 'write'):
self.fileobj = output
self.filelike_fileobj = True
# If the output filename is -, then use stdout
elif output == '-':
self.fileobj = sys.stdout
self.filelike_fileobj = True
else:
try:
self.fileobj = open(output, "wb")
except OSError as err:
raise TftpException("Could not open output file", err)
logger.debug("tftpy.context.client.Download.__init__()")
logger.debug(f" file_to_transfer = {self.file_to_transfer}, options = {self.options}")
def start(self) -> None:
"""Initiate the download.
Raises:
TftpTimeout: Failed to connect to the server
            TftpFileNotFoundError: received a file-not-found error from the server
"""
logger.info(f"Sending tftp download request to {self.host}")
logger.info(f" filename -> {self.file_to_transfer}")
logger.info(f" options -> {self.options}")
self.metrics.start_time = time.time()
logger.debug(f"Set metrics.start_time to {self.metrics.start_time}")
pkt = types.ReadRQ()
pkt.filename = self.file_to_transfer
pkt.mode = self.mode
pkt.options = self.options
self.send(pkt)
self.state = SentReadRQ(self)
while self.state:
try:
logger.debug(f"State is {self.state}")
self.cycle()
except TftpTimeout as err:
logger.error(str(err))
self.retry_count += 1
if self.retry_count >= TIMEOUT_RETRIES:
logger.debug("hit max retries, giving up")
raise TftpTimeout("Max retries reached")
else:
logger.warning("resending last packet")
self.state.resend_last()
except TftpFileNotFoundError as err:
# If we received file not found, then we should not save the open
# output file or we'll be left with a size zero file. Delete it,
# if it exists.
logger.error("Received File not found error")
if self.fileobj is not None and not self.filelike_fileobj and os.path.exists(self.fileobj.name):
logger.debug(f"unlinking output file of {self.fileobj.name}")
os.unlink(self.fileobj.name)
raise TftpFileNotFoundError(err)
def end(self) -> None:
"""Finish up the context."""
super().end(not self.filelike_fileobj)
self.metrics.end_time = time.time()
logger.debug(f"Set metrics.end_time to {self.metrics.end_time}")
self.metrics.compute()
| 0
| 0
| 0
|
b479d83b071cc8495e2321dfdca00c29ab7b085e
| 1,778
|
py
|
Python
|
lola/tests/test_coin_game.py
|
jleni/lola
|
9b9a2122aefc97d9ed1529b875912816f1acb5d6
|
[
"MIT"
] | 125
|
2018-07-08T18:50:08.000Z
|
2022-03-07T09:31:12.000Z
|
lola/tests/test_coin_game.py
|
jleni/lola
|
9b9a2122aefc97d9ed1529b875912816f1acb5d6
|
[
"MIT"
] | 11
|
2018-07-09T17:55:56.000Z
|
2021-04-13T23:49:40.000Z
|
lola/tests/test_coin_game.py
|
jleni/lola
|
9b9a2122aefc97d9ed1529b875912816f1acb5d6
|
[
"MIT"
] | 36
|
2018-07-09T08:05:14.000Z
|
2022-03-12T19:52:49.000Z
|
import importlib
max_steps = 1000
terminate_prob = 0.998
batch_size = 5
gameEnv = importlib.import_module('coin_game_v')
env = gameEnv.gameEnv(terminate_prob=terminate_prob, max_steps=max_steps, batch_size=batch_size)
print('state_space', env.state_space)
print('red_pos', env.red_pos)
print('blue_pos', env.blue_pos)
print('red_coin', env.red_coin)
print('coin_pos', env.coin_pos)
# test red agent picks up red coin
env.red_coin = [1, 1, 1, 1, 1]
env.red_pos = ( env.coin_pos - env.actions[1] ) % env.grid_size
state, reward, done = env.step(actions=[[1,1], [1,1], [1,1], [1,1], [1,1]])
print('red_pos', env.red_pos)
print('blue_pos', env.blue_pos)
print('red_coin', env.red_coin)
print('coin_pos', env.coin_pos)
print('reward', reward)
print('state', state)
# # test red agent picks up blue coin
# env.red_coin = 0
# env.red_pos = ( env.coin_pos - env.actions[1] ) % env.grid_size
# _, reward, done = env.step(action=1, agent='red')
# print('red_pos', env.red_pos)
# print('blue_pos', env.blue_pos)
# print('red_coin', env.red_coin)
# print('coin_pos', env.coin_pos)
# print('reward', reward)
# # test blue agent picks up red coin
# env.red_coin = 1
# env.blue_pos = ( env.coin_pos - env.actions[1] ) % env.grid_size
# _, reward, done = env.step(action=1, agent='blue')
# print('red_pos', env.red_pos)
# print('blue_pos', env.blue_pos)
# print('red_coin', env.red_coin)
# print('coin_pos', env.coin_pos)
# print('reward', reward)
# # test blue agent picks up blue coin
# env.red_coin = 0
# env.blue_pos = ( env.coin_pos - env.actions[1] ) % env.grid_size
# _, reward, done = env.step(action=1, agent='blue')
# print('red_pos', env.red_pos)
# print('blue_pos', env.blue_pos)
# print('red_coin', env.red_coin)
# print('coin_pos', env.coin_pos)
# print('reward', reward)
| 31.192982
| 96
| 0.697975
|
import importlib
max_steps = 1000
terminate_prob = 0.998
batch_size = 5
gameEnv = importlib.import_module('coin_game_v')
env = gameEnv.gameEnv(terminate_prob=terminate_prob, max_steps=max_steps, batch_size=batch_size)
print('state_space', env.state_space)
print('red_pos', env.red_pos)
print('blue_pos', env.blue_pos)
print('red_coin', env.red_coin)
print('coin_pos', env.coin_pos)
# test red agent picks up red coin
env.red_coin = [1, 1, 1, 1, 1]
env.red_pos = ( env.coin_pos - env.actions[1] ) % env.grid_size
state, reward, done = env.step(actions=[[1,1], [1,1], [1,1], [1,1], [1,1]])
print('red_pos', env.red_pos)
print('blue_pos', env.blue_pos)
print('red_coin', env.red_coin)
print('coin_pos', env.coin_pos)
print('reward', reward)
print('state', state)
# # test red agent picks up blue coin
# env.red_coin = 0
# env.red_pos = ( env.coin_pos - env.actions[1] ) % env.grid_size
# _, reward, done = env.step(action=1, agent='red')
# print('red_pos', env.red_pos)
# print('blue_pos', env.blue_pos)
# print('red_coin', env.red_coin)
# print('coin_pos', env.coin_pos)
# print('reward', reward)
# # test blue agent picks up red coin
# env.red_coin = 1
# env.blue_pos = ( env.coin_pos - env.actions[1] ) % env.grid_size
# _, reward, done = env.step(action=1, agent='blue')
# print('red_pos', env.red_pos)
# print('blue_pos', env.blue_pos)
# print('red_coin', env.red_coin)
# print('coin_pos', env.coin_pos)
# print('reward', reward)
# # test blue agent picks up blue coin
# env.red_coin = 0
# env.blue_pos = ( env.coin_pos - env.actions[1] ) % env.grid_size
# _, reward, done = env.step(action=1, agent='blue')
# print('red_pos', env.red_pos)
# print('blue_pos', env.blue_pos)
# print('red_coin', env.red_coin)
# print('coin_pos', env.coin_pos)
# print('reward', reward)
| 0
| 0
| 0
|
1b639764948babc8a713fc8d7eae08b3a9be84bf
| 1,780
|
py
|
Python
|
src/sprites/Upgrade.py
|
NEKERAFA/Soul-Tower
|
d37c0bf6bcbf253ec5b2c41f802adeeca31fb384
|
[
"MIT"
] | null | null | null |
src/sprites/Upgrade.py
|
NEKERAFA/Soul-Tower
|
d37c0bf6bcbf253ec5b2c41f802adeeca31fb384
|
[
"MIT"
] | null | null | null |
src/sprites/Upgrade.py
|
NEKERAFA/Soul-Tower
|
d37c0bf6bcbf253ec5b2c41f802adeeca31fb384
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pygame, os
from src.sprites.MyStaticSprite import *
from src.sprites.Interactive import *
from src.ResourceManager import *
from src.scenes.stage.OnDialogueState import *
SPRITE_FILES = os.path.join("sprites", "interactives")
| 40.454545
| 108
| 0.629775
|
# -*- coding: utf-8 -*-
import pygame, os
from src.sprites.MyStaticSprite import *
from src.sprites.Interactive import *
from src.ResourceManager import *
from src.scenes.stage.OnDialogueState import *
SPRITE_FILES = os.path.join("sprites", "interactives")
class Upgrade(MyStaticSprite, Interactive):
def __init__(self, position, imageFile, cost, upgrade):
        # Call the parent class constructor
MyStaticSprite.__init__(self)
        # Load the sprite image
self.image = ResourceManager.load_image(os.path.join(SPRITE_FILES, imageFile), -1)
self.rect = self.image.get_rect()
self.rect.bottomleft = position
        # Set up the interactive object
Interactive.__init__(self, self.rect)
        # Update the sprite position
self.change_position(position)
        # Store the cost and which character the upgrade belongs to
self.cost = cost
self.upgrade = upgrade
def activate(self, stage):
if stage.player.souls >= self.cost:
            # Deduct the souls from the player
stage.player.decrement_souls(self.cost)
if self.upgrade == 'ranged':
                # Leraila's upgrade
stage.set_state(OnDialogueState('lerailaUpgrade.json', stage))
stage.player.rangedLevel += 1
elif self.upgrade == 'melee':
                # Daric's upgrade
stage.player.meleeLevel += 1
stage.set_state(OnDialogueState('daricUpgrade.json', stage))
            # Remove the sprite from all groups
self.kill()
else:
dialogue = [{"text": [["Necesitas " + str(self.cost) + " almas para obtener esta", "mejora."]]}]
stage.set_state(OnDialogueState(dialogue, stage, False))
| 1,424
| 22
| 76
|
e911a7d22b3f23667f14d95f7d6333c90dc8d5e9
| 1,034
|
py
|
Python
|
ml_ops/visualization_blog/lambdas/createdataset/dataset.py
|
sriharshams/amazon-forecast-samples
|
d33392329311f52e86fb414ffd9a1e944d36e881
|
[
"MIT-0"
] | 405
|
2018-12-02T21:36:15.000Z
|
2022-03-29T12:52:40.000Z
|
ml_ops/visualization_blog/lambdas/createdataset/dataset.py
|
sriharshams/amazon-forecast-samples
|
d33392329311f52e86fb414ffd9a1e944d36e881
|
[
"MIT-0"
] | 78
|
2018-12-20T21:33:02.000Z
|
2022-02-02T12:43:27.000Z
|
ml_ops/visualization_blog/lambdas/createdataset/dataset.py
|
sriharshams/amazon-forecast-samples
|
d33392329311f52e86fb414ffd9a1e944d36e881
|
[
"MIT-0"
] | 344
|
2018-12-11T15:58:25.000Z
|
2022-03-31T11:09:41.000Z
|
from os import environ
from boto3 import client
import actions
from loader import Loader
ACCOUNTID = client('sts').get_caller_identity()['Account']
ARN = 'arn:aws:forecast:{region}:{account}:dataset/{name}'
LOADER = Loader()
| 30.411765
| 79
| 0.673114
|
from os import environ
from boto3 import client
import actions
from loader import Loader
ACCOUNTID = client('sts').get_caller_identity()['Account']
ARN = 'arn:aws:forecast:{region}:{account}:dataset/{name}'
LOADER = Loader()
def lambda_handler(event, context):
datasets = event['params']['Datasets']
status = None
event['DatasetArn'] = ARN.format(
account=ACCOUNTID,
name=datasets[0]['DatasetName'],
region=environ['AWS_REGION']
)
event['AccountID'] = ACCOUNTID
try:
status = LOADER.forecast_cli.describe_dataset(
DatasetArn=event['DatasetArn']
)
except LOADER.forecast_cli.exceptions.ResourceNotFoundException:
LOADER.logger.info('Dataset not found! Will follow to create dataset.')
for dataset in datasets:
LOADER.forecast_cli.create_dataset(**dataset)
status = LOADER.forecast_cli.describe_dataset(
DatasetArn=event['DatasetArn']
)
actions.take_action(status['Status'])
return event
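# Example input event shape (field values are illustrative; each dataset dict in
# params.Datasets is passed verbatim to the Forecast CreateDataset call above):
#   {"params": {"Datasets": [{"DatasetName": "demo_target",
#                             "Domain": "RETAIL",
#                             "DatasetType": "TARGET_TIME_SERIES",
#                             "Schema": {"Attributes": [...]}}]}}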
| 784
| 0
| 23
|
3a807d7d060e94e9383acbae2b77682c4bf36733
| 2,834
|
py
|
Python
|
examples/id_based_encryption.py
|
elliptic-shiho/ecpy
|
ccdb872124ca2c218b8a7261a2956efd5ec83705
|
[
"MIT"
] | 48
|
2016-03-30T07:20:49.000Z
|
2022-01-27T10:48:43.000Z
|
examples/id_based_encryption.py
|
elliptic-shiho/ecpy
|
ccdb872124ca2c218b8a7261a2956efd5ec83705
|
[
"MIT"
] | 11
|
2017-03-26T11:03:20.000Z
|
2021-06-01T15:54:03.000Z
|
examples/id_based_encryption.py
|
elliptic-shiho/ecpy
|
ccdb872124ca2c218b8a7261a2956efd5ec83705
|
[
"MIT"
] | 12
|
2016-06-05T19:09:26.000Z
|
2021-04-18T04:23:20.000Z
|
from ecpy import EllipticCurve, ExtendedFiniteField, symmetric_tate_pairing
import hashlib
import random
import cPickle
# PKI secret
secret = 0xdeadbeef
p = int("501794446334189957604282155189438160845433783392772743395579628617109"
"929160215221425142482928909270259580854362463493326988807453595748573"
"76419559953437557")
l = (p + 1) / 6
F = ExtendedFiniteField(p, "x^2+x+1")
E = EllipticCurve(F, 0, 1)
P = E(3, int("1418077311270457886139292292020587683642898636677353664354101171"
"7684401801069777797699258667061922178009879315047772033936311133"
"535564812495329881887557081"))
sP = E(int("129862491850266001914601437161941818413833907050695770313188660767"
"152646233571458109764766382285470424230719843324368007925375351295"
"39576510740045312772012"),
int("452543250979361708074026409576755302296698208397782707067096515523"
"033579018123253402743775747767548650767928190884624134827869137911"
"24188897792458334596297"))
if __name__ == "__main__":
main()
| 29.520833
| 79
| 0.613973
|
from ecpy import EllipticCurve, ExtendedFiniteField, symmetric_tate_pairing
import hashlib
import random
import cPickle
# PKI secret
secret = 0xdeadbeef
p = int("501794446334189957604282155189438160845433783392772743395579628617109"
"929160215221425142482928909270259580854362463493326988807453595748573"
"76419559953437557")
l = (p + 1) / 6
F = ExtendedFiniteField(p, "x^2+x+1")
E = EllipticCurve(F, 0, 1)
P = E(3, int("1418077311270457886139292292020587683642898636677353664354101171"
"7684401801069777797699258667061922178009879315047772033936311133"
"535564812495329881887557081"))
sP = E(int("129862491850266001914601437161941818413833907050695770313188660767"
"152646233571458109764766382285470424230719843324368007925375351295"
"39576510740045312772012"),
int("452543250979361708074026409576755302296698208397782707067096515523"
"033579018123253402743775747767548650767928190884624134827869137911"
"24188897792458334596297"))
def H(x):
return x.x * x.field.p + x.y
def get_user_public(E, P, id, l):
v = int(hashlib.sha512(id).hexdigest().encode("hex"), 16)
return P * v
def get_user_secret(E, pubkey, l):
global secret
return pubkey * secret
def encrypt(E, P, sP, pubkey, m, l):
assert isinstance(m, (int, long))
# r = rand()
r = random.randint(2**30, 2**31)
# r*P, m xor e_l(secret * P, Q)^r = e_l(P, Q) ^ (secret * r)
return (r * P,
m ^ H(E.field(symmetric_tate_pairing(E, sP, pubkey, l) ** r)))
def decrypt(E, K, c, l):
# c1, c2 = r*P, m xor e_l(secret * P, Q) ^ r = e_l(P, Q) ^ (secret * r)
# a = e_l(c1, K) = e_l(r*P, secret * Q) = e_l(P, Q) ^ (secret * r)
return c[1] ^ H(E.field(symmetric_tate_pairing(E, c[0], K, l)))
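# Round-trip sketch (identity string and message value are illustrative): decryption
# inverts encryption because e(r*P, s*Q) = e(P, Q)^(r*s) = e(s*P, Q)^r, e.g.
#   Q = get_user_public(E, P, "alice", l)
#   sQ = get_user_secret(E, Q, l)
#   assert decrypt(E, sQ, encrypt(E, P, sP, Q, 0x1234, l), l) == 0x1234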
def main():
global P, sP, l
print "Please tell me your ID :",
ID = raw_input().strip()
Q = get_user_public(E, P, ID, l)
sQ = get_user_secret(E, Q, l)
while True:
print "What to do?"
print "Encryption -> e, Decryption -> d, Quit -> q :",
t = raw_input().strip().lower()
if t == "e":
print "[+] Message? :",
m = int(raw_input().strip().encode("hex"), 16)
C = encrypt(E, P, sP, Q, m, l)
t = tuple(C[0])
c = cPickle.dumps((t[0], t[1], C[1])).encode("zlib").encode("base64")
c = c.replace("\n", "")
print "[+] Your Encrypted Message: %s" % c
elif t == "d":
print "Ciphertext? :",
d = raw_input().strip().decode("base64").decode("zlib")
x, y, c = cPickle.loads(d)
C1 = E(x, y)
C2 = c
C = (C1, C2)
m = decrypt(E, sQ, C, l)
m = hex(m)[2:-1]
if len(m) % 2 == 1:
m = "0" + m
m = m.decode("hex")
print "[+] Your Message :", m
elif t == "q":
print "[+] Quit"
break
if __name__ == "__main__":
main()
| 1,621
| 0
| 138
|
db6094ae5dcf25106c386a249c62bbdac1127a6e
| 4,784
|
py
|
Python
|
src/byro/plugins/sepa/migrations/0001_initial.py
|
dnet/byro
|
26dac998dfc2408224ebddc008f3df85f88f4a1a
|
[
"Apache-2.0"
] | 114
|
2017-08-12T16:47:49.000Z
|
2022-03-17T12:22:59.000Z
|
src/byro/plugins/sepa/migrations/0001_initial.py
|
dnet/byro
|
26dac998dfc2408224ebddc008f3df85f88f4a1a
|
[
"Apache-2.0"
] | 191
|
2017-08-13T09:12:37.000Z
|
2022-03-30T19:57:18.000Z
|
src/byro/plugins/sepa/migrations/0001_initial.py
|
dnet/byro
|
26dac998dfc2408224ebddc008f3df85f88f4a1a
|
[
"Apache-2.0"
] | 45
|
2017-08-13T09:32:19.000Z
|
2022-03-11T21:36:29.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-12 20:32
from __future__ import unicode_literals
import annoying.fields
import byro.common.models.auditable
from django.db import migrations, models
import django.db.models.deletion
import localflavor.generic.models
| 33.690141
| 139
| 0.354724
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-12 20:32
from __future__ import unicode_literals
import annoying.fields
import byro.common.models.auditable
from django.db import migrations, models
import django.db.models.deletion
import localflavor.generic.models
class Migration(migrations.Migration):
initial = True
dependencies = [("members", "0002_auto_20171012_1857")]
operations = [
migrations.CreateModel(
name="MemberSepa",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"iban",
localflavor.generic.models.IBANField(
blank=True,
include_countries=None,
max_length=34,
null=True,
use_nordea_extensions=False,
verbose_name="IBAN",
),
),
(
"bic",
localflavor.generic.models.BICField(
blank=True, max_length=11, null=True, verbose_name="BIC"
),
),
(
"institute",
models.CharField(
blank=True,
max_length=255,
null=True,
verbose_name="IBAN Institute",
),
),
(
"issue_date",
models.DateField(
blank=True,
help_text="The issue date of the direct debit mandate. (1970-01-01 means there is no issue date in the database )",
null=True,
verbose_name="IBAN Issue Date",
),
),
(
"fullname",
models.CharField(
blank=True,
help_text="Full name for IBAN account owner",
max_length=255,
null=True,
verbose_name="IBAN full name",
),
),
(
"address",
models.CharField(
blank=True,
help_text="Address line (e.g. Street / House Number)",
max_length=255,
null=True,
verbose_name="IBAN address",
),
),
(
"zip_code",
models.CharField(
blank=True,
help_text="ZIP Code",
max_length=20,
null=True,
verbose_name="IBAN zip code",
),
),
(
"city",
models.CharField(
blank=True, max_length=255, null=True, verbose_name="IBAN City"
),
),
(
"country",
models.CharField(
blank=True,
default="Deutschland",
max_length=255,
null=True,
verbose_name="IBAN Country",
),
),
(
"mandate_reference",
models.CharField(
blank=True,
max_length=255,
null=True,
verbose_name="IBAN Mandate Reference",
),
),
(
"mandate_reason",
models.CharField(
blank=True,
max_length=255,
null=True,
verbose_name="IBAN Mandate Reason",
),
),
(
"member",
annoying.fields.AutoOneToOneField(
on_delete=django.db.models.deletion.PROTECT,
related_name="sepa",
to="members.Member",
),
),
],
bases=(byro.common.models.auditable.Auditable, models.Model),
)
]
| 0
| 4,479
| 23
|
1d56bd0859c31a8e9b68e858edb002f8e9a04340
| 5,438
|
py
|
Python
|
src/api/bkuser_core/categories/plugins/ldap/adaptor.py
|
shabbywu/bk-user
|
8ea590958a5c6dd3c71d0b72e1d4866ce327efda
|
[
"MIT"
] | null | null | null |
src/api/bkuser_core/categories/plugins/ldap/adaptor.py
|
shabbywu/bk-user
|
8ea590958a5c6dd3c71d0b72e1d4866ce327efda
|
[
"MIT"
] | null | null | null |
src/api/bkuser_core/categories/plugins/ldap/adaptor.py
|
shabbywu/bk-user
|
8ea590958a5c6dd3c71d0b72e1d4866ce327efda
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from dataclasses import dataclass
from typing import Any, Dict, List, NamedTuple, Optional
from bkuser_core.categories.plugins.ldap.models import DepartmentProfile, UserProfile
from bkuser_core.user_settings.loader import ConfigProvider
from django.utils.encoding import force_str
from ldap3.utils import dn as dn_utils
@dataclass
class ProfileFieldMapper:
"""从 ldap 对象属性中获取用户字段"""
config_loader: ConfigProvider
setting_field_map: dict
def get_field(self, user_meta: Dict[str, List[bytes]], field_name: str, raise_exception: bool = False) -> str:
"""根据字段映射关系, 从 ldap 中获取 `field_name` 的值"""
try:
setting_name = self.setting_field_map[field_name]
except KeyError:
if raise_exception:
raise ValueError("该用户字段没有在配置中有对应项,无法同步")
return ""
try:
ldap_field_name = self.config_loader[setting_name]
except KeyError:
if raise_exception:
raise ValueError(f"用户目录配置中缺失字段 {setting_name}")
return ""
try:
if user_meta[ldap_field_name]:
return force_str(user_meta[ldap_field_name][0])
return ""
except KeyError:
if raise_exception:
raise ValueError(f"搜索数据中没有对应的字段 {ldap_field_name}")
return ""
def get_user_attributes(self) -> list:
"""获取远端属性名列表"""
return [self.config_loader[x] for x in self.setting_field_map.values() if self.config_loader[x]]
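# Mapping sketch (setting and attribute names are illustrative assumptions): with
# setting_field_map = {"username": "ldap_username_field"} and the category settings
# mapping "ldap_username_field" to "sAMAccountName", get_field(raw_attributes,
# "username") returns the decoded first value of raw_attributes["sAMAccountName"],
# or "" when the field is missing and raise_exception is False.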
class RDN(NamedTuple):
"""RelativeDistinguishedName"""
type: str
value: str
separator: str
def parse_dn_tree(dn: str, restrict_types: List[str] = None) -> List[RDN]:
"""A DN is a sequence of relative distinguished names (RDN) connected by commas, For examples:
we have a dn = "CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM", this method will parse the dn to:
>>> parse_dn_tree("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM")
[RDN(type='CN', value='Jeff Smith', separator=','),
RDN(type='OU', value='Sales', separator=','),
RDN(type='DC', value='Fabrikam', separator=','),
RDN(type='DC', value='COM', separator='')]
if provide restrict_types, this method will ignore the attribute not in restrict_types, For examples:
>>> parse_dn_tree("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM", restrict_types=["DC"])
[RDN(type='DC', value='Fabrikam', separator=','), RDN(type='DC', value='COM', separator='')]
    Furthermore, restrict_types is case-insensitive: ["DC"], ["dc"] and ["Dc"] are treated as exactly equal.
>>> parse_dn_tree("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM", restrict_types=["dc"])
[RDN(type='DC', value='Fabrikam', separator=','), RDN(type='DC', value='COM', separator='')]
See Also: https://docs.microsoft.com/en-us/previous-versions/windows/desktop/ldap/distinguished-names
"""
restrict_types = [type_.upper() for type_ in (restrict_types or [])]
items = dn_utils.parse_dn(dn, escape=True)
if restrict_types:
parts = [RDN(*i) for i in items if i[0].upper() in restrict_types]
else:
parts = [RDN(*i) for i in items]
return parts
def parse_dn_value_list(dn: str, restrict_types: List[str] = None) -> List[str]:
"""this method work like parse_dn_tree, be only return values of those attributes, For examples:
>>> parse_dn_value_list("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM")
['Jeff Smith', 'Sales', 'Fabrikam', 'COM']
if provide restrict_types, this method will ignore the attribute not in restrict_types, For examples:
>>> parse_dn_value_list("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM", restrict_types=["DC"])
['Fabrikam', 'COM']
"""
tree = parse_dn_tree(dn, restrict_types)
parts = []
for part in tree:
parts.append(part.value)
return parts
| 38.295775
| 115
| 0.668444
|
# -*- coding: utf-8 -*-
from dataclasses import dataclass
from typing import Any, Dict, List, NamedTuple, Optional
from bkuser_core.categories.plugins.ldap.models import DepartmentProfile, UserProfile
from bkuser_core.user_settings.loader import ConfigProvider
from django.utils.encoding import force_str
from ldap3.utils import dn as dn_utils
@dataclass
class ProfileFieldMapper:
"""从 ldap 对象属性中获取用户字段"""
config_loader: ConfigProvider
setting_field_map: dict
def get_field(self, user_meta: Dict[str, List[bytes]], field_name: str, raise_exception: bool = False) -> str:
"""根据字段映射关系, 从 ldap 中获取 `field_name` 的值"""
try:
setting_name = self.setting_field_map[field_name]
except KeyError:
if raise_exception:
raise ValueError("该用户字段没有在配置中有对应项,无法同步")
return ""
try:
ldap_field_name = self.config_loader[setting_name]
except KeyError:
if raise_exception:
raise ValueError(f"用户目录配置中缺失字段 {setting_name}")
return ""
try:
if user_meta[ldap_field_name]:
return force_str(user_meta[ldap_field_name][0])
return ""
except KeyError:
if raise_exception:
raise ValueError(f"搜索数据中没有对应的字段 {ldap_field_name}")
return ""
def get_user_attributes(self) -> list:
"""获取远端属性名列表"""
return [self.config_loader[x] for x in self.setting_field_map.values() if self.config_loader[x]]
def user_adapter(
code: str, user_meta: Dict[str, Any], field_mapper: ProfileFieldMapper, restrict_types: List[str]
) -> UserProfile:
groups = user_meta["attributes"][field_mapper.config_loader["user_member_of"]]
return UserProfile(
username=field_mapper.get_field(user_meta=user_meta["raw_attributes"], field_name="username"),
email=field_mapper.get_field(user_meta=user_meta["raw_attributes"], field_name="email"),
telephone=field_mapper.get_field(user_meta=user_meta["raw_attributes"], field_name="telephone"),
display_name=field_mapper.get_field(user_meta=user_meta["raw_attributes"], field_name="display_name"),
code=code,
# TODO: finish the logic for converting departments
departments=[
# By convention, every dn component except the first one is a department the user belongs to, hence the [1:]
list(reversed(parse_dn_value_list(user_meta["dn"], restrict_types)[1:])),
# relations between the user and its user groups
*[list(reversed(parse_dn_value_list(group, restrict_types))) for group in groups],
],
)
def department_adapter(code: str, dept_meta: Dict, is_group: bool, restrict_types: List[str]) -> DepartmentProfile:
dn = dept_meta["dn"]
dn_values = parse_dn_value_list(dn, restrict_types=restrict_types)
parent_dept: Optional[DepartmentProfile] = None
for dept_name in reversed(dn_values):
parent_dept = DepartmentProfile(
name=dept_name,
parent=parent_dept,
is_group=is_group,
)
assert parent_dept is not None, "no department information was extracted from the dn"
parent_dept.code = code
return parent_dept
class RDN(NamedTuple):
"""RelativeDistinguishedName"""
type: str
value: str
separator: str
def parse_dn_tree(dn: str, restrict_types: List[str] = None) -> List[RDN]:
"""A DN is a sequence of relative distinguished names (RDN) connected by commas, For examples:
we have a dn = "CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM", this method will parse the dn to:
>>> parse_dn_tree("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM")
[RDN(type='CN', value='Jeff Smith', separator=','),
RDN(type='OU', value='Sales', separator=','),
RDN(type='DC', value='Fabrikam', separator=','),
RDN(type='DC', value='COM', separator='')]
If restrict_types is provided, this method will ignore attributes not in restrict_types. For example:
>>> parse_dn_tree("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM", restrict_types=["DC"])
[RDN(type='DC', value='Fabrikam', separator=','), RDN(type='DC', value='COM', separator='')]
Furthermore, restrict_types is case-insensitive: ["DC"], ["dc"] and ["Dc"] are exactly equal.
>>> parse_dn_tree("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM", restrict_types=["dc"])
[RDN(type='DC', value='Fabrikam', separator=','), RDN(type='DC', value='COM', separator='')]
See Also: https://docs.microsoft.com/en-us/previous-versions/windows/desktop/ldap/distinguished-names
"""
restrict_types = [type_.upper() for type_ in (restrict_types or [])]
items = dn_utils.parse_dn(dn, escape=True)
if restrict_types:
parts = [RDN(*i) for i in items if i[0].upper() in restrict_types]
else:
parts = [RDN(*i) for i in items]
return parts
def parse_dn_value_list(dn: str, restrict_types: List[str] = None) -> List[str]:
"""this method work like parse_dn_tree, be only return values of those attributes, For examples:
>>> parse_dn_value_list("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM")
['Jeff Smith', 'Sales', 'Fabrikam', 'COM']
If restrict_types is provided, this method will ignore attributes not in restrict_types. For example:
>>> parse_dn_value_list("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM", restrict_types=["DC"])
['Fabrikam', 'COM']
"""
tree = parse_dn_tree(dn, restrict_types)
parts = []
for part in tree:
parts.append(part.value)
return parts
| 1,656
| 0
| 46
|
f1ebbe6c83b7976dc88a8d4893dacd7fb0cdb353
| 1,540
|
py
|
Python
|
Day04.py
|
lsiepman/AdventOfCode2017
|
3ec620bef0971ceeadee28605ec8defba33b4bc5
|
[
"MIT"
] | null | null | null |
Day04.py
|
lsiepman/AdventOfCode2017
|
3ec620bef0971ceeadee28605ec8defba33b4bc5
|
[
"MIT"
] | null | null | null |
Day04.py
|
lsiepman/AdventOfCode2017
|
3ec620bef0971ceeadee28605ec8defba33b4bc5
|
[
"MIT"
] | null | null | null |
# IMPORTS
# DATA
data = []
with open("Data - Day04.txt") as file:
for line in file:
data.append(line.strip().split(" "))
# GOAL 1
"""
A new system policy has been put in place that requires all accounts to use a passphrase
instead of simply a password.
A passphrase consists of a series of words (lowercase letters) separated by spaces.
To ensure security, a valid passphrase must contain no duplicate words.
The system's full passphrase list is available as your puzzle input. How many passphrases are valid?
"""
# ANSWER 1
data_sets = []
for phrase in data:
data_sets.append(set(phrase))
num_valid = 0
for i in range(len(data)):
if len(data[i]) == len(data_sets[i]):
num_valid += 1
print(f"Answer 4a: {num_valid}")
# GOAL 2
"""
For added security, yet another system policy has been put in place.
Now, a valid passphrase must contain no two words that are anagrams of each other - that is,
a passphrase is invalid if any word's letters can be rearranged to form any other word in the passphrase.
"""
sorted_data = []
sorted_data_sets = []
for i in data:
new_i = is_anagram(i)
sorted_data.append(new_i)
sorted_data_sets.append(set(new_i))
num_valid_b = 0
for i in range(len(sorted_data)):
if len(sorted_data[i]) == len(sorted_data_sets[i]):
num_valid_b += 1
print(f"Answer 4b: {num_valid_b}")
| 25.666667
| 105
| 0.698701
|
# IMPORTS
# DATA
data = []
with open("Data - Day04.txt") as file:
for line in file:
data.append(line.strip().split(" "))
# GOAL 1
"""
A new system policy has been put in place that requires all accounts to use a passphrase
instead of simply a password.
A passphrase consists of a series of words (lowercase letters) separated by spaces.
To ensure security, a valid passphrase must contain no duplicate words.
The system's full passphrase list is available as your puzzle input. How many passphrases are valid?
"""
# ANSWER 1
data_sets = []
for phrase in data:
data_sets.append(set(phrase))
num_valid = 0
for i in range(len(data)):
if len(data[i]) == len(data_sets[i]):
num_valid += 1
print(f"Answer 4a: {num_valid}")
# GOAL 2
"""
For added security, yet another system policy has been put in place.
Now, a valid passphrase must contain no two words that are anagrams of each other - that is,
a passphrase is invalid if any word's letters can be rearranged to form any other word in the passphrase.
"""
def is_anagram(phrase):
alpha_phrase = []
for word in phrase:
alpha_word = sorted(word)
alpha_phrase.append("".join(alpha_word))
return alpha_phrase
sorted_data = []
sorted_data_sets = []
for i in data:
new_i = is_anagram(i)
sorted_data.append(new_i)
sorted_data_sets.append(set(new_i))
num_valid_b = 0
for i in range(len(sorted_data)):
if len(sorted_data[i]) == len(sorted_data_sets[i]):
num_valid_b += 1
print(f"Answer 4b: {num_valid_b}")
| 156
| 0
| 23
|
f3a48cb7c9060bd4d80fd365373cb167e56aa4aa
| 710
|
py
|
Python
|
src/azure-cli-core/azure/cli/core/auth/tests/test_adal_authentication.py
|
YuanyuanNi/azure-cli
|
63844964374858bfacd209bfe1b69eb456bd64ca
|
[
"MIT"
] | 3,287
|
2016-07-26T17:34:33.000Z
|
2022-03-31T09:52:13.000Z
|
src/azure-cli-core/azure/cli/core/auth/tests/test_adal_authentication.py
|
YuanyuanNi/azure-cli
|
63844964374858bfacd209bfe1b69eb456bd64ca
|
[
"MIT"
] | 19,206
|
2016-07-26T07:04:42.000Z
|
2022-03-31T23:57:09.000Z
|
src/azure-cli-core/azure/cli/core/auth/tests/test_adal_authentication.py
|
YuanyuanNi/azure-cli
|
63844964374858bfacd209bfe1b69eb456bd64ca
|
[
"MIT"
] | 2,575
|
2016-07-26T06:44:40.000Z
|
2022-03-31T22:56:06.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from azure.cli.core.auth.adal_authentication import _normalize_expires_on
if __name__ == '__main__':
unittest.main()
| 37.368421
| 94
| 0.557746
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from azure.cli.core.auth.adal_authentication import _normalize_expires_on
class TestUtil(unittest.TestCase):
def test_normalize_expires_on(self):
assert _normalize_expires_on("11/05/2021 15:18:31 +00:00") == 1636125511
assert _normalize_expires_on('1636125511') == 1636125511
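# The assertions above pin down the contract of _normalize_expires_on: it takes
# either an epoch string or an ADAL-style "MM/DD/YYYY HH:MM:SS +00:00" timestamp
# and returns an integer epoch. A rough sketch of that behaviour (an illustrative
# assumption, not the actual azure-cli implementation):
#
#   from datetime import datetime
#
#   def _normalize_expires_on_sketch(value):
#       try:
#           return int(value)
#       except ValueError:
#           return int(datetime.strptime(value, "%m/%d/%Y %H:%M:%S %z").timestamp())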
if __name__ == '__main__':
unittest.main()
| 161
| 13
| 49
|
cc8b768dfe5908a23f0c137ef0e57006b53db5c0
| 5,861
|
py
|
Python
|
loggers_control/scripts/solo_escape_vpg_train.py
|
IRASatUC/two_loggers
|
c5c99868a9c896aa2fdb940f2f7b7173abed9e00
|
[
"MIT"
] | null | null | null |
loggers_control/scripts/solo_escape_vpg_train.py
|
IRASatUC/two_loggers
|
c5c99868a9c896aa2fdb940f2f7b7173abed9e00
|
[
"MIT"
] | null | null | null |
loggers_control/scripts/solo_escape_vpg_train.py
|
IRASatUC/two_loggers
|
c5c99868a9c896aa2fdb940f2f7b7173abed9e00
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
"""
An implementation of Vanilla Policy Gradient (VPG) for solo_escape_task
VPG is a model free, on policy, reinforcement learning algorithm (https://papers.nips.cc/paper/1713-policy-gradient-methods-for-reinforcement-learning-with-function-approximation.pdf)
Author: LinZHanK (linzhank@gmail.com)
"""
from __future__ import absolute_import, division, print_function
import sys
import os
import time
from datetime import datetime
import numpy as np
import tensorflow as tf
import rospy
from envs.solo_escape_task_env import SoloEscapeEnv
from utils import data_utils, solo_utils, tf_utils
from utils.data_utils import bcolors
from agents.vpg import VPGAgent
if __name__ == "__main__":
# create argument parser
args = data_utils.get_args()
# start timing training
rospy.init_node("solo_escape_dqn", anonymous=True, log_level=rospy.INFO)
# make an instance from env class
env = SoloEscapeEnv()
env.reset()
agent_params = {}
train_params = {}
# agent parameters
agent_params["dim_state"] = len(solo_utils.obs_to_state(env.observation))
agent_params["actions"] = np.array([np.array([1, -1]), np.array([1, 1])])
agent_params["layer_sizes"] = args.layer_sizes
agent_params["learning_rate"] = args.learning_rate
# training params
if args.datetime:
train_params["datetime"] = args.datetime
else:
train_params["datetime"] = datetime.now().strftime("%Y-%m-%d-%H-%M")
train_params["num_epochs"] = args.num_epochs
train_params["num_steps"] = args.num_steps
train_params["time_bonus"] = -1./train_params['num_steps']
train_params["success_bonus"] = 0
train_params["wall_bonus"] = -10./train_params["num_steps"]
train_params["door_bonus"] = 0
train_params["sample_size"] = args.sample_size
# instantiate agent
agent = VPGAgent(agent_params)
# specify model path
model_path = os.path.dirname(sys.path[0])+"/saved_models/solo_escape/vpg/"+train_params["datetime"]+"/agent/model.h5"
update_counter = 0
episodic_returns = []
episode = 0
step = 0
start_time = time.time()
for ep in range(train_params['num_epochs']):
# init training batches
batch_states = []
batch_acts = []
batch_rtaus = []
# init episode
obs, _ = env.reset()
state_0 = solo_utils.obs_to_state(obs)
done, ep_rewards = False, []
batch_counter = 0
while True:
# take action by sampling policy_net predictions
act_id = agent.sample_action(state_0)
action = agent.actions[act_id]
obs, rew, done, info = env.step(action)
state_1 = solo_utils.obs_to_state(obs)
# adjust reward
rew, done = solo_utils.adjust_reward(train_params, env)
# fill training batch
batch_acts.append(act_id)
batch_states.append(state_0)
# update
ep_rewards.append(rew)
state_0 = state_1
print(
bcolors.OKGREEN,
"Epoch: {} \nEpisode: {}, Step: {} \naction: {}->{}, state: {}, reward/episodic_return: {}/{}, status: {}, success: {}".format(
ep,
episode,
step,
act_id,
action,
state_1,
rew,
sum(ep_rewards),
info,
env.success_count
),
bcolors.ENDC
)
# step increment
step += 1
if done:
ep_return, ep_length = sum(ep_rewards), len(ep_rewards)
batch_rtaus += list(solo_utils.reward_to_go(ep_rewards))
assert len(batch_rtaus) == len(batch_states)
# store episodic_return
episodic_returns.append(ep_return)
# reset to a new episode
obs, _ = env.reset()
done, ep_rewards = False, []
state_0 = solo_utils.obs_to_state(obs)
episode += 1
step = 0
print(
bcolors.OKGREEN,
"current batch size: {}".format(len(batch_rtaus)),
bcolors.ENDC
)
if len(batch_rtaus) > train_params['sample_size']:
break
agent.train(batch_states, batch_acts, batch_rtaus)
agent.save_model(model_path)
# time training
end_time = time.time()
training_time = end_time - start_time
# plot episodic returns
data_utils.plot_returns(returns=episodic_returns, mode=0, save_flag=True, fdir=os.path.dirname(model_path))
# plot accumulated returns
data_utils.plot_returns(returns=episodic_returns, mode=1, save_flag=True, fdir=os.path.dirname(model_path))
# plot averaged return
data_utils.plot_returns(returns=episodic_returns, mode=2, save_flag=True,
fdir=os.path.dirname(model_path))
# save agent parameters
data_utils.save_pkl(content=agent_params, fdir=os.path.dirname(model_path), fname="agent_parameters.pkl")
# save returns
data_utils.save_pkl(content=episodic_returns, fdir=os.path.dirname(os.path.dirname(model_path)), fname="episodic_returns.pkl")
# save results
train_info = train_params
train_info["success_count"] = env.success_count
train_info["training_time"] = training_time
train_info["learning_rate"] = agent_params["learning_rate"]
train_info["state_dimension"] = agent_params["dim_state"]
train_info["action_options"] = agent_params["actions"]
train_info["layer_sizes"] = agent_params["layer_sizes"]
data_utils.save_csv(content=train_info, fdir=os.path.dirname(os.path.dirname(model_path)), fname="train_information.csv")
| 40.42069
| 183
| 0.627026
|
#! /usr/bin/env python
"""
An implementation of Vanilla Policy Gradient (VPG) for solo_escape_task
VPG is a model free, on policy, reinforcement learning algorithm (https://papers.nips.cc/paper/1713-policy-gradient-methods-for-reinforcement-learning-with-function-approximation.pdf)
Author: LinZHanK (linzhank@gmail.com)
"""
from __future__ import absolute_import, division, print_function
import sys
import os
import time
from datetime import datetime
import numpy as np
import tensorflow as tf
import rospy
from envs.solo_escape_task_env import SoloEscapeEnv
from utils import data_utils, solo_utils, tf_utils
from utils.data_utils import bcolors
from agents.vpg import VPGAgent
if __name__ == "__main__":
# create argument parser
args = data_utils.get_args()
# start timing training
rospy.init_node("solo_escape_dqn", anonymous=True, log_level=rospy.INFO)
# make an instance from env class
env = SoloEscapeEnv()
env.reset()
agent_params = {}
train_params = {}
# agent parameters
agent_params["dim_state"] = len(solo_utils.obs_to_state(env.observation))
agent_params["actions"] = np.array([np.array([1, -1]), np.array([1, 1])])
agent_params["layer_sizes"] = args.layer_sizes
agent_params["learning_rate"] = args.learning_rate
# training params
if args.datetime:
train_params["datetime"] = args.datetime
else:
train_params["datetime"] = datetime.now().strftime("%Y-%m-%d-%H-%M")
train_params["num_epochs"] = args.num_epochs
train_params["num_steps"] = args.num_steps
train_params["time_bonus"] = -1./train_params['num_steps']
train_params["success_bonus"] = 0
train_params["wall_bonus"] = -10./train_params["num_steps"]
train_params["door_bonus"] = 0
train_params["sample_size"] = args.sample_size
# instantiate agent
agent = VPGAgent(agent_params)
# specify model path
model_path = os.path.dirname(sys.path[0])+"/saved_models/solo_escape/vpg/"+train_params["datetime"]+"/agent/model.h5"
update_counter = 0
episodic_returns = []
episode = 0
step = 0
start_time = time.time()
for ep in range(train_params['num_epochs']):
# init training batches
batch_states = []
batch_acts = []
batch_rtaus = []
# init episode
obs, _ = env.reset()
state_0 = solo_utils.obs_to_state(obs)
done, ep_rewards = False, []
batch_counter = 0
while True:
# take action by sampling policy_net predictions
act_id = agent.sample_action(state_0)
action = agent.actions[act_id]
obs, rew, done, info = env.step(action)
state_1 = solo_utils.obs_to_state(obs)
# adjust reward
rew, done = solo_utils.adjust_reward(train_params, env)
# fill training batch
batch_acts.append(act_id)
batch_states.append(state_0)
# update
ep_rewards.append(rew)
state_0 = state_1
print(
bcolors.OKGREEN,
"Epoch: {} \nEpisode: {}, Step: {} \naction: {}->{}, state: {}, reward/episodic_return: {}/{}, status: {}, success: {}".format(
ep,
episode,
step,
act_id,
action,
state_1,
rew,
sum(ep_rewards),
info,
env.success_count
),
bcolors.ENDC
)
# step increment
step += 1
if done:
ep_return, ep_length = sum(ep_rewards), len(ep_rewards)
batch_rtaus += list(solo_utils.reward_to_go(ep_rewards))
assert len(batch_rtaus) == len(batch_states)
# store episodic_return
episodic_returns.append(ep_return)
# reset to a new episode
obs, _ = env.reset()
done, ep_rewards = False, []
state_0 = solo_utils.obs_to_state(obs)
episode += 1
step = 0
print(
bcolors.OKGREEN,
"current batch size: {}".format(len(batch_rtaus)),
bcolors.ENDC
)
if len(batch_rtaus) > train_params['sample_size']:
break
agent.train(batch_states, batch_acts, batch_rtaus)
agent.save_model(model_path)
# time training
end_time = time.time()
training_time = end_time - start_time
# plot episodic returns
data_utils.plot_returns(returns=episodic_returns, mode=0, save_flag=True, fdir=os.path.dirname(model_path))
# plot accumulated returns
data_utils.plot_returns(returns=episodic_returns, mode=1, save_flag=True, fdir=os.path.dirname(model_path))
# plot averaged return
data_utils.plot_returns(returns=episodic_returns, mode=2, save_flag=True,
fdir=os.path.dirname(model_path))
# save agent parameters
data_utils.save_pkl(content=agent_params, fdir=os.path.dirname(model_path), fname="agent_parameters.pkl")
# save returns
data_utils.save_pkl(content=episodic_returns, fdir=os.path.dirname(os.path.dirname(model_path)), fname="episodic_returns.pkl")
# save results
train_info = train_params
train_info["success_count"] = env.success_count
train_info["training_time"] = training_time
train_info["learning_rate"] = agent_params["learning_rate"]
train_info["state_dimension"] = agent_params["dim_state"]
train_info["action_options"] = agent_params["actions"]
train_info["layer_sizes"] = agent_params["layer_sizes"]
data_utils.save_csv(content=train_info, fdir=os.path.dirname(os.path.dirname(model_path)), fname="train_information.csv")
| 0
| 0
| 0
|
e45667b241db8c1639b0fc558affff0913b99a11
| 855
|
py
|
Python
|
buycoins/auth.py
|
iyanuashiri/buycoin-python-sdk
|
c791ad48dc2ca518c5e05c09fbcaf8e05f8a22b9
|
[
"MIT"
] | 1
|
2021-05-12T11:24:11.000Z
|
2021-05-12T11:24:11.000Z
|
buycoins/auth.py
|
iyanuashiri/buycoin-python-sdk
|
c791ad48dc2ca518c5e05c09fbcaf8e05f8a22b9
|
[
"MIT"
] | null | null | null |
buycoins/auth.py
|
iyanuashiri/buycoin-python-sdk
|
c791ad48dc2ca518c5e05c09fbcaf8e05f8a22b9
|
[
"MIT"
] | null | null | null |
# Buycoin Python SDK
# Copyright 2021 Iyanuoluwa Ajao
# See LICENCE for details.
"""
Authentication is handled by the :any:`BuycoinsAuth` class.
>>> auth = BuycoinsAuth(api_key)
Note:
You can also use this class to handle authentication for you if you
are making your own wrapper:
>>> auth = BuycoinsAuth(api_key)
>>>
>>> response = requests.get('https://api.airtable.com/v0/{basekey}/{table_name}', auth=auth)
"""
from requests.auth import AuthBase
| 25.147059
| 96
| 0.65731
|
# Buycoin Python SDK
# Copyright 2021 Iyanuoluwa Ajao
# See LICENCE for details.
"""
Authentication is handled by the :any:`BuycoinsAuth` class.
>>> auth = BuycoinsAuth(api_key)
Note:
You can also use this class to handle authentication for you if you
are making your own wrapper:
>>> auth = BuycoinsAuth(api_key)
>>>
>>> response = requests.get('https://api.airtable.com/v0/{basekey}/{table_name}', auth=auth)
"""
from requests.auth import AuthBase
class BuycoinsAuth(AuthBase):
def __init__(self, api_key):
"""
Authentication used by Buycoin
:param api_key:
:return:
"""
self.api_key = api_key
def __call__(self, request):
auth_token = {"Authorization": f"Basic {self.api_key}"}
request.headers.update(auth_token)
return request
| 137
| 202
| 23
|
3fbb3343817355008ebb4eef4a88009376424e09
| 5,360
|
py
|
Python
|
Utilities/ReleaseScripts/scripts/ws_sso_content_reader.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
Utilities/ReleaseScripts/scripts/ws_sso_content_reader.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
Utilities/ReleaseScripts/scripts/ws_sso_content_reader.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
#!/usr/bin/env python3
###Description: The tool reads cern web services behind SSO using user certificates
from __future__ import print_function
import os, urllib, urllib2, httplib, cookielib, sys, HTMLParser, re
from optparse import OptionParser
if __name__ == "__main__":
parser = OptionParser(usage="%prog [-d(ebug)] -o(ut) COOKIE_FILENAME -c(cert) CERN-PEM -k(ey) CERT-KEY -u(rl) URL")
parser.add_option("-d", "--debug", dest="debug", help="Enable pycurl debugging. Prints to data and headers to stderr.", action="store_true", default=False)
parser.add_option("-p", "--postdata", dest="postdata", help="Data to be sent as post request", action="store", default=None)
parser.add_option("-c", "--cert", dest="cert_path", help="[REQUIRED] Absolute path to cert file.", action="store")
parser.add_option("-k", "--key", dest="key_path", help="[REQUIRED] Absolute path to key file.", action="store")
parser.add_option("-u", "--url", dest="url", help="[REQUIRED] Url to a service behind the SSO", action="store")
(opts, args) = parser.parse_args()
checkRequiredArguments(opts, parser)
content = getContent(opts.url, opts.cert_path, opts.key_path, opts.postdata, opts.debug)
print(content)
| 44.666667
| 157
| 0.705037
|
#!/usr/bin/env python3
###Description: The tool reads cern web services behind SSO using user certificates
from __future__ import print_function
import os, urllib, urllib2, httplib, cookielib, sys, HTMLParser, re
from optparse import OptionParser
def getFile(path):
npath = os.path.expanduser(path)
while os.path.islink(npath):
path = os.readlink(npath)
if path[0] != "/": path = os.path.join(os.path.dirname(npath),path)
npath = path
return npath
class HTTPSClientAuthHandler(urllib2.HTTPSHandler):
def __init__(self, key, cert):
urllib2.HTTPSHandler.__init__(self)
self.key = getFile(key)
self.cert = getFile(cert)
def https_open(self, req):
return self.do_open(self.getConnection, req)
def getConnection(self, host, timeout=300):
return httplib.HTTPSConnection(host, key_file=self.key, cert_file=self.cert)
def _getResponse(opener, url, post_data=None, debug=False):
response = opener.open(url, post_data)
if debug:
sys.stderr.write("Code: %s\n" % response.code)
sys.stderr.write("Headers: %s\n" % response.headers)
sys.stderr.write("Msg: %s\n" % response.msg)
sys.stderr.write("Url: %s\n" % response.url)
return response
def getResponseContent(opener, url, post_data=None, debug=False):
return _getResponse(opener, url, post_data, debug).read()
def getResponseURL(opener, url, post_data=None, debug=False):
return urllib2.unquote(_getResponse(opener, url, post_data, debug).url)
def getParentURL(url):
items = url.split("/")
return '%s//%s/%s/' % (items[0],items[2],items[3])
def getSSOCookie(opener, target_url, cookie, debug=False):
opener.addheaders = [('User-agent', 'curl-sso-certificate/0.0.2')] #in sync with cern-get-sso-cookie tool
url = getResponseURL(opener, getParentURL(target_url), debug=debug)
content = getResponseContent(opener, url, debug=debug)
ret = re.search('<form .+? action="(.+?)">', content)
if ret == None:
raise Exception("error: The page doesn't have the form with adfs url, check 'User-agent' header")
url = urllib2.unquote(ret.group(1))
h = HTMLParser.HTMLParser()
post_data_local = ''
for match in re.finditer('input type="hidden" name="([^"]*)" value="([^"]*)"', content):
post_data_local += "&%s=%s" % (match.group(1), urllib.quote(h.unescape(match.group(2))))
is_link_found = True
if not is_link_found:
raise Exception("error: The page doesn't have the form with security attributes, check 'User-agent' header")
post_data_local = post_data_local[1:] #remove first &
getResponseContent(opener, url, post_data_local, debug)
def getContent(target_url, cert_path, key_path, post_data=None, debug=False, adfslogin=None):
opener = urllib2.build_opener(urllib2.HTTPSHandler())
if adfslogin:
opener.addheaders = [('Adfs-Login', adfslogin)] #local version of tc test
#try to access the url first
try:
content = getResponseContent(opener, target_url, post_data, debug)
if not 'Sign in with your CERN account' in content:
return content
except Exception:
if debug:
sys.stderr.write("The request has an error, will try to create a new cookie\n")
cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie), HTTPSClientAuthHandler(key_path, cert_path)) #will use private key and certificate
if debug:
sys.stderr.write("The return page is sso login page, will request cookie.")
hasCookie = False
# if the access gave an exception, try to get a cookie
try:
getSSOCookie(opener, target_url, cookie, debug)
hasCookie = True
result = getResponseContent(opener, target_url, post_data, debug)
except Exception as e:
result = ""
print(sys.stderr.write("ERROR:"+str(e)))
if hasCookie:
burl = getParentURL(target_url)
try:
_getResponse(opener, burl+"signOut").read()
_getResponse(opener, "https://login.cern.ch/adfs/ls/?wa=wsignout1.0").read()
except:
sys.stderr.write("Error, could not logout correctly from server")
return result
def checkRequiredArguments(opts, parser):
missing_options = []
for option in parser.option_list:
if re.match(r'^\[REQUIRED\]', option.help) and eval('opts. %s' % option.dest) == None:
missing_options.extend(option._long_opts)
if len(missing_options) > 0:
parser.error('Missing REQUIRED parameters: %s' % str(missing_options))
if __name__ == "__main__":
parser = OptionParser(usage="%prog [-d(ebug)] -o(ut) COOKIE_FILENAME -c(cert) CERN-PEM -k(ey) CERT-KEY -u(rl) URL")
parser.add_option("-d", "--debug", dest="debug", help="Enable pycurl debugging. Prints to data and headers to stderr.", action="store_true", default=False)
parser.add_option("-p", "--postdata", dest="postdata", help="Data to be sent as post request", action="store", default=None)
parser.add_option("-c", "--cert", dest="cert_path", help="[REQUIRED] Absolute path to cert file.", action="store")
parser.add_option("-k", "--key", dest="key_path", help="[REQUIRED] Absolute path to key file.", action="store")
parser.add_option("-u", "--url", dest="url", help="[REQUIRED] Url to a service behind the SSO", action="store")
(opts, args) = parser.parse_args()
checkRequiredArguments(opts, parser)
content = getContent(opts.url, opts.cert_path, opts.key_path, opts.postdata, opts.debug)
print(content)
| 3,832
| 30
| 290
|
dfa763b060a7dac72bb41d68286cd5c008f99b9d
| 5,437
|
py
|
Python
|
client/verta/tests/custom_modules/test_custom_modules.py
|
mitdbg/modeldb
|
1d42591ea7ce2a1f7211e40dc46a79e53fa290f0
|
[
"MIT"
] | 835
|
2017-02-08T20:14:24.000Z
|
2020-03-12T17:37:49.000Z
|
client/verta/tests/custom_modules/test_custom_modules.py
|
mitdbg/modeldb
|
1d42591ea7ce2a1f7211e40dc46a79e53fa290f0
|
[
"MIT"
] | 113
|
2017-02-12T02:04:37.000Z
|
2019-12-05T09:33:12.000Z
|
client/verta/tests/custom_modules/test_custom_modules.py
|
mitdbg/modeldb
|
1d42591ea7ce2a1f7211e40dc46a79e53fa290f0
|
[
"MIT"
] | 170
|
2017-02-13T14:49:22.000Z
|
2020-02-19T17:59:12.000Z
|
# -*- coding: utf-8 -*-
import filecmp
import json
import os
import pkgutil
import zipfile
import hypothesis
import pytest
import six
from verta.tracking.entities._deployable_entity import _DeployableEntity
from verta._internal_utils.custom_modules import CustomModules
from .. import utils
from . import contexts
| 39.398551
| 97
| 0.622034
|
# -*- coding: utf-8 -*-
import filecmp
import json
import os
import pkgutil
import zipfile
import hypothesis
import pytest
import six
from verta.tracking.entities._deployable_entity import _DeployableEntity
from verta._internal_utils.custom_modules import CustomModules
from .. import utils
from . import contexts
class TestPipInstalledModule:
@staticmethod
def assert_in_custom_modules(custom_modules, module_name):
module = CustomModules.get_module_path(module_name)
with utils.tempdir() as custom_modules_dir:
with zipfile.ZipFile(custom_modules, "r") as zipf:
zipf.extractall(custom_modules_dir)
# TODO: extract sys.path from _verta_config.py instead of walking
for parent_dir, dirnames, filenames in os.walk(custom_modules_dir):
if os.path.basename(module) in dirnames + filenames:
retrieved_module = os.path.join(
parent_dir,
os.path.basename(module),
)
break
else:
raise ValueError("module not found in custom modules")
if os.path.isfile(module):
assert filecmp.cmp(module, retrieved_module)
else:
utils.assert_dirs_match(module, retrieved_module)
@pytest.mark.parametrize(
"name",
sorted(module[1] for module in pkgutil.iter_modules()),
)
def test_module(self, name):
"""pip-installed module can be collected."""
if name == "tests" or name == "conftest" or name.startswith("test_"):
pytest.skip(
"pytest modifies both import mechanisms and module objects,"
" which we can't handle right now"
)
if six.PY2 and name == "pytest_forked":
pytest.skip(
"pytest_forked insists on having an empty __pycache__,"
" which custom modules ignores, which fails our match check"
)
if CustomModules.get_module_path(name) in ("built-in", "frozen"):
pytest.skip("built into Python; no module file to collect")
if six.PY2 and (name.startswith("tensorflow_") or name == "torch"):
pytest.skip("takes too long")
custom_modules = _DeployableEntity._custom_modules_as_artifact([name])
self.assert_in_custom_modules(custom_modules, name)
@pytest.mark.parametrize(
"names",
[
["cloudpickle", "hypothesis"],
["cloudpickle", "hypothesis", "pytest"],
],
)
def test_multiple_modules(self, names):
"""Multiple pip-installed modules can be collected at once."""
custom_modules = _DeployableEntity._custom_modules_as_artifact(names)
for name in names:
self.assert_in_custom_modules(custom_modules, name)
def test_module_and_local_dir_have_same_name(self, worker_id):
"""If a pip-installed module and a local directory share a name, the module is collected.
If a user can import a package "foo" in their environment, and uses
custom modules to find "foo", we will prefer that package over a
directory/file "foo" in the cwd. Otherwise, it is very difficult or
impossible to force the use of the installed package.
"""
name = worker_id
# avoid using an existing package name
hypothesis.assume(not CustomModules.is_importable(name))
with utils.chtempdir():
# create local directory with same name as package
local_dir = os.path.abspath(name)
os.mkdir(local_dir)
with open(os.path.join(local_dir, "empty.json"), "w") as f:
json.dump({}, f)
# create package in another directory and install
with utils.tempdir() as tempd:
with contexts.installable_package(name, dir=tempd) as pkg_dir:
with contexts.installed_local_package(pkg_dir, name):
# collect and validate custom modules
custom_modules = _DeployableEntity._custom_modules_as_artifact(
[name],
)
self.assert_in_custom_modules(custom_modules, name)
def test_module_and_local_pkg_have_same_name(self, worker_id):
"""A specific case of :meth:`test_module_and_local_dir_have_same_name`.
The local directory *is* a Python package repository (but not directly
importable without ``cd``ing one level into it).
A user may have a monolithic project with model management scripts
alongside Python package directories (that may *also* be installed
into the environment).
"""
name = worker_id
# avoid using an existing package name
hypothesis.assume(not CustomModules.is_importable(name))
with utils.chtempdir():
# create package in *current* directory and install
with contexts.installable_package(name, dir=".") as pkg_dir:
with contexts.installed_local_package(pkg_dir, name):
# collect and validate custom modules
custom_modules = _DeployableEntity._custom_modules_as_artifact(
[name],
)
self.assert_in_custom_modules(custom_modules, name)
| 954
| 4,141
| 23
|
9f0907cd2068ea5228993bfd0610324a10152909
| 135
|
py
|
Python
|
src/privatemedia/fileutils.py
|
Talengi/phase
|
60ff6f37778971ae356c5b2b20e0d174a8288bfe
|
[
"MIT"
] | 8
|
2016-01-29T11:53:40.000Z
|
2020-03-02T22:42:02.000Z
|
src/privatemedia/fileutils.py
|
PhaseDMS/phase
|
4f776d0b1b5e7916a3e26aee890b3c2b9454ef0e
|
[
"MIT"
] | 289
|
2015-03-23T07:42:52.000Z
|
2022-03-11T23:26:10.000Z
|
src/privatemedia/fileutils.py
|
Talengi/phase
|
60ff6f37778971ae356c5b2b20e0d174a8288bfe
|
[
"MIT"
] | 7
|
2015-12-08T09:03:20.000Z
|
2020-05-11T15:36:51.000Z
|
# We need those imports for migrations compatibility purposes
from privatemedia.storage import ProtectedStorage, PrivateStorage # noqa
| 45
| 73
| 0.844444
|
# We need those imports for migrations compatibility purposes
from privatemedia.storage import ProtectedStorage, PrivateStorage # noqa
| 0
| 0
| 0
|
2d7635ac6f4ed320e89fc80aef9f0d67f08346b4
| 1,786
|
py
|
Python
|
remote/vagrant_api.py
|
devaos/sublime-remote
|
d0dd57f464d599f0277b76bc19a4ba8c940f081d
|
[
"MIT"
] | 2
|
2016-12-30T12:43:54.000Z
|
2018-06-23T20:08:24.000Z
|
remote/vagrant_api.py
|
devaos/sublime-remote
|
d0dd57f464d599f0277b76bc19a4ba8c940f081d
|
[
"MIT"
] | null | null | null |
remote/vagrant_api.py
|
devaos/sublime-remote
|
d0dd57f464d599f0277b76bc19a4ba8c940f081d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Ari Aosved
# http://github.com/devaos/sublime-remote/blob/master/LICENSE
"""This module implements an API layer for Vagrant related functionality."""
import re
import subprocess
# =============================================================================
def parse_vm_id(line):
"""Determine if a line appears to be from `vagrant global-status`."""
parts = re.split("\s+", line)
if len(parts) == 5 and parts[0] != "id" \
and re.match("^[0-9a-f]{1,7}$", parts[0]):
return parts[0]
return None
def get_vm_list(opt):
"""Pull a list of all running vagrant VMs for the user to choose from."""
p1 = subprocess.Popen(["/usr/bin/vagrant", "global-status"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while True:
buf = p1.stdout.readline()
decoded = buf.decode("utf-8").rstrip()
if decoded == "" and p1.poll() is not None:
break
if decoded == "":
continue
if parse_vm_id(decoded) is not None:
opt.append(decoded)
return opt
def get_ssh_options(vm):
"""Pull the ssh options required to connect to a specific vagrant VM."""
cmd = 'PATH="${PATH}:/usr/local/bin" /usr/bin/vagrant ssh-config'
cmd = cmd + ' ' + vm + '; exit 0'
print("ssh options cmd", cmd)
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
out = out.decode("utf-8").rstrip()
print("ssh options output", out)
obj = [s.strip().split(' ') for s in out.splitlines()]
opt = []
for field in obj:
if field[0] != "Host":
opt.append("=".join(field))
opt = "-o " + " -o ".join(opt)
print("ssh options parsed", opt)
return opt
| 27.476923
| 79
| 0.56383
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Ari Aosved
# http://github.com/devaos/sublime-remote/blob/master/LICENSE
"""This module implements an API layer for Vagrant related functionality."""
import re
import subprocess
# =============================================================================
def parse_vm_id(line):
"""Determine if a line appears to be from `vagrant global-status`."""
parts = re.split("\s+", line)
if len(parts) == 5 and parts[0] != "id" \
and re.match("^[0-9a-f]{1,7}$", parts[0]):
return parts[0]
return None
def get_vm_list(opt):
"""Pull a list of all running vagrant VMs for the user to choose from."""
p1 = subprocess.Popen(["/usr/bin/vagrant", "global-status"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while True:
buf = p1.stdout.readline()
decoded = buf.decode("utf-8").rstrip()
if decoded == "" and p1.poll() is not None:
break
if decoded == "":
continue
if parse_vm_id(decoded) is not None:
opt.append(decoded)
return opt
def get_ssh_options(vm):
"""Pull the ssh options required to connect to a specific vagrant VM."""
cmd = 'PATH="${PATH}:/usr/local/bin" /usr/bin/vagrant ssh-config'
cmd = cmd + ' ' + vm + '; exit 0'
print("ssh options cmd", cmd)
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
out = out.decode("utf-8").rstrip()
print("ssh options output", out)
obj = [s.strip().split(' ') for s in out.splitlines()]
opt = []
for field in obj:
if field[0] != "Host":
opt.append("=".join(field))
opt = "-o " + " -o ".join(opt)
print("ssh options parsed", opt)
return opt
| 0
| 0
| 0
|
a1a8c31e5c119dbb7727ceec4702308b97c2a5f5
| 2,814
|
py
|
Python
|
gym_framework/panda_ctrl/panda_mujoco_torque_ctrl.py
|
Yucheng-Tang/SimulationFrameworkPublic
|
3a65cbc0f18ac4b04f8aef7e6e2f9ad9790179c6
|
[
"MIT"
] | 3
|
2020-11-16T09:01:56.000Z
|
2021-12-21T09:24:45.000Z
|
gym_framework/panda_ctrl/panda_mujoco_torque_ctrl.py
|
Yucheng-Tang/SimulationFrameworkPublic
|
3a65cbc0f18ac4b04f8aef7e6e2f9ad9790179c6
|
[
"MIT"
] | null | null | null |
gym_framework/panda_ctrl/panda_mujoco_torque_ctrl.py
|
Yucheng-Tang/SimulationFrameworkPublic
|
3a65cbc0f18ac4b04f8aef7e6e2f9ad9790179c6
|
[
"MIT"
] | 8
|
2020-11-24T15:59:01.000Z
|
2022-02-18T15:15:26.000Z
|
import gym
import numpy as np
from gym_framework.panda_ctrl.panda_mujoco_base_ctrl import PandaBase
class PandaTorqueControl(PandaBase):
"""
Control the Panda robot by directly applying torques (control=torque).
"""
@property
@property
@property
@property
| 36.545455
| 111
| 0.603412
|
import gym
import numpy as np
from gym_framework.panda_ctrl.panda_mujoco_base_ctrl import PandaBase
class PandaTorqueControl(PandaBase):
"""
Control the Panda robot by directly applying torques (control=torque).
"""
def __init__(self, render=True):
super().__init__(render=render)
self._action_dimension = 8 # 7 x actuator torque; 1 gripper width
def apply_action(self, action):
assert len(action) == self.action_dimension, ("Error, wrong action dimension. Expected: " +
str(self.action_dimension) + ". Got:" + str(len(action)))
action = self.bound_action(action).copy()
gripper_ctrl = action[7]
joint_action = action[:7] * self.sim.model.opt.timestep
# Set the joint command for the simulation
self.sim.data.ctrl[:] = np.concatenate(([gripper_ctrl, gripper_ctrl], joint_action))
# Apply gravity compensation
self.sim.data.qfrc_applied[self.joint_indices] = self.sim.data.qfrc_bias[self.joint_indices]
# Forward the simulation
self.sim.step()
# Render the scene
if self.render and self.viewer is not None:
self.viewer.render()
@property
def ctrl_name(self):
return 'torque'
@property
def action_dimension(self):
return self._action_dimension
@property
def action_space(self):
# upper and lower bounds on the actions
low = [-87, -87, -87, -87, -12, -12, -12, -50]
high = [87, 87, 87, 87, 12, 12, 12, 50]
action_space = gym.spaces.Box(low=np.array(low),
high=np.array(high))
return action_space
@property
def state(self):
current_joint_position = [self.sim.data.get_joint_qpos(j_name) for j_name in self.joint_names]
current_joint_velocity = [self.sim.data.get_joint_qvel(j_name) for j_name in self.joint_names]
current_finger_position = [self.sim.data.get_joint_qpos(j_name) for j_name in self.gripper_names]
current_finger_velocity = [self.sim.data.get_joint_qvel(j_name) for j_name in self.gripper_names]
tcp_pos = self.sim.data.get_body_xpos('tcp').copy()
tcp_quat = self.sim.data.get_body_xquat('tcp').copy()
tcp_velp = self.sim.data.get_body_xvelp('tcp').copy()
tcp_velr = self.sim.data.get_body_xvelr('tcp').copy()
return np.concatenate([current_joint_position,
current_joint_velocity,
current_finger_position,
current_finger_velocity,
tcp_pos,
tcp_quat,
tcp_velp,
tcp_velr])
| 2,365
| 0
| 158
|
041bab6ee68b38136895b05d676af598dec6d4bb
| 10,708
|
py
|
Python
|
main.py
|
Aus-miner/Miner-Model
|
f7abc9f74cec00f82a2df6e359363670a64ad72f
|
[
"MIT"
] | 18
|
2021-04-18T03:51:22.000Z
|
2022-03-16T13:14:36.000Z
|
main.py
|
Aus-miner/Miner-Model
|
f7abc9f74cec00f82a2df6e359363670a64ad72f
|
[
"MIT"
] | 1
|
2021-05-04T14:27:02.000Z
|
2021-05-04T14:27:02.000Z
|
main.py
|
Aus-miner/Miner-Model
|
f7abc9f74cec00f82a2df6e359363670a64ad72f
|
[
"MIT"
] | 8
|
2021-05-03T19:24:19.000Z
|
2022-02-20T22:20:18.000Z
|
import plotly.express as px
import plotly.io as pio
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
import numpy as np
from agents import *
from generators import *
from CMDataLoader import CMDataLoader
from Simulator import Simulator
from plotutils import update_layout_wrapper
import config
import constants
import random
# my_palette = ["#264653","#9D1DC8","#287271", "#645DAC","#636EFA", "#ECA400","#FE484E","#8484E8", "#03b800" ,"#9251e1","#F4A261"]
# my_palette = ["#54478c","#9D1DC8","#2c699a","#048ba8","#0db39e","#16db93","#83e377","#b9e769","#efea5a","#f1c453","#f29e4c"]
my_palette = ["#1f00a7","#9d1dc8","#00589f","#009b86","#00a367","#67a300","#645dac","#eca400","#fd7e00","#b6322b", "#FE484E"]
hardware_palette = ["#009b86", "#9D1DC8"]
opex_palette = ["#9D1DC8","#264653","#8484E8"]
primary_color = ["#9d1dc8"]
if __name__ == '__main__':
random.seed(1032009)
np.random.seed(1032009)
n_trials = 25
fee_params = CMDataLoader.get_historical_fee_params()
block_subsidy = 6.25
historical_price_params = CMDataLoader.get_historical_price_params()
get_summary_plots(historical_price_params, fee_params, block_subsidy, n_trials, "with Historical Parameters", "historical")
bearish_price_params = (historical_price_params[0], -1 * abs(historical_price_params[1]), historical_price_params[2])
get_summary_plots(bearish_price_params, fee_params, block_subsidy, n_trials, "with Bearish Parameters", "bearish")
corrections_price_params = (historical_price_params[0], 0, historical_price_params[2] * 1.25)
get_summary_plots(corrections_price_params, fee_params, block_subsidy, n_trials, "in Bull Market with Corrections", "corrections")
s9_s19_prices = {key: config.machine_prices[key] for key in [constants.MachineName.ANTMINER_S9, constants.MachineName.ANTMINER_S19]}
get_summary_plots(historical_price_params, fee_params, block_subsidy, n_trials, "with Historical Parameters", "historical-machines", s9_s19_prices, [0.03], hardware_palette)
get_summary_plots_opex(bearish_price_params, fee_params, block_subsidy, n_trials, "with Bearish Parameters", "bearish-opex", s9_s19_prices, [0.03, 0.04, 0.05], opex_palette)
| 67.345912
| 201
| 0.653063
|
import plotly.express as px
import plotly.io as pio
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
import numpy as np
from agents import *
from generators import *
from CMDataLoader import CMDataLoader
from Simulator import Simulator
from plotutils import update_layout_wrapper
import config
import constants
import random
# my_palette = ["#264653","#9D1DC8","#287271", "#645DAC","#636EFA", "#ECA400","#FE484E","#8484E8", "#03b800" ,"#9251e1","#F4A261"]
# my_palette = ["#54478c","#9D1DC8","#2c699a","#048ba8","#0db39e","#16db93","#83e377","#b9e769","#efea5a","#f1c453","#f29e4c"]
my_palette = ["#1f00a7","#9d1dc8","#00589f","#009b86","#00a367","#67a300","#645dac","#eca400","#fd7e00","#b6322b", "#FE484E"]
hardware_palette = ["#009b86", "#9D1DC8"]
opex_palette = ["#9D1DC8","#264653","#8484E8"]
primary_color = ["#9d1dc8"]
def save_csvs(prices, global_hash_rate, n_trials, user_positions, file_suffix):
pd.DataFrame({'price': prices, 'hashrate': global_hash_rate, 'trials': n_trials}).to_csv(f"plots/{file_suffix}/env_values_{file_suffix}.csv", index = False)
user_positions.to_csv(f"plots/{file_suffix}/user_values_{file_suffix}.csv", index = False)
def get_environment_plots(prices, global_hash_rate, n_trials, title_suffix):
price_fig = update_layout_wrapper(px.line(x = list(range(len(prices))), y = prices,
labels = {"y": "Price (USD)", "x": "Day"},
title = f"Simulated Bitcoin Price over {n_trials} Trials {title_suffix}",
color_discrete_sequence = primary_color,
width=1600, height=900))
hashrate_fig = update_layout_wrapper(px.line(x = list(range(len(global_hash_rate))), y = global_hash_rate,
labels = {"y": "Hash Rate (EH/s)", "x": "Day"},
title = f"Simulated Bitcoin Network Hash Rate over {n_trials} Trials {title_suffix}",
color_discrete_sequence = primary_color,
width=1600, height=900))
return (price_fig, hashrate_fig)
def get_user_plots(user_positions, n_trials, title_suffix, elec_cost, palette):
user_positions_e_c = user_positions.loc[user_positions.elec_cost == elec_cost]
long_btc_fig = update_layout_wrapper(px.line(user_positions_e_c.loc[user_positions_e_c.strategy == constants.Strategy.LONG_BTC.value].sort_values(by=['day']),
x = "day", y = "total_position_usd", color = "machine_type",
labels = {"total_position_usd": "Simulated Position (USD)", "day": "Day", "machine_type": "Machine Type "},
title = f"Simulated Position Value over {n_trials} Trials {title_suffix}, Long BTC, ${elec_cost} per kWh",
color_discrete_sequence = palette,
width=1600, height=900))
sell_daily_fig = update_layout_wrapper(px.line(user_positions_e_c.loc[user_positions_e_c.strategy == constants.Strategy.SELL_DAILY.value].sort_values(by=['day']),
x = "day", y = "total_position_usd", color = "machine_type",
labels = {"total_position_usd": "Simulated Position (USD)", "day": "Day", "machine_type": "Machine Type "},
title = f"Simulated Position Value over {n_trials} Trials {title_suffix}, Selling Daily, ${elec_cost} per kWh",
color_discrete_sequence = palette,
width=1600, height=900))
return (long_btc_fig, sell_daily_fig)
def get_summary_plots(price_params, fee_params, block_subsidy, n_trials, title_suffix, file_suffix, user_machine_prices = config.machine_prices, elec_costs = [0.04, 0.07], palette = my_palette):
init_prices = PriceGenerator(price_params).generate_prices()
user_miners_long_btc, user_miners_sell_daily = UserMinerGenerator().generate_user_miners(machine_prices = user_machine_prices, elec_costs = elec_costs)
env_miners = MinerGenerator().generate_miner_distribution()
sim = Simulator(env_miners = env_miners,
user_miners_long_btc = user_miners_long_btc,
user_miners_sell_daily = user_miners_sell_daily,
prices = init_prices,
price_params = price_params,
fee_params = fee_params,
block_subsidy = block_subsidy)
sim.run_simulation_n_trials(n_trials)
user_positions = sim.get_avg_user_positions()
prices = sim.get_avg_prices()
global_hash_rate = sim.get_avg_global_hash_rate()
price_fig, hashrate_fig = get_environment_plots(prices, global_hash_rate, n_trials, title_suffix)
price_fig.write_image(f"plots/{file_suffix}/price_plot_{file_suffix}.png", scale=8)
hashrate_fig.write_image(f"plots/{file_suffix}/hashrate_plot_{file_suffix}.png", scale=8)
for elec_cost in user_positions.elec_cost.unique():
user_figs = get_user_plots(user_positions, n_trials, title_suffix, elec_cost, palette)
user_figs[0].write_image(f"plots/{file_suffix}/long_btc_plot_{file_suffix}_{int(elec_cost * 100)}.png", scale=8)
user_figs[1].write_image(f"plots/{file_suffix}/sell_daily_plot_{file_suffix}_{int(elec_cost * 100)}.png", scale=8)
save_csvs(prices, global_hash_rate, n_trials, user_positions, file_suffix)
def get_user_opex_plots(user_positions, n_trials, title_suffix, machine_type, palette):
user_positions_m_t = user_positions.loc[user_positions.machine_type == machine_type.value]
long_btc_fig = update_layout_wrapper(px.line(user_positions_m_t.loc[user_positions_m_t.strategy == constants.Strategy.LONG_BTC.value].sort_values(by=['day']),
x = "day", y = "total_position_usd", color = "elec_cost",
labels = {"total_position_usd": "Simulated Position (USD)", "day": "Day", "elec_cost": "Electricity Cost (USD/kWh) "},
title = f"Simulated Position Value over {n_trials} Trials using {machine_type.value} {title_suffix}, Long BTC",
color_discrete_sequence = palette,
width=1600, height=900))
sell_daily_fig = update_layout_wrapper(px.line(user_positions_m_t.loc[user_positions_m_t.strategy == constants.Strategy.SELL_DAILY.value].sort_values(by=['day']),
x = "day", y = "total_position_usd", color = "elec_cost",
labels = {"total_position_usd": "Simulated Position (USD)", "day": "Day", "elec_cost": "Electricity Cost (USD/kWh) "},
title = f"Simulated Position Value over {n_trials} Trials using {machine_type.value} {title_suffix}, Selling Daily",
color_discrete_sequence = palette,
width=1600, height=900))
return (long_btc_fig, sell_daily_fig)
def get_summary_plots_opex(price_params, fee_params, block_subsidy, n_trials, title_suffix, file_suffix, user_machine_prices = config.machine_prices, elec_costs = [0.04, 0.07], palette = opex_palette):
init_prices = PriceGenerator(price_params).generate_prices()
user_miners_long_btc, user_miners_sell_daily = UserMinerGenerator().generate_user_miners(machine_prices = user_machine_prices, elec_costs = elec_costs)
env_miners = MinerGenerator().generate_miner_distribution()
sim = Simulator(env_miners = env_miners,
user_miners_long_btc = user_miners_long_btc,
user_miners_sell_daily = user_miners_sell_daily,
prices = init_prices,
price_params = price_params,
fee_params = fee_params,
block_subsidy = block_subsidy)
sim.run_simulation_n_trials(n_trials)
user_positions = sim.get_avg_user_positions()
prices = sim.get_avg_prices()
global_hash_rate = sim.get_avg_global_hash_rate()
price_fig, hashrate_fig = get_environment_plots(prices, global_hash_rate, n_trials, title_suffix)
price_fig.write_image(f"plots/{file_suffix}/price_plot_{file_suffix}.png", scale=8)
hashrate_fig.write_image(f"plots/{file_suffix}/hashrate_plot_{file_suffix}.png", scale=8)
for machine_type in user_machine_prices:
user_figs = get_user_opex_plots(user_positions, n_trials, title_suffix, machine_type, palette)
user_figs[0].write_image(f"plots/{file_suffix}/long_btc_plot_{file_suffix}_{machine_type.value}.png", scale=8)
user_figs[1].write_image(f"plots/{file_suffix}/sell_daily_plot_{file_suffix}_{machine_type.value}.png", scale=8)
save_csvs(prices, global_hash_rate, n_trials, user_positions, file_suffix)
if __name__ == '__main__':
random.seed(1032009)
np.random.seed(1032009)
n_trials = 25
fee_params = CMDataLoader.get_historical_fee_params()
block_subsidy = 6.25
historical_price_params = CMDataLoader.get_historical_price_params()
get_summary_plots(historical_price_params, fee_params, block_subsidy, n_trials, "with Historical Parameters", "historical")
bearish_price_params = (historical_price_params[0], -1 * abs(historical_price_params[1]), historical_price_params[2])
get_summary_plots(bearish_price_params, fee_params, block_subsidy, n_trials, "with Bearish Parameters", "bearish")
corrections_price_params = (historical_price_params[0], 0, historical_price_params[2] * 1.25)
get_summary_plots(corrections_price_params, fee_params, block_subsidy, n_trials, "in Bull Market with Corrections", "corrections")
s9_s19_prices = {key: config.machine_prices[key] for key in [constants.MachineName.ANTMINER_S9, constants.MachineName.ANTMINER_S19]}
get_summary_plots(historical_price_params, fee_params, block_subsidy, n_trials, "with Historical Parameters", "historical-machines", s9_s19_prices, [0.03], hardware_palette)
get_summary_plots_opex(bearish_price_params, fee_params, block_subsidy, n_trials, "with Bearish Parameters", "bearish-opex", s9_s19_prices, [0.03, 0.04, 0.05], opex_palette)
| 8,331
| 0
| 138
|
2bd8edda0f4c727b34799dc6c9399a18d23a45a7
| 903
|
py
|
Python
|
src/testplates/impl/base/any_type.py
|
kprzybyla/testplates
|
156a373d9a0818c6074ec8d622d6ef1f867eafd3
|
[
"MIT"
] | null | null | null |
src/testplates/impl/base/any_type.py
|
kprzybyla/testplates
|
156a373d9a0818c6074ec8d622d6ef1f867eafd3
|
[
"MIT"
] | null | null | null |
src/testplates/impl/base/any_type.py
|
kprzybyla/testplates
|
156a373d9a0818c6074ec8d622d6ef1f867eafd3
|
[
"MIT"
] | null | null | null |
__all__ = (
"extract_value",
"extract_errors",
)
from typing import (
cast,
Any,
Type,
Union,
List,
)
from testplates.impl.value import (
MISSING,
)
from testplates.impl.exceptions import (
TestplatesError,
)
from .attrs import (
TESTPLATES_ERRORS_ATTR,
TESTPLATES_VALUE_ATTR,
)
def extract_value(
instance: Any,
) -> Any:
"""
Extracts value.
For internal use only.
"""
value = getattr(instance, TESTPLATES_VALUE_ATTR, MISSING)
return value
def extract_errors(
cls_or_instance: Union[Type[Any], Any],
) -> List[TestplatesError]:
"""
Extracts errors.
For internal use only.
"""
errors = getattr(cls_or_instance, TESTPLATES_ERRORS_ATTR, MISSING)
if errors is MISSING:
setattr(cls_or_instance, TESTPLATES_ERRORS_ATTR, errors := list())
return cast(List[TestplatesError], errors)
| 15.842105
| 74
| 0.657807
|
__all__ = (
"extract_value",
"extract_errors",
)
from typing import (
cast,
Any,
Type,
Union,
List,
)
from testplates.impl.value import (
MISSING,
)
from testplates.impl.exceptions import (
TestplatesError,
)
from .attrs import (
TESTPLATES_ERRORS_ATTR,
TESTPLATES_VALUE_ATTR,
)
def extract_value(
instance: Any,
) -> Any:
"""
Extracts value.
For internal use only.
"""
value = getattr(instance, TESTPLATES_VALUE_ATTR, MISSING)
return value
def extract_errors(
cls_or_instance: Union[Type[Any], Any],
) -> List[TestplatesError]:
"""
Extracts errors.
For internal use only.
"""
errors = getattr(cls_or_instance, TESTPLATES_ERRORS_ATTR, MISSING)
if errors is MISSING:
setattr(cls_or_instance, TESTPLATES_ERRORS_ATTR, errors := list())
return cast(List[TestplatesError], errors)
| 0
| 0
| 0
|
2d31ec77e7510699d2cc7549a1833146864202d2
| 7,699
|
py
|
Python
|
savu/plugins/alignment/projection_shift.py
|
elainehoml/Savu
|
e4772704606f71d6803d832084e10faa585e7358
|
[
"Apache-2.0"
] | 39
|
2015-03-30T14:03:42.000Z
|
2022-03-16T16:50:33.000Z
|
savu/plugins/alignment/projection_shift.py
|
elainehoml/Savu
|
e4772704606f71d6803d832084e10faa585e7358
|
[
"Apache-2.0"
] | 670
|
2015-02-11T11:08:09.000Z
|
2022-03-21T09:27:57.000Z
|
savu/plugins/alignment/projection_shift.py
|
elainehoml/Savu
|
e4772704606f71d6803d832084e10faa585e7358
|
[
"Apache-2.0"
] | 54
|
2015-02-13T14:09:52.000Z
|
2022-01-24T13:57:09.000Z
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: projection_shift
:platform: Unix
:synopsis: Calculate horizontal and vertical shifts in the projection\
images over time, using template matching.
.. moduleauthor:: Nicola Wadeson <scientificsoftware@diamond.ac.uk>
"""
import logging
import numpy as np
from skimage.feature import match_template, match_descriptors, ORB
from scipy.linalg import lstsq
from skimage.transform import AffineTransform
from skimage.measure import ransac
from savu.plugins.utils import register_plugin
from savu.plugins.filters.base_filter import BaseFilter
from savu.plugins.driver.cpu_plugin import CpuPlugin
@register_plugin
class ProjectionShift(BaseFilter, CpuPlugin):
"""
"""
| 38.113861
| 81
| 0.630471
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: projection_shift
:platform: Unix
:synopsis: Calculate horizontal and vertical shifts in the projection\
images over time, using template matching.
.. moduleauthor:: Nicola Wadeson <scientificsoftware@diamond.ac.uk>
"""
import logging
import numpy as np
from skimage.feature import match_template, match_descriptors, ORB
from scipy.linalg import lstsq
from skimage.transform import AffineTransform
from skimage.measure import ransac
from savu.plugins.utils import register_plugin
from savu.plugins.filters.base_filter import BaseFilter
from savu.plugins.driver.cpu_plugin import CpuPlugin
@register_plugin
class ProjectionShift(BaseFilter, CpuPlugin):
"""
"""
def __init__(self):
logging.debug("initialising Sinogram Alignment")
super(ProjectionShift, self).__init__("ProjectionShift")
self.template = None
self.threshold = 0
def pre_process(self):
if self.parameters['method'] == 'template_matching':
self.template_params = []
for p in self.parameters['template']:
start, end = p.split(':')
self.template_params.append(slice(int(start), int(end)))
self._calculate_shift = self._template_matching_shift
elif self.parameters['method'] == 'orb_ransac':
self._calculate_shift = self._orb_ransac_shift
if self.parameters['threshold']:
self.threshold = self.parameters['threshold']
self.sl = [slice(None)]*3
self.sl2 = [slice(None)]*3
self.slice_dir = self.get_plugin_in_datasets()[0].get_slice_dimension()
self.A = self._calculate_frame_matrix()
def _calculate_frame_matrix(self):
n_unknowns = self.get_max_frames() + 2 # 2 padded frames
frame_list = self._calculate_frame_list(np.arange(n_unknowns))
n_equations = len(frame_list)
A = np.zeros((n_equations, n_unknowns))
for i in range(len(frame_list)):
for f in frame_list[i][1:]:
A[i, f] = 1
return A
def process_frames(self, data):
data, nFrames, output, shift_array = self._initial_setup(data)
return self._sub_pixel_shift_adjustment(data)
def _initial_setup(self, data):
data = data[0]
shape = list(data.shape)
nFrames = data.shape[self.slice_dir]-2
shape[self.slice_dir] += -2
output = np.zeros(tuple(shape))
shift_array = np.zeros((nFrames, 2))
return data, nFrames, output, shift_array
def _get_shift(self, data, frame1, frame2):
self.sl[self.slice_dir] = frame1
self.sl2[self.slice_dir] = frame2
d1 = data[self.sl]
d2 = data[self.sl2]
if self.template:
self.template = data[self.sl][self.template_params]
if self.threshold:
d1[d1 > self.threshold[0]] = self.threshold[1]
d2[d2 > self.threshold[0]] = self.threshold[1]
if self.template:
self.template[self.template > self.threshold[0]] = \
self.threshold[1]
return self._calculate_shift(d1, d2, self.template)
def _orb_ransac_shift(self, im1, im2, template):
descriptor_extractor = ORB() #n_keypoints=self.parameters['n_keypoints'])
key1, des1 = self._find_key_points(descriptor_extractor, im1)
key2, des2 = self._find_key_points(descriptor_extractor, im2)
matches = match_descriptors(des1, des2, cross_check=True)
# estimate affine transform model using all coordinates
src = key1[matches[:, 0]]
dst = key2[matches[:, 1]]
# robustly estimate affine transform model with RANSAC
model_robust, inliers = ransac((src, dst), AffineTransform,
min_samples=3, residual_threshold=1,
max_trials=100)
# diff = []
# for p1, p2 in zip(src[inliers], dst[inliers]):
# diff.append(p2-p1)
# return np.mean(diff, axis=0)
return model_robust.translation
def _find_key_points(self, desc_extractor, image):
desc_extractor.detect_and_extract(image)
keypoints = desc_extractor.keypoints
descriptors = desc_extractor.descriptors
return keypoints, descriptors
def _template_matching_shift(self, im1, im2, template):
index = []
for im in [im1, im2]:
match = match_template(im, template)
index.append(np.unravel_index(np.argmax(match), match.shape))
index = np.array(index)
shift = index[1] - index[0]
return shift
def _sub_pixel_shift_adjustment(self, data):
frame_list = \
self._calculate_frame_list(np.arange(data.shape[self.slice_dir]))
new_shift = []
for f in frame_list:
new_shift.append(
self._get_shift(data, f[0], f[-1]).astype(np.float64))
return self._calculate_new_shift_array(np.array(new_shift))
def _calculate_frame_list(self, frames):
sixes = list(zip(*(frames[i:] for i in range(6))))
fives = list(zip(*(frames[i:] for i in range(5))))
fours = list(zip(*(frames[i:] for i in range(4))))
threes = list(zip(*(frames[i:] for i in range(3))))
return sixes + fives + fours + threes
def _calculate_new_shift_array(self, shift):
new_shift = []
for i in range(2):
new_shift.append(lstsq(self.A, shift[:, i])[0])
return np.transpose(np.array(new_shift))[1:-1]
def post_process(self):
out_data = self.get_out_datasets()[0]
self.get_in_datasets()[0].meta_data.set(
'proj_align_shift_local', out_data.data[:, :])
self.get_in_datasets()[0].meta_data.set(
'proj_align_shift', np.cumsum(out_data.data[:, :], axis=0))
def get_max_frames(self):
# Do not change this number as 8 is currently a requirement.
return 8
def nOutput_datasets(self):
return 1
def setup(self):
# set up the output dataset that is created by the plugin
in_dataset, out_dataset = self.get_datasets()
in_pData, out_pData = self.get_plugin_datasets()
in_pData[0].plugin_data_setup('PROJECTION', self.get_max_frames(),
fixed=True)
new_shape = (in_dataset[0].get_shape()[
in_dataset[0].get_slice_directions()[0]], 2)
out_dataset[0].create_dataset(shape=new_shape,
axis_labels=['x.pixels', 'y.pixels'],
remove=True)
out_dataset[0].add_pattern("METADATA", core_dims=(1,), slice_dims=(0,))
out_pData[0].plugin_data_setup('METADATA', self.get_max_frames(),
fixed=True)
def set_filter_padding(self, in_data, out_data):
pad_dim = in_data[0].get_slice_directions()[0]
in_data[0].padding = {'pad_directions': [str(pad_dim) + '.1']}
#in_data[0].padding = {'pad_directions': [str(pad_dim) + '.before.1']}
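As a rough, self-contained illustration of the template-matching step in _template_matching_shift above (synthetic arrays only, nothing Savu-specific), the sketch below locates the same patch in two relatively shifted images and reports the offset between the correlation peaks:
import numpy as np
from skimage.feature import match_template

rng = np.random.default_rng(0)
base = rng.random((64, 64))
shifted = np.roll(base, shift=(3, -2), axis=(0, 1))   # second frame, offset by a known (3, -2)
template = base[20:36, 20:36]                         # patch cut from the first frame

index = []
for im in (base, shifted):
    match = match_template(im, template)
    index.append(np.unravel_index(np.argmax(match), match.shape))
shift = np.array(index[1]) - np.array(index[0])
print(shift)   # [ 3 -2] for this synthetic example
The plugin then collects many such pairwise shifts over overlapping frame groups and solves for the per-frame shifts with lstsq, as in _calculate_frame_matrix and _calculate_new_shift_array above.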
| 5,948
| 0
| 459
|
6599a738ef385fda4ba36a0d5ccca971daa06188
| 458
|
py
|
Python
|
python_torch_test/test_index.py
|
wenhaopeter/read_pytorch_code
|
491f989cd918cf08874dd4f671fb7f0142a0bc4f
|
[
"Intel",
"X11"
] | null | null | null |
python_torch_test/test_index.py
|
wenhaopeter/read_pytorch_code
|
491f989cd918cf08874dd4f671fb7f0142a0bc4f
|
[
"Intel",
"X11"
] | null | null | null |
python_torch_test/test_index.py
|
wenhaopeter/read_pytorch_code
|
491f989cd918cf08874dd4f671fb7f0142a0bc4f
|
[
"Intel",
"X11"
] | null | null | null |
import torch
# cand_ids = torch.randint(0,10000,(10,))
# print(cand_ids.dtype)
# print(cand_ids.shape)
scores = torch.Tensor([[ 0, 1, 2],[ 3, 4, 5],[ 6, 7, 8],[ 9, 10, 11]])
a=scores[0:2]
# print(a)
# print(scores.dtype)
# print(scores.shape)
# scores[[1,2,3]]
# with torch.autograd.profiler.profile(record_shapes=True) as prof:
# print(scores[[1,2],[0,2]])
# print(prof.key_averages(group_by_input_shape=True).table(sort_by="self_cpu_time_total"))
| 26.941176
| 90
| 0.672489
|
import torch
# cand_ids = torch.randint(0,10000,(10,))
# print(cand_ids.dtype)
# print(cand_ids.shape)
scores = torch.Tensor([[ 0, 1, 2],[ 3, 4, 5],[ 6, 7, 8],[ 9, 10, 11]])
a=scores[0:2]
# print(a)
# print(scores.dtype)
# print(scores.shape)
# scores[[1,2,3]]
# with torch.autograd.profiler.profile(record_shapes=True) as prof:
# print(scores[[1,2],[0,2]])
# print(prof.key_averages(group_by_input_shape=True).table(sort_by="self_cpu_time_total"))
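A short sketch (not part of the original test) contrasting the two indexing forms touched on above: basic slicing returns a view that shares storage with the source tensor, while advanced indexing with index lists gathers individual elements into a new tensor.
import torch

scores = torch.tensor([[0., 1., 2.],
                       [3., 4., 5.],
                       [6., 7., 8.],
                       [9., 10., 11.]])

view = scores[0:2]                  # basic slicing: a view onto the first two rows
gathered = scores[[1, 2], [0, 2]]   # advanced indexing: elements (1,0) and (2,2) -> tensor([3., 8.])

scores[0, 0] = 99.
print(view[0, 0])   # tensor(99.) -- the slice sees the in-place change
print(gathered)     # tensor([3., 8.]) -- the gathered result is a copy and does not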
| 0
| 0
| 0
|
ff0aa4a18334faa1025b3b565c1234e541538556
| 4,378
|
py
|
Python
|
tests/test_workstation.py
|
soundstripe/jamberry
|
e5f7400ca274ceb357ef7098a32068f1b21db324
|
[
"MIT"
] | null | null | null |
tests/test_workstation.py
|
soundstripe/jamberry
|
e5f7400ca274ceb357ef7098a32068f1b21db324
|
[
"MIT"
] | null | null | null |
tests/test_workstation.py
|
soundstripe/jamberry
|
e5f7400ca274ceb357ef7098a32068f1b21db324
|
[
"MIT"
] | null | null | null |
import csv
from decimal import Decimal
from itertools import islice
from datetime import datetime, timedelta
import pytest
from bs4 import BeautifulSoup
from src.jamberry.workstation import extract_shipping_address, extract_line_items, parse_order_row_soup, \
JamberryWorkstation
# uncomment these lines to see requests
# import logging
# logging.basicConfig(level=logging.DEBUG)
@pytest.mark.online
@pytest.mark.usefixtures('ws')
@pytest.mark.online
@pytest.mark.usefixtures('ws')
@pytest.mark.online
@pytest.mark.usefixtures('ws')
@pytest.mark.online
@pytest.mark.usefixtures('ws')
@pytest.mark.online
@pytest.mark.usefixtures('ws')
@pytest.mark.online
@pytest.mark.usefixtures('ws')
@pytest.mark.usefixtures('order_detail_html')
@pytest.mark.usefixtures('order_detail_html')
@pytest.mark.online
@pytest.mark.usefixtures('ws')
@pytest.mark.usefixtures('order_row_html')
@pytest.mark.online
@pytest.mark.usefixtures('ws')
@pytest.mark.online
@pytest.mark.usefixtures('ws')
| 29.38255
| 106
| 0.731841
|
import csv
from decimal import Decimal
from itertools import islice
from datetime import datetime, timedelta
import pytest
from bs4 import BeautifulSoup
from src.jamberry.workstation import extract_shipping_address, extract_line_items, parse_order_row_soup, \
JamberryWorkstation
# uncomment these lines to see requests
# import logging
# logging.basicConfig(level=logging.DEBUG)
@pytest.mark.online
@pytest.mark.usefixtures('ws')
def test_fetch_tar(ws):
# test fetching current TAR
tar = ws.fetch_team_activity_csv()
assert ws.logged_in
tar_str = str(tar, encoding='utf8')
i = tar_str.find('\n', 0, 1024)
# next line will raise an exception if there is a problem
tar_csv_dialect = csv.Sniffer().sniff(tar_str[:i])
assert tar_csv_dialect is not None
    # test fetching last month's TAR
t = datetime.now() - timedelta(weeks=35)
last_month_tar = ws.fetch_team_activity_csv(year=t.year, month=t.month)
last_month_tar_str = str(last_month_tar, encoding='utf8')
i = last_month_tar_str.find('\n', 0, 1024)
tar_csv_dialect = csv.Sniffer().sniff(last_month_tar_str[:i])
assert tar_csv_dialect is not None
# if they match, something is wrong with the date selector
assert tar_str != last_month_tar_str
@pytest.mark.online
@pytest.mark.usefixtures('ws')
def test_login_logout(ws):
ws.login()
assert ws.logged_in
ws.logout()
assert not ws.logged_in
@pytest.mark.online
@pytest.mark.usefixtures('ws')
def test_fetch_orders(ws):
orders = ws.fetch_orders()
assert ws.logged_in
assert orders is not None
@pytest.mark.online
@pytest.mark.usefixtures('ws')
def test_fetch_archive_orders(ws):
orders = ws.fetch_archive_orders()
assert ws.logged_in
assert orders is not None
@pytest.mark.online
@pytest.mark.usefixtures('ws')
def test_create_and_delete_tmp_search_cart_retail(ws):
ws.create_tmp_search_cart_retail()
assert ws._cart_url is not None
assert 'cart/display' in ws._cart_url
ws.delete_tmp_search_cart_retail()
resp = ws.br.get('https://workstation.jamberry.com/us/en/wscart')
assert b'tmpSearchRetail' not in resp.content
assert ws._cart_url is None
@pytest.mark.online
@pytest.mark.usefixtures('ws')
def test_orders(ws):
order_generator = ws.orders()
first_50_orders = islice(order_generator, 0, 50)
orders = list(first_50_orders)
assert orders is not None
@pytest.mark.usefixtures('order_detail_html')
def test_extract_shipping_address(order_detail_html):
soup = BeautifulSoup(order_detail_html)
address = extract_shipping_address(soup)
lines = address.split('\n')
assert lines[0] == '123 Somewhere St'
assert lines[1] == 'Somewhere, CA 12345-6789'
@pytest.mark.usefixtures('order_detail_html')
def test_extract_line_items(order_detail_html):
soup = BeautifulSoup(order_detail_html)
line_items = extract_line_items(soup)
assert line_items[0].name == 'Cotton Candy Kisses'
assert line_items[0].quantity == 1
assert line_items[3].total == Decimal('19.20')
@pytest.mark.online
@pytest.mark.usefixtures('ws')
def test_downline_consultants(ws):
for consultant, activity in ws.downline_consultants():
assert consultant.id is not None
assert activity.timestamp is not None
@pytest.mark.usefixtures('order_row_html')
def test_parse_order_row(order_row_html):
soup = BeautifulSoup(order_row_html).tr
order = parse_order_row_soup(soup)
assert order.id == '12345678'
assert order.customer_name == 'Foo Bar'
assert order.shipping_name == 'Foo Bar'
assert order.order_date == datetime(2017, 10, 1, 6, 0)
assert order.status == 'Shipped'
assert order.order_type == 'Party'
assert order.order_details_url == 'OrderDetails.aspx?id=12345678'
assert order.customer_id == '1234567'
assert order.hostess == 'Foo Manchu'
assert order.party == 'What a Party!'
assert order.ship_date == datetime(2017, 10, 1)
@pytest.mark.online
@pytest.mark.usefixtures('ws')
def test_customers(ws):
for customer in ws.customers():
assert customer.name is not None
def test_ws_no_config_file():
with pytest.raises(IOError):
ws = JamberryWorkstation()
@pytest.mark.online
@pytest.mark.usefixtures('ws')
def test_catalog_products(ws):
for p in ws.catalog_products():
assert p.sku is not None
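These tests rely on a `ws` fixture and a custom `online` marker that are defined outside this file (presumably in a conftest.py that is not shown). A hypothetical sketch of such a conftest, using only the attributes the tests themselves exercise, might look like this:
# Hypothetical conftest.py sketch -- the project's real fixtures are not shown here,
# so the construction of JamberryWorkstation below is an assumption.
import pytest
from src.jamberry.workstation import JamberryWorkstation

def pytest_configure(config):
    # Register the custom marker so `pytest -m "not online"` can deselect live tests.
    config.addinivalue_line("markers", "online: tests that talk to the live workstation")

@pytest.fixture
def ws():
    workstation = JamberryWorkstation()   # assumed to read credentials from its config file
    yield workstation
    if workstation.logged_in:             # attribute/method used by the tests above
        workstation.logout()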
| 3,083
| 0
| 287
|
425fb837364f81528c19644a726599516ada187d
| 336
|
py
|
Python
|
address/migrations/0004_merge_20200328_0849.py
|
pedrovgp/cpm-django-address
|
f7c780aadb9a51df14677bd4681da073b68358cb
|
[
"BSD-3-Clause"
] | null | null | null |
address/migrations/0004_merge_20200328_0849.py
|
pedrovgp/cpm-django-address
|
f7c780aadb9a51df14677bd4681da073b68358cb
|
[
"BSD-3-Clause"
] | null | null | null |
address/migrations/0004_merge_20200328_0849.py
|
pedrovgp/cpm-django-address
|
f7c780aadb9a51df14677bd4681da073b68358cb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-03-28 11:49
from __future__ import unicode_literals
from django.db import migrations
| 19.764706
| 47
| 0.66369
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-03-28 11:49
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('address', '0003_auto_20170718_1615'),
('address', '0002_auto_20160213_1726'),
]
operations = [
]
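Merge migrations such as the one above are normally generated rather than hand-written; a small sketch of producing one programmatically (the command-line equivalent is `python manage.py makemigrations --merge address`) is:
# Illustrative only; assumes DJANGO_SETTINGS_MODULE points at the project that
# contains the `address` app and its two conflicting leaf migrations.
import django
from django.core.management import call_command

django.setup()
call_command("makemigrations", "address", merge=True, interactive=False)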
| 0
| 167
| 23
|
1577d38738d1df21a60bdf096aa6ca9a02c9b4ef
| 2,391
|
py
|
Python
|
coref/clustering_utils.py
|
AndreFCruz/coref-web-platform
|
845fd5461aad19a0f221077dbfbfd1d01766f0d6
|
[
"MIT"
] | 9
|
2018-09-18T14:34:57.000Z
|
2022-01-28T12:34:50.000Z
|
coref/clustering_utils.py
|
AndreFCruz/coref-web-platform
|
845fd5461aad19a0f221077dbfbfd1d01766f0d6
|
[
"MIT"
] | null | null | null |
coref/clustering_utils.py
|
AndreFCruz/coref-web-platform
|
845fd5461aad19a0f221077dbfbfd1d01766f0d6
|
[
"MIT"
] | null | null | null |
"""
clustering_utils.py: utility functions for the clustering.py module.
"""
import numpy as np
from enum import IntEnum
from .utils import find_in_sequence
class Link(IntEnum):
"""
Represents state of coreferring links.
Must be negative integers to not interfere with the clustering process.
"""
NO_ANTECEDENT = -1
PROCESSED = -2
def cluster_labels_to_entity_clusters(cluster_labels):
"""
@cluster_labels is the second return from the affinity_matrix computation,
and cluster_labels[mention_idx] = mention's cluster
"""
clusters = dict()
for idx, label in enumerate(cluster_labels):
if label not in clusters:
clusters[label] = list()
clusters[label].append(idx)
return [clusters[key] for key in clusters.keys()]
def coreference_links_to_entity_clusters(links):
"""
Transforms the given array of coreference links into a set of entities (mention clusters).
Each entity/cluster is represented by the mentions' indices.
"""
clusters = []
for i in range(len(links) - 1, -1, -1):
new_cluster = set()
j = i
while True:
antecedent = links[j]
links[j] = Link.PROCESSED
new_cluster.add(j)
# end of coreference link
if antecedent == Link.NO_ANTECEDENT:
clusters.append(new_cluster)
break
# linking to previously processed cluster
elif antecedent == Link.PROCESSED:
previous_cluster_idx = find_in_sequence(lambda s: j in s, clusters)
clusters[previous_cluster_idx].update(new_cluster)
break
j = antecedent
return clusters
def generate_affinity_matrix(document, mention_pair_predictions):
"""
Generates an affinity/similarity matrix from the given mention-pair scores.
@returns affinity_matrix[m1_idx, m2_idx] = affinity_score
"""
num_mentions = len(document.mentions)
affinity_matrix = np.ndarray(shape=(num_mentions, num_mentions), dtype=np.float32)
affinity_matrix.fill(0)
for idx in range(len(mention_pair_predictions)):
i1, i2 = document.pairwise_combinations[idx]
affinity_matrix[i1,i2] = mention_pair_predictions[idx]
affinity_matrix[i2,i1] = mention_pair_predictions[idx]
return affinity_matrix
| 30.653846
| 94
| 0.662066
|
"""
clustering_utils.py: utility functions for the clustering.py module.
"""
import numpy as np
from enum import IntEnum
from .utils import find_in_sequence
class Link(IntEnum):
"""
Represents state of coreferring links.
Must be negative integers to not interfere with the clustering process.
"""
NO_ANTECEDENT = -1
PROCESSED = -2
def cluster_labels_to_entity_clusters(cluster_labels):
"""
@cluster_labels is the second return from the affinity_matrix computation,
and cluster_labels[mention_idx] = mention's cluster
"""
clusters = dict()
for idx, label in enumerate(cluster_labels):
if label not in clusters:
clusters[label] = list()
clusters[label].append(idx)
return [clusters[key] for key in clusters.keys()]
def coreference_links_to_entity_clusters(links):
"""
Transforms the given array of coreference links into a set of entities (mention clusters).
Each entity/cluster is represented by the mentions' indices.
"""
clusters = []
for i in range(len(links) - 1, -1, -1):
new_cluster = set()
j = i
while True:
antecedent = links[j]
links[j] = Link.PROCESSED
new_cluster.add(j)
# end of coreference link
if antecedent == Link.NO_ANTECEDENT:
clusters.append(new_cluster)
break
# linking to previously processed cluster
elif antecedent == Link.PROCESSED:
previous_cluster_idx = find_in_sequence(lambda s: j in s, clusters)
clusters[previous_cluster_idx].update(new_cluster)
break
j = antecedent
return clusters
def generate_affinity_matrix(document, mention_pair_predictions):
"""
Generates an affinity/similarity matrix from the given mention-pair scores.
@returns affinity_matrix[m1_idx, m2_idx] = affinity_score
"""
num_mentions = len(document.mentions)
affinity_matrix = np.ndarray(shape=(num_mentions, num_mentions), dtype=np.float32)
affinity_matrix.fill(0)
for idx in range(len(mention_pair_predictions)):
i1, i2 = document.pairwise_combinations[idx]
affinity_matrix[i1,i2] = mention_pair_predictions[idx]
affinity_matrix[i2,i1] = mention_pair_predictions[idx]
return affinity_matrix
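A small usage sketch for coreference_links_to_entity_clusters above, assuming the package is importable as `coref` (matching the relative imports); the links array is hand-built purely for illustration and is mutated in place by the call.
from coref.clustering_utils import Link, coreference_links_to_entity_clusters

# links[j] holds the antecedent mention index of mention j, or NO_ANTECEDENT.
# Chains here: 3 -> 1 -> 0 and 4 -> 2, i.e. two entities.
links = [Link.NO_ANTECEDENT, 0, Link.NO_ANTECEDENT, 1, 2]

clusters = coreference_links_to_entity_clusters(links)
print(clusters)   # expected: [{2, 4}, {0, 1, 3}]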
| 0
| 0
| 0
|