| column | dtype | observed range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 (nullable) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 (nullable) | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |

hexsha: d57e20647da81b0ea97e3b6ddf0c1e7911b5db6d | size: 4,094 | ext: py | lang: Python
max_stars: path space.py, repo kourgeorge/life, head 5162722e9bd53e5e84d704852139802bf99b46ce, licenses ["BSD-2-Clause"], count 24, events 2018-12-04T13:35:54.000Z to 2022-02-28T05:52:38.000Z
max_issues: path space.py, repo kourgeorge/life, head 5162722e9bd53e5e84d704852139802bf99b46ce, licenses ["BSD-2-Clause"], count 5, events 2018-12-09T14:41:21.000Z to 2020-08-28T10:35:56.000Z
max_forks: path space.py, repo kourgeorge/life, head 5162722e9bd53e5e84d704852139802bf99b46ce, licenses ["BSD-2-Clause"], count 3, events 2020-04-18T03:29:19.000Z to 2020-08-20T15:03:35.000Z
content:
__author__ = 'gkour'
from cell import Cell
import numpy as np
from itertools import chain
class Space:
def __init__(self, space_size):
self._space_size = space_size
        # Build the full grid once; the comprehension already covers every (i, j).
        self._grid = [[Cell((i, j)) for j in range(self._space_size)] for i in range(self._space_size)]
def grid(self):
return self._grid
def cells(self):
return list(chain.from_iterable(self._grid))
def update_sounds(self, time):
for cell in self.cells():
cell.remove_sounds(time)
def insert_creature(self, creature, coord):
if not self.valid_coord(coord):
print("Exception: bad coordinated in space.insert_creature")
return None
cell = self._grid[coord[0]][coord[1]]
cell.insert_creature(creature)
return cell
def add_food(self, coord, amount):
if not self.valid_coord(coord):
print("Exception: bad coordinated in space.add_food")
return None
cell = self._grid[coord[0]][coord[1]]
cell.add_food(amount)
return cell
def remove_creature(self, creature):
x, y = creature.coord()
self._grid[x][y].remove_creature(creature)
def get_state_in_coord(self, coord, vision_range, races):
if not self.valid_coord(coord):
raise Exception("Exception: bad coordinated in space.get_state_in_coord")
state_dim_size = 2 * vision_range + 1
dims = len(races) + 2 # races, food, and sound
state = np.ones([dims, state_dim_size, state_dim_size]) * -1
for i in range(state_dim_size):
for j in range(state_dim_size):
abs_i = coord[0] - vision_range + i
abs_j = coord[1] - vision_range + j
if 0 <= abs_i < self._space_size and 0 <= abs_j < self._space_size:
state[:, i, j] = self._grid[abs_i][abs_j].get_state_in_cell(races)
return state
def get_all_creatures(self):
return [creature for cell in self.cells() for creature in cell.creatures()]
def get_food_distribution(self):
return [[self._grid[i][j].get_food() for j in range(self._space_size)] for i in range(self._space_size)]
def get_creatures_distribution(self):
return [[self._grid[i][j].num_creatures() for j in range(self._space_size)] for i in range(self._space_size)]
def get_sounds_distribution(self):
return [[len(self._grid[i][j].get_sounds()) for j in range(self._space_size)] for i in range(self._space_size)]
def valid_coord(self, coord):
x, y = coord
return 0 <= x < self._space_size and 0 <= y < self._space_size
def find_nearby_creature(self, creature):
nearby_creatures = creature.cell().creatures()
if len(nearby_creatures) < 2:
return None
others = [creat for creat in nearby_creatures if creat != creature]
return np.random.permutation(others)[0]
def find_nearby_creature_from_same_race(self, creature):
others = self.get_nearby_creatures_from_same_race(creature)
if others:
return np.random.permutation(others)[0]
return None
def find_nearby_creature_from_different_race(self, creature):
others = self.get_nearby_creatures_from_different_race(creature)
if others:
return np.random.permutation(others)[0]
return None
def get_nearby_creatures_from_same_race(self, creature):
return [creat for creat in creature.cell().creatures() if
creat != creature and creat.race_name() == creature.race_name()]
def get_nearby_creatures_from_different_race(self, creature):
return [creat for creat in creature.cell().creatures() if creat.race_name() != creature.race_name()]
def __str__(self):
string = ''
for cell in self._grid:
string = string + str(cell) + ' '
return string
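# A minimal usage sketch, assuming the companion cell.Cell class exposes the
# add_food()/get_food() methods referenced above; the values are illustrative.
if __name__ == '__main__':
    space = Space(4)                      # 4x4 grid of Cell objects
    space.add_food((1, 2), amount=3)      # drop food into one cell
    print(space.get_food_distribution())  # 4x4 matrix of food amounts
    print(space.valid_coord((5, 0)))      # False: outside the 4x4 grid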
avg_line_length: 37.907407 | max_line_length: 120 | alphanum_fraction: 0.624328

hexsha: 65ff082c242a54d9d1ce0365fb35e00f6cca015c | size: 45,653 | ext: py | lang: Python
max_stars: path opengl/gl/raw/gl_4_3.py, repo SilentPenguin/OpenGL.py, head dd16bf7ea2fa20a7ea489e711a5df20d604c34dc, licenses ["Apache-2.0"], count 1, events 2016-11-09T06:11:24.000Z to 2016-11-09T06:11:24.000Z
max_issues: path opengl/gl/raw/gl_4_3.py, repo SilentPenguin/OpenGL.py, head dd16bf7ea2fa20a7ea489e711a5df20d604c34dc, licenses ["Apache-2.0"], count 3, events 2016-11-09T06:21:08.000Z to 2016-11-18T15:17:22.000Z
max_forks: path opengl/gl/raw/gl_4_3.py, repo SilentPenguin/OpenGL.py, head dd16bf7ea2fa20a7ea489e711a5df20d604c34dc, licenses ["Apache-2.0"], count null, events null
content:
#BEWARE: automatically generated code
#This code was generated by /generate/__main__.py
from opengl.gl.raw.bindings import *
NUM_SHADING_LANGUAGE_VERSIONS = 0x82E9
VERTEX_ATTRIB_ARRAY_LONG = 0x874E
COMPRESSED_RGB8_ETC2 = 0x9274
COMPRESSED_SRGB8_ETC2 = 0x9275
COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 = 0x9276
COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 = 0x9277
COMPRESSED_RGBA8_ETC2_EAC = 0x9278
COMPRESSED_SRGB8_ALPHA8_ETC2_EAC = 0x9279
COMPRESSED_R11_EAC = 0x9270
COMPRESSED_SIGNED_R11_EAC = 0x9271
COMPRESSED_RG11_EAC = 0x9272
COMPRESSED_SIGNED_RG11_EAC = 0x9273
PRIMITIVE_RESTART_FIXED_INDEX = 0x8D69
ANY_SAMPLES_PASSED_CONSERVATIVE = 0x8D6A
MAX_ELEMENT_INDEX = 0x8D6B
COMPUTE_SHADER = 0x91B9
MAX_COMPUTE_UNIFORM_BLOCKS = 0x91BB
MAX_COMPUTE_TEXTURE_IMAGE_UNITS = 0x91BC
MAX_COMPUTE_IMAGE_UNIFORMS = 0x91BD
MAX_COMPUTE_SHARED_MEMORY_SIZE = 0x8262
MAX_COMPUTE_UNIFORM_COMPONENTS = 0x8263
MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS = 0x8264
MAX_COMPUTE_ATOMIC_COUNTERS = 0x8265
MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS = 0x8266
MAX_COMPUTE_WORK_GROUP_INVOCATIONS = 0x90EB
MAX_COMPUTE_WORK_GROUP_COUNT = 0x91BE
MAX_COMPUTE_WORK_GROUP_SIZE = 0x91BF
COMPUTE_WORK_GROUP_SIZE = 0x8267
UNIFORM_BLOCK_REFERENCED_BY_COMPUTE_SHADER = 0x90EC
ATOMIC_COUNTER_BUFFER_REFERENCED_BY_COMPUTE_SHADER = 0x90ED
DISPATCH_INDIRECT_BUFFER = 0x90EE
DISPATCH_INDIRECT_BUFFER_BINDING = 0x90EF
COMPUTE_SHADER_BIT = 0x00000020
DEBUG_OUTPUT_SYNCHRONOUS = 0x8242
DEBUG_NEXT_LOGGED_MESSAGE_LENGTH = 0x8243
DEBUG_CALLBACK_FUNCTION = 0x8244
DEBUG_CALLBACK_USER_PARAM = 0x8245
DEBUG_SOURCE_API = 0x8246
DEBUG_SOURCE_WINDOW_SYSTEM = 0x8247
DEBUG_SOURCE_SHADER_COMPILER = 0x8248
DEBUG_SOURCE_THIRD_PARTY = 0x8249
DEBUG_SOURCE_APPLICATION = 0x824A
DEBUG_SOURCE_OTHER = 0x824B
DEBUG_TYPE_ERROR = 0x824C
DEBUG_TYPE_DEPRECATED_BEHAVIOR = 0x824D
DEBUG_TYPE_UNDEFINED_BEHAVIOR = 0x824E
DEBUG_TYPE_PORTABILITY = 0x824F
DEBUG_TYPE_PERFORMANCE = 0x8250
DEBUG_TYPE_OTHER = 0x8251
MAX_DEBUG_MESSAGE_LENGTH = 0x9143
MAX_DEBUG_LOGGED_MESSAGES = 0x9144
DEBUG_LOGGED_MESSAGES = 0x9145
DEBUG_SEVERITY_HIGH = 0x9146
DEBUG_SEVERITY_MEDIUM = 0x9147
DEBUG_SEVERITY_LOW = 0x9148
DEBUG_TYPE_MARKER = 0x8268
DEBUG_TYPE_PUSH_GROUP = 0x8269
DEBUG_TYPE_POP_GROUP = 0x826A
DEBUG_SEVERITY_NOTIFICATION = 0x826B
MAX_DEBUG_GROUP_STACK_DEPTH = 0x826C
DEBUG_GROUP_STACK_DEPTH = 0x826D
BUFFER = 0x82E0
SHADER = 0x82E1
PROGRAM = 0x82E2
VERTEX_ARRAY = 0x8074
QUERY = 0x82E3
PROGRAM_PIPELINE = 0x82E4
SAMPLER = 0x82E6
MAX_LABEL_LENGTH = 0x82E8
DEBUG_OUTPUT = 0x92E0
CONTEXT_FLAG_DEBUG_BIT = 0x00000002
MAX_UNIFORM_LOCATIONS = 0x826E
FRAMEBUFFER_DEFAULT_WIDTH = 0x9310
FRAMEBUFFER_DEFAULT_HEIGHT = 0x9311
FRAMEBUFFER_DEFAULT_LAYERS = 0x9312
FRAMEBUFFER_DEFAULT_SAMPLES = 0x9313
FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS = 0x9314
MAX_FRAMEBUFFER_WIDTH = 0x9315
MAX_FRAMEBUFFER_HEIGHT = 0x9316
MAX_FRAMEBUFFER_LAYERS = 0x9317
MAX_FRAMEBUFFER_SAMPLES = 0x9318
INTERNALFORMAT_SUPPORTED = 0x826F
INTERNALFORMAT_PREFERRED = 0x8270
INTERNALFORMAT_RED_SIZE = 0x8271
INTERNALFORMAT_GREEN_SIZE = 0x8272
INTERNALFORMAT_BLUE_SIZE = 0x8273
INTERNALFORMAT_ALPHA_SIZE = 0x8274
INTERNALFORMAT_DEPTH_SIZE = 0x8275
INTERNALFORMAT_STENCIL_SIZE = 0x8276
INTERNALFORMAT_SHARED_SIZE = 0x8277
INTERNALFORMAT_RED_TYPE = 0x8278
INTERNALFORMAT_GREEN_TYPE = 0x8279
INTERNALFORMAT_BLUE_TYPE = 0x827A
INTERNALFORMAT_ALPHA_TYPE = 0x827B
INTERNALFORMAT_DEPTH_TYPE = 0x827C
INTERNALFORMAT_STENCIL_TYPE = 0x827D
MAX_WIDTH = 0x827E
MAX_HEIGHT = 0x827F
MAX_DEPTH = 0x8280
MAX_LAYERS = 0x8281
MAX_COMBINED_DIMENSIONS = 0x8282
COLOR_COMPONENTS = 0x8283
DEPTH_COMPONENTS = 0x8284
STENCIL_COMPONENTS = 0x8285
COLOR_RENDERABLE = 0x8286
DEPTH_RENDERABLE = 0x8287
STENCIL_RENDERABLE = 0x8288
FRAMEBUFFER_RENDERABLE = 0x8289
FRAMEBUFFER_RENDERABLE_LAYERED = 0x828A
FRAMEBUFFER_BLEND = 0x828B
READ_PIXELS = 0x828C
READ_PIXELS_FORMAT = 0x828D
READ_PIXELS_TYPE = 0x828E
TEXTURE_IMAGE_FORMAT = 0x828F
TEXTURE_IMAGE_TYPE = 0x8290
GET_TEXTURE_IMAGE_FORMAT = 0x8291
GET_TEXTURE_IMAGE_TYPE = 0x8292
MIPMAP = 0x8293
MANUAL_GENERATE_MIPMAP = 0x8294
AUTO_GENERATE_MIPMAP = 0x8295
COLOR_ENCODING = 0x8296
SRGB_READ = 0x8297
SRGB_WRITE = 0x8298
FILTER = 0x829A
VERTEX_TEXTURE = 0x829B
TESS_CONTROL_TEXTURE = 0x829C
TESS_EVALUATION_TEXTURE = 0x829D
GEOMETRY_TEXTURE = 0x829E
FRAGMENT_TEXTURE = 0x829F
COMPUTE_TEXTURE = 0x82A0
TEXTURE_SHADOW = 0x82A1
TEXTURE_GATHER = 0x82A2
TEXTURE_GATHER_SHADOW = 0x82A3
SHADER_IMAGE_LOAD = 0x82A4
SHADER_IMAGE_STORE = 0x82A5
SHADER_IMAGE_ATOMIC = 0x82A6
IMAGE_TEXEL_SIZE = 0x82A7
IMAGE_COMPATIBILITY_CLASS = 0x82A8
IMAGE_PIXEL_FORMAT = 0x82A9
IMAGE_PIXEL_TYPE = 0x82AA
SIMULTANEOUS_TEXTURE_AND_DEPTH_TEST = 0x82AC
SIMULTANEOUS_TEXTURE_AND_STENCIL_TEST = 0x82AD
SIMULTANEOUS_TEXTURE_AND_DEPTH_WRITE = 0x82AE
SIMULTANEOUS_TEXTURE_AND_STENCIL_WRITE = 0x82AF
TEXTURE_COMPRESSED_BLOCK_WIDTH = 0x82B1
TEXTURE_COMPRESSED_BLOCK_HEIGHT = 0x82B2
TEXTURE_COMPRESSED_BLOCK_SIZE = 0x82B3
CLEAR_BUFFER = 0x82B4
TEXTURE_VIEW = 0x82B5
VIEW_COMPATIBILITY_CLASS = 0x82B6
FULL_SUPPORT = 0x82B7
CAVEAT_SUPPORT = 0x82B8
IMAGE_CLASS_4_X_32 = 0x82B9
IMAGE_CLASS_2_X_32 = 0x82BA
IMAGE_CLASS_1_X_32 = 0x82BB
IMAGE_CLASS_4_X_16 = 0x82BC
IMAGE_CLASS_2_X_16 = 0x82BD
IMAGE_CLASS_1_X_16 = 0x82BE
IMAGE_CLASS_4_X_8 = 0x82BF
IMAGE_CLASS_2_X_8 = 0x82C0
IMAGE_CLASS_1_X_8 = 0x82C1
IMAGE_CLASS_11_11_10 = 0x82C2
IMAGE_CLASS_10_10_10_2 = 0x82C3
VIEW_CLASS_128_BITS = 0x82C4
VIEW_CLASS_96_BITS = 0x82C5
VIEW_CLASS_64_BITS = 0x82C6
VIEW_CLASS_48_BITS = 0x82C7
VIEW_CLASS_32_BITS = 0x82C8
VIEW_CLASS_24_BITS = 0x82C9
VIEW_CLASS_16_BITS = 0x82CA
VIEW_CLASS_8_BITS = 0x82CB
VIEW_CLASS_S3TC_DXT1_RGB = 0x82CC
VIEW_CLASS_S3TC_DXT1_RGBA = 0x82CD
VIEW_CLASS_S3TC_DXT3_RGBA = 0x82CE
VIEW_CLASS_S3TC_DXT5_RGBA = 0x82CF
VIEW_CLASS_RGTC1_RED = 0x82D0
VIEW_CLASS_RGTC2_RG = 0x82D1
VIEW_CLASS_BPTC_UNORM = 0x82D2
VIEW_CLASS_BPTC_FLOAT = 0x82D3
UNIFORM = 0x92E1
UNIFORM_BLOCK = 0x92E2
PROGRAM_INPUT = 0x92E3
PROGRAM_OUTPUT = 0x92E4
BUFFER_VARIABLE = 0x92E5
SHADER_STORAGE_BLOCK = 0x92E6
VERTEX_SUBROUTINE = 0x92E8
TESS_CONTROL_SUBROUTINE = 0x92E9
TESS_EVALUATION_SUBROUTINE = 0x92EA
GEOMETRY_SUBROUTINE = 0x92EB
FRAGMENT_SUBROUTINE = 0x92EC
COMPUTE_SUBROUTINE = 0x92ED
VERTEX_SUBROUTINE_UNIFORM = 0x92EE
TESS_CONTROL_SUBROUTINE_UNIFORM = 0x92EF
TESS_EVALUATION_SUBROUTINE_UNIFORM = 0x92F0
GEOMETRY_SUBROUTINE_UNIFORM = 0x92F1
FRAGMENT_SUBROUTINE_UNIFORM = 0x92F2
COMPUTE_SUBROUTINE_UNIFORM = 0x92F3
TRANSFORM_FEEDBACK_VARYING = 0x92F4
ACTIVE_RESOURCES = 0x92F5
MAX_NAME_LENGTH = 0x92F6
MAX_NUM_ACTIVE_VARIABLES = 0x92F7
MAX_NUM_COMPATIBLE_SUBROUTINES = 0x92F8
NAME_LENGTH = 0x92F9
TYPE = 0x92FA
ARRAY_SIZE = 0x92FB
OFFSET = 0x92FC
BLOCK_INDEX = 0x92FD
ARRAY_STRIDE = 0x92FE
MATRIX_STRIDE = 0x92FF
IS_ROW_MAJOR = 0x9300
ATOMIC_COUNTER_BUFFER_INDEX = 0x9301
BUFFER_BINDING = 0x9302
BUFFER_DATA_SIZE = 0x9303
NUM_ACTIVE_VARIABLES = 0x9304
ACTIVE_VARIABLES = 0x9305
REFERENCED_BY_VERTEX_SHADER = 0x9306
REFERENCED_BY_TESS_CONTROL_SHADER = 0x9307
REFERENCED_BY_TESS_EVALUATION_SHADER = 0x9308
REFERENCED_BY_GEOMETRY_SHADER = 0x9309
REFERENCED_BY_FRAGMENT_SHADER = 0x930A
REFERENCED_BY_COMPUTE_SHADER = 0x930B
TOP_LEVEL_ARRAY_SIZE = 0x930C
TOP_LEVEL_ARRAY_STRIDE = 0x930D
LOCATION = 0x930E
LOCATION_INDEX = 0x930F
IS_PER_PATCH = 0x92E7
SHADER_STORAGE_BUFFER = 0x90D2
SHADER_STORAGE_BUFFER_BINDING = 0x90D3
SHADER_STORAGE_BUFFER_START = 0x90D4
SHADER_STORAGE_BUFFER_SIZE = 0x90D5
MAX_VERTEX_SHADER_STORAGE_BLOCKS = 0x90D6
MAX_GEOMETRY_SHADER_STORAGE_BLOCKS = 0x90D7
MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS = 0x90D8
MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS = 0x90D9
MAX_FRAGMENT_SHADER_STORAGE_BLOCKS = 0x90DA
MAX_COMPUTE_SHADER_STORAGE_BLOCKS = 0x90DB
MAX_COMBINED_SHADER_STORAGE_BLOCKS = 0x90DC
MAX_SHADER_STORAGE_BUFFER_BINDINGS = 0x90DD
MAX_SHADER_STORAGE_BLOCK_SIZE = 0x90DE
SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT = 0x90DF
SHADER_STORAGE_BARRIER_BIT = 0x00002000
MAX_COMBINED_SHADER_OUTPUT_RESOURCES = 0x8F39
DEPTH_STENCIL_TEXTURE_MODE = 0x90EA
TEXTURE_BUFFER_OFFSET = 0x919D
TEXTURE_BUFFER_SIZE = 0x919E
TEXTURE_BUFFER_OFFSET_ALIGNMENT = 0x919F
TEXTURE_VIEW_MIN_LEVEL = 0x82DB
TEXTURE_VIEW_NUM_LEVELS = 0x82DC
TEXTURE_VIEW_MIN_LAYER = 0x82DD
TEXTURE_VIEW_NUM_LAYERS = 0x82DE
TEXTURE_IMMUTABLE_LEVELS = 0x82DF
VERTEX_ATTRIB_BINDING = 0x82D4
VERTEX_ATTRIB_RELATIVE_OFFSET = 0x82D5
VERTEX_BINDING_DIVISOR = 0x82D6
VERTEX_BINDING_OFFSET = 0x82D7
VERTEX_BINDING_STRIDE = 0x82D8
MAX_VERTEX_ATTRIB_RELATIVE_OFFSET = 0x82D9
MAX_VERTEX_ATTRIB_BINDINGS = 0x82DA
VERTEX_BINDING_BUFFER = 0x8F4F
@accepts(t.enum, t.enum, t.enum, t.enum, t.void)
@returns(t.void)
@binds(dll)
def clear_buffer_data(target, internalformat, format, type, data):
'''
fill a buffer object's data store with a fixed value.
gl.clear_buffer_data and gl.clear_named_buffer_data fill the entirety of a
buffer object's data store with data from client memory.
Args:
target: the target to which the buffer object is bound for
            glclearbufferdata, which must be one of the valid buffer binding
            targets.
internalformat: the internal format with which the data will be stored
in the buffer object.
format: the format of the data in memory addressed by data.
type: the type of the data in memory addressed by data.
data: the address of a memory location storing the data to be replicated
into the buffer's data store.
'''
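# Hedged usage sketch: a common idiom is zero-filling a bound shader storage
# buffer by passing a null data pointer; the raw enum values are spelled out
# because they are defined in other modules of these bindings.
#   clear_buffer_data(SHADER_STORAGE_BUFFER, 0x8236, 0x8D94, 0x1405, None)
#   # 0x8236 = GL_R32UI, 0x8D94 = GL_RED_INTEGER, 0x1405 = GL_UNSIGNED_INT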
@accepts(t.enum, t.enum, t.intptr, t.sizeiptr, t.enum, t.enum, t.void)
@returns(t.void)
@binds(dll)
def clear_buffer_sub_data(target, internalformat, offset, size, format, type, data):
'''
fill all or part of buffer object's data store with a fixed value.
gl.clear_buffer_sub_data and gl.clear_named_buffer_sub_data fill a specified
region of a buffer object's data store with data from client memory.
Args:
target: the target to which the buffer object is bound for
glclearbuffersubdata, which must be one of the buffer binding
            targets.
internalformat: the internal format with which the data will be stored
in the buffer object.
offset: the offset in basic machine units into the buffer object's data
store at which to start filling.
size: the size in basic machine units of the range of the data store to
fill.
format: the format of the data in memory addressed by data.
type: the type of the data in memory addressed by data.
data: the address of a memory location storing the data to be replicated
into the buffer's data store.
'''
@accepts(t.uint, t.uint, t.uint)
@returns(t.void)
@binds(dll)
def dispatch_compute(num_groups_x, num_groups_y, num_groups_z):
'''
launch one or more compute work groups.
gl.dispatch_compute launches one or more compute work groups. Each work
group is processed by the active program object for the compute shader
stage. While the individual shader invocations within a work group are
executed as a unit, work groups are executed completely independently and in
unspecified order. num_groups_x, num_groups_y and num_groups_z specify the
number of local work groups that will be dispatched in the X, Y and Z
dimensions, respectively.
Args:
num_groups_x: the number of work groups to be launched in the x
dimension.
num_groups_y: the number of work groups to be launched in the y
dimension.
num_groups_z: the number of work groups to be launched in the z
dimension.
'''
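# Hedged usage sketch: with a compute program whose local work group size is
# 8x8x1 bound, covering a 512x512 image takes 64x64x1 work groups. The sizes
# below are illustrative, not part of the API.
#   dispatch_compute(512 // 8, 512 // 8, 1)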
@accepts(t.intptr)
@returns(t.void)
@binds(dll)
def dispatch_compute_indirect(indirect):
'''
launch one or more compute work groups using parameters stored in a buffer.
gl.dispatch_compute_indirect launches one or more compute work groups using
parameters stored in the buffer object currently bound to the
gl.DISPATCH_INDIRECT_BUFFER target. Each work group is processed by the
active program object for the compute shader stage. While the individual
shader invocations within a work group are executed as a unit, work groups
are executed completely independently and in unspecified order. indirect
contains the offset into the data store of the buffer object bound to the
gl.DISPATCH_INDIRECT_BUFFER target at which the parameters are stored.
Args:
indirect: the offset into the buffer object currently bound to the
gl_dispatch_indirect_buffer buffer target at which the dispatch
parameters are stored.
'''
@accepts(t.uint, t.enum, t.int, t.int, t.int, t.int, t.uint, t.enum, t.int, t.int, t.int, t.int, t.sizei, t.sizei, t.sizei)
@returns(t.void)
@binds(dll)
def copy_image_sub_data(srcname, srctarget, srclevel, srcx, srcy, srcz, dstname, dsttarget, dstlevel, dstx, dsty, dstz, srcwidth, srcheight, srcdepth):
'''
perform a raw data copy between two images.
gl.copy_image_sub_data may be used to copy data from one image to another.
gl.copy_image_sub_data does not perform general-purpose conversions such as
scaling, resizing, blending, color-space, or format conversions.
Args:
srcname: the name of a texture or renderbuffer object from which to
copy.
srctarget: the target representing the namespace of the source name
srcname.
srclevel: the mipmap level to read from the source.
        srcx: the x coordinate of the left edge of the source region to copy.
        srcy: the y coordinate of the top edge of the source region to copy.
        srcz: the z coordinate of the near edge of the source region to copy.
dstname: the name of a texture or renderbuffer object to which to copy.
dsttarget: the target representing the namespace of the destination name
dstname.
dstx: the x coordinate of the left edge of the destination region.
dsty: the y coordinate of the top edge of the destination region.
dstz: the z coordinate of the near edge of the destination region.
srcwidth: the width of the region to be copied.
srcheight: the height of the region to be copied.
srcdepth: the depth of the region to be copied.
'''
@accepts(t.enum, t.enum, t.int)
@returns(t.void)
@binds(dll)
def framebuffer_parameteri(target, pname, param):
'''
set a named parameter of a framebuffer object.
gl.framebuffer_parameteri and gl.named_framebuffer_parameteri modify the
value of the parameter named pname in the specified framebuffer object.
There are no modifiable parameters of the default draw and read framebuffer,
so they are not valid targets of these commands.
Args:
target: the target to which the framebuffer is bound for
glframebufferparameteri.
pname: the framebuffer parameter to be modified.
param: the new value for the parameter named pname.
'''
@accepts(t.enum, t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def get_framebuffer_parameteriv(target, pname, params):
pass
@accepts(t.enum, t.enum, t.enum, t.sizei, POINTER(t.int64))
@returns(t.void)
@binds(dll)
def get_internalformati64v(target, internalformat, pname, bufsize, params):
pass
@accepts(t.uint, t.int, t.int, t.int, t.int, t.sizei, t.sizei, t.sizei)
@returns(t.void)
@binds(dll)
def invalidate_tex_sub_image(texture, level, xoffset, yoffset, zoffset, width, height, depth):
'''
invalidate a region of a texture image.
gl.invalidate_tex_sub_image invalidates all or part of a texture image.
    texture and level indicate which texture image is being invalidated. After
this command, data in that subregion have undefined values. xoffset,
yoffset, zoffset, width, height, and depth are interpreted as they are in
gl.tex_sub_image3D.
Args:
texture: the name of a texture object a subregion of which to
invalidate.
level: the level of detail of the texture object within which the region
resides.
xoffset: the x offset of the region to be invalidated.
yoffset: the y offset of the region to be invalidated.
zoffset: the z offset of the region to be invalidated.
width: the width of the region to be invalidated.
height: the height of the region to be invalidated.
depth: the depth of the region to be invalidated.
'''
@accepts(t.uint, t.int)
@returns(t.void)
@binds(dll)
def invalidate_tex_image(texture, level):
'''
    invalidate the entirety of a texture image.
Args:
texture: the name of a texture object to invalidate.
level: the level of detail of the texture object to invalidate.
'''
@accepts(t.uint, t.intptr, t.sizeiptr)
@returns(t.void)
@binds(dll)
def invalidate_buffer_sub_data(buffer, offset, length):
'''
invalidate a region of a buffer object's data store.
gl.invalidate_buffer_sub_data invalidates all or part of the content of the
data store of a buffer object. After invalidation, the content of the
specified range of the buffer's data store becomes undefined. The start of
the range is given by offset and its size is given by length, both measured
in basic machine units.
Args:
buffer: the name of a buffer object, a subrange of whose data store to
invalidate.
offset: the offset within the buffer's data store of the start of the
range to be invalidated.
length: the length of the range within the buffer's data store to be
invalidated.
'''
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def invalidate_buffer_data(buffer):
'''
invalidate the content of a buffer object's data store.
gl.invalidate_buffer_data invalidates all of the content of the data store
of a buffer object. After invalidation, the content of the buffer's data
store becomes undefined.
Args:
buffer: the name of a buffer object whose data store to invalidate.
'''
@accepts(t.enum, t.sizei, POINTER(t.enum))
@returns(t.void)
@binds(dll)
def invalidate_framebuffer(target, numattachments, attachments):
'''
invalidate the content of some or all of a framebuffer's attachments.
gl.invalidate_framebuffer and gl.invalidate_named_framebuffer_data
invalidate the entire contents of a specified set of attachments of a
framebuffer.
Args:
target: the target to which the framebuffer object is attached for
glinvalidateframebuffer.
numattachments: the number of entries in the attachments array.
attachments: a pointer to an array identifying the attachments to be
invalidated.
'''
@accepts(t.enum, t.sizei, POINTER(t.enum), t.int, t.int, t.sizei, t.sizei)
@returns(t.void)
@binds(dll)
def invalidate_sub_framebuffer(target, numattachments, attachments, x, y, width, height):
'''
invalidate the content of a region of some or all of a framebuffer's
attachments.
gl.invalidate_sub_framebuffer and gl.invalidate_named_framebuffer_sub_data
invalidate the contents of a specified region of a specified set of
attachments of a framebuffer.
Args:
target: the target to which the framebuffer object is attached for
glinvalidatesubframebuffer.
numattachments: the number of entries in the attachments array.
attachments: a pointer to an array identifying the attachments to be
invalidated.
x: the x offset of the region to be invalidated.
y: the y offset of the region to be invalidated.
width: the width of the region to be invalidated.
height: the height of the region to be invalidated.
'''
@accepts(t.enum, t.void, t.sizei, t.sizei)
@returns(t.void)
@binds(dll)
def multi_draw_arrays_indirect(mode, indirect, drawcount, stride):
'''
render multiple sets of primitives from array data, taking parameters from
memory.
gl.multi_draw_arrays_indirect specifies multiple geometric primitives with
very few subroutine calls. gl.multi_draw_arrays_indirect behaves similarly
    to a multitude of calls to gl.draw_arrays_instanced_base_instance, except
that the parameters to each call to gl.draw_arrays_instanced_base_instance
are stored in an array in memory at the address given by indirect, separated
by the stride, in basic machine units, specified by stride. If stride is
zero, then the array is assumed to be tightly packed in memory.
Args:
mode: what kind of primitives to render.
indirect: the address of an array of structures containing the draw
parameters.
        drawcount: the number of elements in the array of draw parameter
structures.
stride: the distance in basic machine units between elements of the draw
parameter array.
'''
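# For reference, each element of the indirect array follows the GL
# DrawArraysIndirectCommand layout: four uint32 fields,
#   (count, instanceCount, first, baseInstance),
# so tightly packed commands use stride = 0 or 16 basic machine units.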
@accepts(t.enum, t.enum, t.void, t.sizei, t.sizei)
@returns(t.void)
@binds(dll)
def multi_draw_elements_indirect(mode, type, indirect, drawcount, stride):
'''
render indexed primitives from array data, taking parameters from memory.
gl.multi_draw_elements_indirect specifies multiple indexed geometric
primitives with very few subroutine calls. gl.multi_draw_elements_indirect
behaves similarly to a multitude of calls to
    gl.draw_elements_instanced_base_vertex_base_instance, except that the
parameters to gl.draw_elements_instanced_base_vertex_base_instance are
stored in an array in memory at the address given by indirect, separated by
the stride, in basic machine units, specified by stride. If stride is zero,
then the array is assumed to be tightly packed in memory.
Args:
mode: what kind of primitives to render.
type: the type of data in the buffer bound to the
gl_element_array_buffer binding.
indirect: the address of a structure containing an array of draw
parameters.
drawcount: the number of elements in the array addressed by indirect.
stride: the distance in basic machine units between elements of the draw
parameter array.
'''
@accepts(t.uint, t.enum, t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def get_program_interfaceiv(program, programinterface, pname, params):
pass
@accepts(t.uint, t.enum, t.char_p)
@returns(t.uint)
@binds(dll)
def get_program_resource_index(program, programinterface, name):
'''
query the index of a named resource within a program.
gl.get_program_resource_index returns the unsigned integer index assigned to
a resource named name in the interface type programInterface of program
object program.
Args:
program: the name of a program object whose resources to query.
programinterface: a token identifying the interface within program
containing the resource named name.
name: the name of the resource to query the index of.
'''
@accepts(t.uint, t.enum, t.uint, t.sizei, POINTER(t.sizei), t.char_p)
@returns(t.void)
@binds(dll)
def get_program_resource_name(program, programinterface, index, bufsize, length, name):
'''
query the name of an indexed resource within a program.
gl.get_program_resource_name retrieves the name string assigned to the
single active resource with an index of index in the interface
programInterface of program object program. index must be less than the
number of entries in the active resource list for programInterface.
Args:
program: the name of a program object whose resources to query.
programinterface: a token identifying the interface within program
containing the indexed resource.
index: the index of the resource within programinterface of program.
bufsize: the size of the character array whose address is given by name.
length: the address of a variable which will receive the length of the
resource name.
name: the address of a character array into which will be written the
name of the resource.
'''
@accepts(t.uint, t.enum, t.uint, t.sizei, POINTER(t.enum), t.sizei, POINTER(t.sizei), POINTER(t.int))
@returns(t.void)
@binds(dll)
def get_program_resourceiv(program, programinterface, index, propcount, props, bufsize, length, params):
pass
@accepts(t.uint, t.enum, t.char_p)
@returns(t.int)
@binds(dll)
def get_program_resource_location(program, programinterface, name):
'''
query the location of a named resource within a program.
gl.get_program_resource_location returns the location assigned to the
variable named name in interface programInterface of program object program.
program must be the name of a program that has been linked successfully.
programInterface must be one of gl.UNIFORM, gl.PROGRAM_INPUT,
gl.PROGRAM_OUTPUT, gl.VERTEX_SUBROUTINE_UNIFORM,
gl.TESS_CONTROL_SUBROUTINE_UNIFORM, gl.TESS_EVALUATION_SUBROUTINE_UNIFORM,
gl.GEOMETRY_SUBROUTINE_UNIFORM, gl.FRAGMENT_SUBROUTINE_UNIFORM,
gl.COMPUTE_SUBROUTINE_UNIFORM, or gl.TRANSFORM_FEEDBACK_BUFFER.
Args:
program: the name of a program object whose resources to query.
programinterface: a token identifying the interface within program
containing the resource named name.
name: the name of the resource to query the location of.
'''
@accepts(t.uint, t.enum, t.char_p)
@returns(t.int)
@binds(dll)
def get_program_resource_location_index(program, programinterface, name):
'''
query the fragment color index of a named variable within a program.
gl.get_program_resource_location_index returns the fragment color index
assigned to the variable named name in interface programInterface of program
object program. program must be the name of a program that has been linked
successfully. programInterface must be gl.PROGRAM_OUTPUT.
Args:
program: the name of a program object whose resources to query.
programinterface: a token identifying the interface within program
containing the resource named name.
name: the name of the resource to query the location of.
'''
@accepts(t.uint, t.uint, t.uint)
@returns(t.void)
@binds(dll)
def shader_storage_block_binding(program, storageblockindex, storageblockbinding):
'''
change an active shader storage block binding.
    gl.shader_storage_block_binding changes the active shader storage block
with an assigned index of storageBlockIndex in program object program.
storageBlockIndex must be an active shader storage block index in program.
storageBlockBinding must be less than the value of
gl.MAX_SHADER_STORAGE_BUFFER_BINDINGS. If successful,
    gl.shader_storage_block_binding specifies that program will use the data store of
the buffer object bound to the binding point storageBlockBinding to read and
write the values of the buffer variables in the shader storage block
identified by storageBlockIndex.
Args:
program: the name of the program containing the block whose binding to
change.
        storageblockindex: the index of the storage block within the program.
        storageblockbinding: the index of the storage block binding to
            associate with the specified storage block.
'''
@accepts(t.enum, t.enum, t.uint, t.intptr, t.sizeiptr)
@returns(t.void)
@binds(dll)
def tex_buffer_range(target, internalformat, buffer, offset, size):
'''
attach a range of a buffer object's data store to a buffer texture object.
gl.tex_buffer_range and gl.texture_buffer_range attach a range of the data
store of a specified buffer object to a specified texture object, and
    specify the storage format for the texture image found in the buffer
object. The texture object must be a buffer texture.
Args:
target: the target to which the texture object is bound for
gltexbufferrange.
internalformat: the internal format of the data in the store belonging
to buffer.
buffer: the name of the buffer object whose storage to attach to the
active buffer texture.
offset: the offset of the start of the range of the buffer's data store
to attach.
size: the size of the range of the buffer's data store to attach.
'''
@accepts(t.enum, t.sizei, t.enum, t.sizei, t.sizei, t.boolean)
@returns(t.void)
@binds(dll)
def tex_storage2_d_multisample(target, samples, internalformat, width, height, fixedsamplelocations):
'''
specify storage for a two-dimensional multisample texture.
Args:
target: the target to which the texture object is bound for
gltexstorage2dmultisample.
samples: the number of samples in the texture.
internalformat: the sized internal format to be used to store texture
image data.
width: the width of the texture, in texels.
height: the height of the texture, in texels.
fixedsamplelocations: whether the image will use identical sample
locations and the same number of samples for all texels in the
image, and the sample locations will not depend on the internal
format or size of the image.
'''
@accepts(t.enum, t.sizei, t.enum, t.sizei, t.sizei, t.sizei, t.boolean)
@returns(t.void)
@binds(dll)
def tex_storage3_d_multisample(target, samples, internalformat, width, height, depth, fixedsamplelocations):
'''
specify storage for a two-dimensional multisample array texture.
Args:
target: the target to which the texture object is bound for
gltexstorage3dmultisample.
samples: the number of samples in the texture.
internalformat: the sized internal format to be used to store texture
image data.
width: the width of the texture, in texels.
height: the height of the texture, in texels.
depth: the depth of the texture, in layers.
fixedsamplelocations: whether the image will use identical sample
locations and the same number of samples for all texels in the
image, and the sample locations will not depend on the internal
format or size of the image.
'''
@accepts(t.uint, t.enum, t.uint, t.enum, t.uint, t.uint, t.uint, t.uint)
@returns(t.void)
@binds(dll)
def texture_view(texture, target, origtexture, internalformat, minlevel, numlevels, minlayer, numlayers):
'''
initialize a texture as a data alias of another texture's data store.
gl.texture_view initializes a texture object as an alias, or view of another
texture object, sharing some or all of the parent texture's data store with
the initialized texture. texture specifies a name previously reserved by a
successful call to gl.gen_textures but that has not yet been bound or given
a target.
Args:
texture: the texture object to be initialized as a view.
target: the target to be used for the newly initialized texture.
origtexture: the name of a texture object of which to make a view.
minlevel: lowest level of detail of the view.
numlevels: the number of levels of detail to include in the view.
minlayer: the index of the first layer to include in the view.
numlayers: the number of layers to include in the view.
'''
@accepts(t.uint, t.uint, t.intptr, t.sizei)
@returns(t.void)
@binds(dll)
def bind_vertex_buffer(bindingindex, buffer, offset, stride):
'''
bind a buffer to a vertex buffer bind point.
gl.bind_vertex_buffer and gl.vertex_array_vertex_buffer bind the buffer
named buffer to the vertex buffer binding point whose index is given by
bindingindex. gl.bind_vertex_buffer modifies the binding of the currently
bound vertex array object, whereas gl.vertex_array_vertex_buffer allows the
    caller to specify the ID of the vertex array object with an argument named
vaobj, for which the binding should be modified. offset and stride specify
the offset of the first element within the buffer and the distance between
elements within the buffer, respectively, and are both measured in basic
machine units. bindingindex must be less than the value of
gl.MAX_VERTEX_ATTRIB_BINDINGS.
Args:
bindingindex: the index of the vertex buffer binding point to which to
bind the buffer.
buffer: the name of a buffer to bind to the vertex buffer binding point.
offset: the offset of the first element of the buffer.
stride: the distance between elements within the buffer.
'''
@accepts(t.uint, t.int, t.enum, t.boolean, t.uint)
@returns(t.void)
@binds(dll)
def vertex_attrib_format(attribindex, size, type, normalized, relativeoffset):
'''
specify the organization of vertex arrays.
gl.vertex_attrib_format, gl.vertex_attrib_i_format and
gl.vertex_attrib_l_format, as well as gl.vertex_array_attrib_format,
gl.vertex_array_attrib_i_format and gl.vertex_array_attrib_l_format specify
the organization of data in vertex arrays. The first three calls operate on
the bound vertex array object, whereas the last three ones modify the state
of a vertex array object with ID vaobj. attribindex specifies the index of
the generic vertex attribute array whose data layout is being described, and
must be less than the value of gl.MAX_VERTEX_ATTRIBS.
Args:
attribindex: the generic vertex attribute array being described.
size: the number of values per vertex that are stored in the array.
type: the type of the data stored in the array.
        normalized: whether fixed-point data values should be normalized
            (True) or converted directly as fixed-point values (False).
        relativeoffset: the offset, in basic machine units, of the first
            element relative to the start of the vertex buffer binding.
'''
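# Hedged sketch of the separate attrib-format pattern these entry points
# enable; 'vbo' is an assumed buffer name and 0x1406 is GL_FLOAT, which is
# defined in another module of these bindings.
#   bind_vertex_buffer(0, vbo, 0, 24)             # binding 0, stride 24 bytes
#   vertex_attrib_format(0, 3, 0x1406, False, 0)  # 3 floats at relative offset 0
#   vertex_attrib_binding(0, 0)                   # attribute 0 reads binding 0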
@accepts(t.uint, t.int, t.enum, t.uint)
@returns(t.void)
@binds(dll)
def vertex_attrib_i_format(attribindex, size, type, relativeoffset):
pass
@accepts(t.uint, t.int, t.enum, t.uint)
@returns(t.void)
@binds(dll)
def vertex_attrib_l_format(attribindex, size, type, relativeoffset):
pass
@accepts(t.uint, t.uint)
@returns(t.void)
@binds(dll)
def vertex_attrib_binding(attribindex, bindingindex):
'''
associate a vertex attribute and a vertex buffer binding for a vertex array
object.
gl.vertex_attrib_binding and gl.vertex_array_attrib_binding establishes an
association between the generic vertex attribute of a vertex array object
whose index is given by attribindex, and a vertex buffer binding whose index
is given by bindingindex. For gl.vertex_attrib_binding, the vertex array
object affected is that currently bound. For gl.vertex_array_attrib_binding,
vaobj is the name of the vertex array object.
Args:
attribindex: the index of the attribute to associate with a vertex
buffer binding.
bindingindex: the index of the vertex buffer binding with which to
associate the generic vertex attribute.
'''
@accepts(t.uint, t.uint)
@returns(t.void)
@binds(dll)
def vertex_binding_divisor(bindingindex, divisor):
'''
modify the rate at which generic vertex attributes advance.
gl.vertex_binding_divisor and gl.vertex_array_binding_divisor modify the
rate at which generic vertex attributes advance when rendering multiple
instances of primitives in a single draw command. If divisor is zero, the
attributes using the buffer bound to bindingindex advance once per vertex.
If divisor is non-zero, the attributes advance once per divisor instances of
the set of vertices being rendered. An attribute is referred to as instanced
if the corresponding divisor value is non-zero.
Args:
bindingindex: the index of the binding whose divisor to modify.
divisor: the new value for the instance step rate to apply.
'''
@accepts(t.enum, t.enum, t.enum, t.sizei, POINTER(t.uint), t.boolean)
@returns(t.void)
@binds(dll)
def debug_message_control(source, type, severity, count, ids, enabled):
'''
control the reporting of debug messages in a debug context.
gl.debug_message_control controls the reporting of debug messages generated
by a debug context. The parameters source, type and severity form a filter
to select messages from the pool of potential messages generated by the GL.
Args:
source: the source of debug messages to enable or disable.
type: the type of debug messages to enable or disable.
severity: the severity of debug messages to enable or disable.
count: the length of the array ids.
        ids: the address of an array of unsigned integers containing the ids of
the messages to enable or disable.
enabled: a boolean flag determining whether the selected messages should
be enabled or disabled.
'''
@accepts(t.enum, t.enum, t.uint, t.enum, t.sizei, t.char_p)
@returns(t.void)
@binds(dll)
def debug_message_insert(source, type, id, severity, length, buf):
'''
inject an application-supplied message into the debug message queue.
gl.debug_message_insert inserts a user-supplied message into the debug
output queue. source specifies the source that will be used to classify the
message and must be gl.DEBUG_SOURCE_APPLICATION or
gl.DEBUG_SOURCE_THIRD_PARTY. All other sources are reserved for use by the
GL implementation. type indicates the type of the message to be inserted and
may be one of gl.DEBUG_TYPE_ERROR, gl.DEBUG_TYPE_DEPRECATED_BEHAVIOR,
gl.DEBUG_TYPE_UNDEFINED_BEHAVIOR, gl.DEBUG_TYPE_PORTABILITY,
gl.DEBUG_TYPE_PERFORMANCE, gl.DEBUG_TYPE_MARKER, gl.DEBUG_TYPE_PUSH_GROUP,
gl.DEBUG_TYPE_POP_GROUP, or gl.DEBUG_TYPE_OTHER.
Args:
source: the source of the debug message to insert.
        type: the type of the debug message to insert.
id: the user-supplied identifier of the message to insert.
severity: the severity of the debug messages to insert.
        length: the length of the string contained in the character array
            whose address is given by buf.
buf: the address of a character array containing the message to insert.
'''
@accepts(t.DEBUGPROC, t.void)
@returns(t.void)
@binds(dll)
def debug_message_callback(callback, userparam):
'''
specify a callback to receive debugging messages from the GL.
gl.debug_message_callback sets the current debug output callback function to
the function whose address is given in callback.
Args:
callback: the address of a callback function that will be called when a
debug message is generated.
userparam: a user supplied pointer that will be passed on each
invocation of callback.
'''
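# Hedged usage sketch: the callback follows the GL DEBUGPROC convention, and
# the caller must keep the registered callback object alive. Depending on how
# these bindings convert Python callables, an explicit t.DEBUGPROC wrapper may
# be required.
#   def on_gl_message(source, type, id, severity, length, message, userparam):
#       print(severity, message)
#   debug_message_callback(on_gl_message, None)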
@accepts(t.uint, t.sizei, POINTER(t.enum), POINTER(t.enum), POINTER(t.uint), POINTER(t.enum), POINTER(t.sizei), t.char_p)
@returns(t.uint)
@binds(dll)
def get_debug_message_log(count, bufsize, sources, types, ids, severities, lengths, messagelog):
'''
retrieve messages from the debug message log.
gl.get_debug_message_log retrieves messages from the debug message log. A
maximum of count messages are retrieved from the log. If sources is not None
then the source of each message is written into up to count elements of the
array. If types is not None then the type of each message is written into up
to count elements of the array.
Args:
count: the number of debug messages to retrieve from the log.
bufsize: the size of the buffer whose address is given by messagelog.
sources: the address of an array of variables to receive the sources of
the retrieved messages.
types: the address of an array of variables to receive the types of the
retrieved messages.
ids: the address of an array of unsigned integers to receive the ids of
the retrieved messages.
severities: the address of an array of variables to receive the
            severities of the retrieved messages.
lengths: the address of an array of variables to receive the lengths of
the received messages.
messagelog: the address of an array of characters that will receive the
messages.
'''
@accepts(t.enum, t.uint, t.sizei, t.char_p)
@returns(t.void)
@binds(dll)
def push_debug_group(source, id, length, message):
'''
push a named debug group into the command stream.
gl.push_debug_group pushes a debug group described by the string message
into the command stream. The value of id specifies the ID of messages
generated. The parameter length contains the number of characters in
message. If length is negative, it is implied that message contains a null
terminated string.
Args:
source: the source of the debug message.
id: the identifier of the message.
length: the length of the message to be sent to the debug output stream.
        message: a string containing the message to be sent to the debug
output stream.
'''
@accepts()
@returns(t.void)
@binds(dll)
def pop_debug_group():
'''
pop the active debug group.
'''
@accepts(t.enum, t.uint, t.sizei, t.char_p)
@returns(t.void)
@binds(dll)
def object_label(identifier, name, length, label):
'''
label a named object identified within a namespace.
gl.object_label labels the object identified by name within the namespace
given by identifier. identifier must be one of gl.BUFFER, gl.SHADER,
gl.PROGRAM, gl.VERTEX_ARRAY, gl.QUERY, gl.PROGRAM_PIPELINE,
gl.TRANSFORM_FEEDBACK, gl.SAMPLER, gl.TEXTURE, gl.RENDERBUFFER,
gl.FRAMEBUFFER, to indicate the namespace containing the names of buffers,
shaders, programs, vertex array objects, query objects, program pipelines,
transform feedback objects, samplers, textures, renderbuffers and frame
buffers, respectively.
Args:
identifier: the namespace from which the name of the object is
allocated.
name: the name of the object to label.
length: the length of the label to be used for the object.
label: the address of a string containing the label to assign to the
object.
'''
@accepts(t.enum, t.uint, t.sizei, POINTER(t.sizei), t.char_p)
@returns(t.void)
@binds(dll)
def get_object_label(identifier, name, bufsize, length, label):
'''
retrieve the label of a named object identified within a namespace.
gl.get_object_label retrieves the label of the object identified by name
within the namespace given by identifier. identifier must be one of
gl.BUFFER, gl.SHADER, gl.PROGRAM, gl.VERTEX_ARRAY, gl.QUERY,
gl.PROGRAM_PIPELINE, gl.TRANSFORM_FEEDBACK, gl.SAMPLER, gl.TEXTURE,
gl.RENDERBUFFER, gl.FRAMEBUFFER, to indicate the namespace containing the
names of buffers, shaders, programs, vertex array objects, query objects,
program pipelines, transform feedback objects, samplers, textures,
renderbuffers and frame buffers, respectively.
Args:
identifier: the namespace from which the name of the object is
allocated.
name: the name of the object whose label to retrieve.
length: the address of a variable to receive the length of the object
label.
label: the address of a string that will receive the object label.
'''
@accepts(t.void, t.sizei, t.char_p)
@returns(t.void)
@binds(dll)
def object_ptr_label(ptr, length, label):
'''
    label a sync object identified by a pointer.
gl.object_ptr_label labels the sync object identified by ptr.
Args:
ptr: a pointer identifying a sync object.
length: the length of the label to be used for the object.
label: the address of a string containing the label to assign to the
object.
'''
@accepts(t.void, t.sizei, POINTER(t.sizei), t.char_p)
@returns(t.void)
@binds(dll)
def get_object_ptr_label(ptr, bufsize, length, label):
'''
retrieve the label of a sync object identified by a pointer.
gl.get_object_ptr_label retrieves the label of the sync object identified by
ptr.
Args:
ptr: the name of the sync object whose label to retrieve.
length: the address of a variable to receive the length of the object
label.
label: the address of a string that will receive the object label.
'''
@accepts(t.enum, t.void)
@returns(t.void)
@binds(dll)
def get_pointerv(pname, params):
'''
return the address of the specified pointer.
gl.get_pointerv returns pointer information. pname indicates the pointer to
be returned, and params is a pointer to a location in which to place the
returned data.
Args:
pname: the pointer to be returned.
params: returns the pointer value specified by pname.
'''
DISPLAY_LIST = 0x82E7
@accepts(t.enum, t.void)
@returns(t.void)
@binds(dll)
def get_pointerv(pname, params):
'''
return the address of the specified pointer.
gl.get_pointerv returns pointer information. pname indicates the pointer to
be returned, and params is a pointer to a location in which to place the
returned data.
Args:
pname: the pointer to be returned.
params: returns the pointer value specified by pname.
'''
STACK_UNDERFLOW = 0x0504
STACK_OVERFLOW = 0x0503
avg_line_length: 39.732811 | max_line_length: 151 | alphanum_fraction: 0.7412

hexsha: 8a7d81f9fd3f30534398ff05abd7412a6f78b709 | size: 4,035 | ext: py | lang: Python
max_stars: path MarkReport/MarkReport.py, repo dedukun/MarkReport, head 2d92c87a69db5868d14b7a59e815b9ee72d439f9, licenses ["MIT"], count null, events null
max_issues: path MarkReport/MarkReport.py, repo dedukun/MarkReport, head 2d92c87a69db5868d14b7a59e815b9ee72d439f9, licenses ["MIT"], count null, events null
max_forks: path MarkReport/MarkReport.py, repo dedukun/MarkReport, head 2d92c87a69db5868d14b7a59e815b9ee72d439f9, licenses ["MIT"], count null, events null
content:
#!/usr/bin/env python3
# Command line flags
import os
import glob
import re
import pyinotify
import subprocess
from sys import stdout, stderr
from time import time, sleep
from tempfile import gettempdir
from distutils.dir_util import copy_tree
from shutil import copyfile
from weasyprint import HTML
import argparse
parser = argparse.ArgumentParser(
description='Converts Markdown to elegant PDF reports')
parser.add_argument('--basic', dest='basic', action='store_true',
help='Do not enrich HTML with LaTeX and syntax highlighting (faster builds)')
parser.add_argument('--watch', dest='watch', action='store_true',
help='Watch the current folder for changes and rebuild automatically')
parser.add_argument('--quiet', dest='quiet', action='store_true',
help='Do not output any information')
parser.add_argument("--timeout", type=int, default=2,
help='Page generation timeout')
parser.add_argument("--base-html", type=str, default="",
help='The path to the base HTML file')
parser.set_defaults(watch=False)
args = parser.parse_args()
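# Example invocations (illustrative; the flags match those declared above):
#   MarkReport.py                  # one-shot build of output.pdf
#   MarkReport.py --watch          # rebuild whenever a file in the folder changes
#   MarkReport.py --basic --quiet  # skip LaTeX/highlighting, no console output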
# Check directory
ok = False
for file in os.listdir("."):
if file.endswith(".md"):
ok = True
break
if not ok:
stderr.write("No markdown file found in the current folder")
exit(1)
if args.base_html != "":
if not os.path.isfile(args.base_html):
stderr.write("The given base HTML file doesn't exist")
exit(1)
script_path = os.path.dirname(os.path.realpath(__file__))
# Temp dir
timestamp = str(int(time()))
tmp_dir = gettempdir() + "/" + timestamp + "_md-report/"
os.makedirs(tmp_dir, exist_ok=True)
# Headless browser
if not args.basic:
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
options = Options()
options.headless = True
options.log.level = "trace"
d = DesiredCapabilities.FIREFOX
d['loggingPrefs'] = {'browser': 'ALL'}
driver = webdriver.Firefox(options=options, capabilities=d)
driver.set_page_load_timeout(args.timeout)
prev_compile_time = 0
def recompile(notifier):
if notifier is not None and (notifier.maskname != "IN_MODIFY" or notifier.pathname.endswith(".pdf")):
return
global prev_compile_time
if time() - prev_compile_time < 1:
return
prev_compile_time = time()
if not args.quiet:
stdout.write("\rBuilding the PDF file...")
stdout.flush()
files = glob.glob(tmp_dir + '/*.md')
for f in files:
os.remove(f)
if args.base_html == "":
copyfile(script_path + "/base.html", tmp_dir + "/base.html")
else:
copyfile(args.base_html, tmp_dir + "/base.html")
if not os.path.islink(tmp_dir + "/src"):
os.symlink(script_path + "/src", tmp_dir + "/src")
copy_tree(".", tmp_dir)
# Markdown parsing
subprocess.check_output(script_path + "/md-parsing " +
tmp_dir, shell=True).decode('utf-8')
html_file_name = tmp_dir + "output.html"
# Interpret JS code
if not args.basic:
driver.get("file:///" + html_file_name)
sleep(2)
elem = driver.find_element_by_xpath("//*")
interpreted_html = elem.get_attribute("outerHTML")
with open(html_file_name, "w") as html_out_file:
html_out_file.write(interpreted_html)
# Create final PDF file
    pdf = HTML(html_file_name).write_pdf()
    with open("output.pdf", 'wb') as f:
        f.write(pdf)
if not args.quiet:
stdout.write("\rDone. ")
stdout.flush()
recompile(None)
if not args.watch:
if not args.basic:
driver.quit()
exit(0)
watch_manager = pyinotify.WatchManager()
event_notifier = pyinotify.Notifier(watch_manager, recompile)
watch_manager.add_watch(os.path.abspath("."), pyinotify.ALL_EVENTS, rec=True)
event_notifier.loop()
if not args.basic:
driver.quit()
avg_line_length: 27.827586 | max_line_length: 105 | alphanum_fraction: 0.662949

hexsha: efca9e6f2e6b4e27221c8a6c5a0028b7a586d7e2 | size: 16,097 | ext: py | lang: Python
max_stars: path sdk/python/pulumi_aws/ec2/ami.py, repo johnktims/pulumi-aws, head c838bc79043f5376c66fc66275a1e012edd3ab7d, licenses ["ECL-2.0", "Apache-2.0"], count null, events null
max_issues: path sdk/python/pulumi_aws/ec2/ami.py, repo johnktims/pulumi-aws, head c838bc79043f5376c66fc66275a1e012edd3ab7d, licenses ["ECL-2.0", "Apache-2.0"], count null, events null
max_forks: path sdk/python/pulumi_aws/ec2/ami.py, repo johnktims/pulumi-aws, head c838bc79043f5376c66fc66275a1e012edd3ab7d, licenses ["ECL-2.0", "Apache-2.0"], count null, events null
content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Ami(pulumi.CustomResource):
architecture: pulumi.Output[str]
"""
Machine architecture for created instances. Defaults to "x86_64".
"""
description: pulumi.Output[str]
"""
A longer, human-readable description for the AMI.
"""
ebs_block_devices: pulumi.Output[list]
"""
Nested block describing an EBS block device that should be
attached to created instances. The structure of this block is described below.
* `deleteOnTermination` (`bool`) - Boolean controlling whether the EBS volumes created to
support each created instance will be deleted once that instance is terminated.
* `device_name` (`str`) - The path at which the device is exposed to created instances.
* `encrypted` (`bool`) - Boolean controlling whether the created EBS volumes will be encrypted. Can't be used with `snapshot_id`.
* `iops` (`float`) - Number of I/O operations per second the
created volumes will support.
* `snapshot_id` (`str`) - The id of an EBS snapshot that will be used to initialize the created
EBS volumes. If set, the `volume_size` attribute must be at least as large as the referenced
snapshot.
* `volume_size` (`float`) - The size of created volumes in GiB.
If `snapshot_id` is set and `volume_size` is omitted then the volume will have the same size
as the selected snapshot.
* `volumeType` (`str`) - The type of EBS volume to create. Can be one of "standard" (the
default), "io1" or "gp2".
"""
ena_support: pulumi.Output[bool]
"""
Specifies whether enhanced networking with ENA is enabled. Defaults to `false`.
"""
ephemeral_block_devices: pulumi.Output[list]
"""
Nested block describing an ephemeral block device that
should be attached to created instances. The structure of this block is described below.
* `device_name` (`str`) - The path at which the device is exposed to created instances.
* `virtualName` (`str`) - A name for the ephemeral device, of the form "ephemeralN" where
*N* is a volume number starting from zero.
"""
image_location: pulumi.Output[str]
"""
Path to an S3 object containing an image manifest, e.g. created
by the `ec2-upload-bundle` command in the EC2 command line tools.
"""
kernel_id: pulumi.Output[str]
"""
The id of the kernel image (AKI) that will be used as the paravirtual
kernel in created instances.
"""
manage_ebs_snapshots: pulumi.Output[bool]
name: pulumi.Output[str]
"""
A region-unique name for the AMI.
"""
ramdisk_id: pulumi.Output[str]
"""
The id of an initrd image (ARI) that will be used when booting the
created instances.
"""
root_device_name: pulumi.Output[str]
"""
The name of the root device (for example, `/dev/sda1`, or `/dev/xvda`).
"""
root_snapshot_id: pulumi.Output[str]
"""
The Snapshot ID for the root volume (for EBS-backed AMIs)
"""
sriov_net_support: pulumi.Output[str]
"""
When set to "simple" (the default), enables enhanced networking
for created instances. No other value is supported at this time.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the resource.
"""
virtualization_type: pulumi.Output[str]
"""
Keyword to choose what virtualization mode created instances
will use. Can be either "paravirtual" (the default) or "hvm". The choice of virtualization type
changes the set of further arguments that are required, as described below.
"""
def __init__(__self__, resource_name, opts=None, architecture=None, description=None, ebs_block_devices=None, ena_support=None, ephemeral_block_devices=None, image_location=None, kernel_id=None, name=None, ramdisk_id=None, root_device_name=None, sriov_net_support=None, tags=None, virtualization_type=None, __props__=None, __name__=None, __opts__=None):
"""
The AMI resource allows the creation and management of a completely-custom
*Amazon Machine Image* (AMI).
If you just want to duplicate an existing AMI, possibly copying it to another
region, it's better to use `ec2.AmiCopy` instead.
If you just want to share an existing AMI with another AWS account,
it's better to use `ec2.AmiLaunchPermission` instead.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] architecture: Machine architecture for created instances. Defaults to "x86_64".
:param pulumi.Input[str] description: A longer, human-readable description for the AMI.
:param pulumi.Input[list] ebs_block_devices: Nested block describing an EBS block device that should be
attached to created instances. The structure of this block is described below.
:param pulumi.Input[bool] ena_support: Specifies whether enhanced networking with ENA is enabled. Defaults to `false`.
:param pulumi.Input[list] ephemeral_block_devices: Nested block describing an ephemeral block device that
should be attached to created instances. The structure of this block is described below.
:param pulumi.Input[str] image_location: Path to an S3 object containing an image manifest, e.g. created
by the `ec2-upload-bundle` command in the EC2 command line tools.
:param pulumi.Input[str] kernel_id: The id of the kernel image (AKI) that will be used as the paravirtual
kernel in created instances.
:param pulumi.Input[str] name: A region-unique name for the AMI.
:param pulumi.Input[str] ramdisk_id: The id of an initrd image (ARI) that will be used when booting the
created instances.
:param pulumi.Input[str] root_device_name: The name of the root device (for example, `/dev/sda1`, or `/dev/xvda`).
:param pulumi.Input[str] sriov_net_support: When set to "simple" (the default), enables enhanced networking
for created instances. No other value is supported at this time.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] virtualization_type: Keyword to choose what virtualization mode created instances
will use. Can be either "paravirtual" (the default) or "hvm". The choice of virtualization type
changes the set of further arguments that are required, as described below.
The **ebs_block_devices** object supports the following:
* `deleteOnTermination` (`pulumi.Input[bool]`) - Boolean controlling whether the EBS volumes created to
support each created instance will be deleted once that instance is terminated.
* `device_name` (`pulumi.Input[str]`) - The path at which the device is exposed to created instances.
* `encrypted` (`pulumi.Input[bool]`) - Boolean controlling whether the created EBS volumes will be encrypted. Can't be used with `snapshot_id`.
* `iops` (`pulumi.Input[float]`) - Number of I/O operations per second the
created volumes will support.
* `snapshot_id` (`pulumi.Input[str]`) - The id of an EBS snapshot that will be used to initialize the created
EBS volumes. If set, the `volume_size` attribute must be at least as large as the referenced
snapshot.
* `volume_size` (`pulumi.Input[float]`) - The size of created volumes in GiB.
If `snapshot_id` is set and `volume_size` is omitted then the volume will have the same size
as the selected snapshot.
* `volumeType` (`pulumi.Input[str]`) - The type of EBS volume to create. Can be one of "standard" (the
default), "io1" or "gp2".
The **ephemeral_block_devices** object supports the following:
* `device_name` (`pulumi.Input[str]`) - The path at which the device is exposed to created instances.
* `virtualName` (`pulumi.Input[str]`) - A name for the ephemeral device, of the form "ephemeralN" where
*N* is a volume number starting from zero.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['architecture'] = architecture
__props__['description'] = description
__props__['ebs_block_devices'] = ebs_block_devices
__props__['ena_support'] = ena_support
__props__['ephemeral_block_devices'] = ephemeral_block_devices
__props__['image_location'] = image_location
__props__['kernel_id'] = kernel_id
__props__['name'] = name
__props__['ramdisk_id'] = ramdisk_id
__props__['root_device_name'] = root_device_name
__props__['sriov_net_support'] = sriov_net_support
__props__['tags'] = tags
__props__['virtualization_type'] = virtualization_type
__props__['manage_ebs_snapshots'] = None
__props__['root_snapshot_id'] = None
super(Ami, __self__).__init__(
'aws:ec2/ami:Ami',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, architecture=None, description=None, ebs_block_devices=None, ena_support=None, ephemeral_block_devices=None, image_location=None, kernel_id=None, manage_ebs_snapshots=None, name=None, ramdisk_id=None, root_device_name=None, root_snapshot_id=None, sriov_net_support=None, tags=None, virtualization_type=None):
"""
Get an existing Ami resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] architecture: Machine architecture for created instances. Defaults to "x86_64".
:param pulumi.Input[str] description: A longer, human-readable description for the AMI.
:param pulumi.Input[list] ebs_block_devices: Nested block describing an EBS block device that should be
attached to created instances. The structure of this block is described below.
:param pulumi.Input[bool] ena_support: Specifies whether enhanced networking with ENA is enabled. Defaults to `false`.
:param pulumi.Input[list] ephemeral_block_devices: Nested block describing an ephemeral block device that
should be attached to created instances. The structure of this block is described below.
:param pulumi.Input[str] image_location: Path to an S3 object containing an image manifest, e.g. created
by the `ec2-upload-bundle` command in the EC2 command line tools.
:param pulumi.Input[str] kernel_id: The id of the kernel image (AKI) that will be used as the paravirtual
kernel in created instances.
:param pulumi.Input[str] name: A region-unique name for the AMI.
:param pulumi.Input[str] ramdisk_id: The id of an initrd image (ARI) that will be used when booting the
created instances.
:param pulumi.Input[str] root_device_name: The name of the root device (for example, `/dev/sda1`, or `/dev/xvda`).
:param pulumi.Input[str] root_snapshot_id: The Snapshot ID for the root volume (for EBS-backed AMIs)
:param pulumi.Input[str] sriov_net_support: When set to "simple" (the default), enables enhanced networking
for created instances. No other value is supported at this time.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] virtualization_type: Keyword to choose what virtualization mode created instances
will use. Can be either "paravirtual" (the default) or "hvm". The choice of virtualization type
changes the set of further arguments that are required, as described below.
The **ebs_block_devices** object supports the following:
* `deleteOnTermination` (`pulumi.Input[bool]`) - Boolean controlling whether the EBS volumes created to
support each created instance will be deleted once that instance is terminated.
* `device_name` (`pulumi.Input[str]`) - The path at which the device is exposed to created instances.
* `encrypted` (`pulumi.Input[bool]`) - Boolean controlling whether the created EBS volumes will be encrypted. Can't be used with `snapshot_id`.
* `iops` (`pulumi.Input[float]`) - Number of I/O operations per second the
created volumes will support.
* `snapshot_id` (`pulumi.Input[str]`) - The id of an EBS snapshot that will be used to initialize the created
EBS volumes. If set, the `volume_size` attribute must be at least as large as the referenced
snapshot.
* `volume_size` (`pulumi.Input[float]`) - The size of created volumes in GiB.
If `snapshot_id` is set and `volume_size` is omitted then the volume will have the same size
as the selected snapshot.
* `volumeType` (`pulumi.Input[str]`) - The type of EBS volume to create. Can be one of "standard" (the
default), "io1" or "gp2".
The **ephemeral_block_devices** object supports the following:
* `device_name` (`pulumi.Input[str]`) - The path at which the device is exposed to created instances.
* `virtualName` (`pulumi.Input[str]`) - A name for the ephemeral device, of the form "ephemeralN" where
*N* is a volume number starting from zero.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["architecture"] = architecture
__props__["description"] = description
__props__["ebs_block_devices"] = ebs_block_devices
__props__["ena_support"] = ena_support
__props__["ephemeral_block_devices"] = ephemeral_block_devices
__props__["image_location"] = image_location
__props__["kernel_id"] = kernel_id
__props__["manage_ebs_snapshots"] = manage_ebs_snapshots
__props__["name"] = name
__props__["ramdisk_id"] = ramdisk_id
__props__["root_device_name"] = root_device_name
__props__["root_snapshot_id"] = root_snapshot_id
__props__["sriov_net_support"] = sriov_net_support
__props__["tags"] = tags
__props__["virtualization_type"] = virtualization_type
return Ami(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
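# Hedged usage sketch (not part of the generated SDK above): a minimal example of how a
# Pulumi program might declare this Ami resource. The AMI name, snapshot id and device
# names below are illustrative placeholders, not values taken from this module.
def _example_create_ami():
    import pulumi_aws as aws
    return aws.ec2.Ami(
        "example",
        name="my-custom-ami",
        virtualization_type="hvm",
        root_device_name="/dev/xvda",
        ebs_block_devices=[{
            "device_name": "/dev/xvda",
            "snapshot_id": "snap-0123456789abcdef0",  # placeholder snapshot id
            "volume_size": 8,
        }],
    )
# An existing AMI can likewise be adopted with `Ami.get(resource_name, id)`, as documented
# in the `get` static method above.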
| 58.322464
| 357
| 0.68373
|
3dbf1841410630bd6fac85dc89f71eeb66d73349
| 11,039
|
py
|
Python
|
lemur/tests/conf.py
|
dck25/lemur
|
32dda00f9fb4b72091bb3f4e7be1d262ea5fd857
|
[
"Apache-2.0"
] | 1,656
|
2015-09-20T03:12:28.000Z
|
2022-03-29T18:00:54.000Z
|
lemur/tests/conf.py
|
dck25/lemur
|
32dda00f9fb4b72091bb3f4e7be1d262ea5fd857
|
[
"Apache-2.0"
] | 3,017
|
2015-09-18T23:15:24.000Z
|
2022-03-30T22:40:02.000Z
|
lemur/tests/conf.py
|
hosseinsh/lemur
|
fbf50b365cb0f7a0e9cae31dec1b853b958c45bb
|
[
"Apache-2.0"
] | 401
|
2015-09-18T23:02:18.000Z
|
2022-02-20T16:13:14.000Z
|
# This is just Python which means you can inherit and tweak settings
import base64
import os
import random
import string
_basedir = os.path.abspath(os.path.dirname(__file__))
# generate random secrets for unittest
def get_random_secret(length):
secret_key = ''.join(random.choice(string.ascii_uppercase) for x in range(round(length / 4)))
secret_key = secret_key + ''.join(random.choice("~!@#$%^&*()_+") for x in range(round(length / 4)))
secret_key = secret_key + ''.join(random.choice(string.ascii_lowercase) for x in range(round(length / 4)))
return secret_key + ''.join(random.choice(string.digits) for x in range(round(length / 4)))
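# Hedged illustration (not part of the original test config): for a length divisible by
# four, the helper above returns exactly `length` characters, built as four equal runs of
# uppercase letters, punctuation, lowercase letters and digits, in that order.
def _example_secret_shape(length=16):
    secret = get_random_secret(length)
    return len(secret) == length  # True for length=16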
THREADS_PER_PAGE = 8
# General
# These will need to be set to `True` if you are developing locally
CORS = False
debug = False
TESTING = True
# this is the secret key used by flask session management (utf8 encoded)
SECRET_KEY = get_random_secret(length=32).encode('utf8')
# You should consider storing these separately from your config (should be URL-safe)
LEMUR_TOKEN_SECRET = "test"
LEMUR_ENCRYPTION_KEYS = base64.urlsafe_b64encode(get_random_secret(length=32).encode('utf8'))
# this is the secret used to generate oauth state tokens
OAUTH_STATE_TOKEN_SECRET = base64.b64encode(get_random_secret(32).encode('utf8'))
OAUTH_STATE_TOKEN_STALE_TOLERANCE_SECONDS = 15
# List of domain regular expressions that non-admin users can issue
LEMUR_ALLOWED_DOMAINS = [
r"^[a-zA-Z0-9-]+\.example\.com$",
r"^[a-zA-Z0-9-]+\.example\.org$",
r"^example\d+\.long\.com$",
]
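# Hedged illustration (not part of the original config): a sketch of checking a requested
# domain against the allow-list above; Lemur's real enforcement lives elsewhere.
def _example_domain_allowed(domain="www.example.com"):
    import re
    return any(re.match(pattern, domain) for pattern in LEMUR_ALLOWED_DOMAINS)  # True here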
# Mail Server
# Lemur currently only supports SES for sending email; this address
# needs to be verified
LEMUR_EMAIL = "lemur@example.com"
LEMUR_SECURITY_TEAM_EMAIL = ["security@example.com"]
LEMUR_HOSTNAME = "lemur.example.com"
# Logging
LOG_LEVEL = "DEBUG"
LOG_FILE = "lemur.log"
LEMUR_DEFAULT_COUNTRY = "US"
LEMUR_DEFAULT_STATE = "California"
LEMUR_DEFAULT_LOCATION = "Los Gatos"
LEMUR_DEFAULT_ORGANIZATION = "Example, Inc."
LEMUR_DEFAULT_ORGANIZATIONAL_UNIT = "Example"
LEMUR_ALLOW_WEEKEND_EXPIRATION = False
# needed for test_certificates
LEMUR_PORTS_FOR_DEPLOYED_CERTIFICATE_CHECK = [443, 65521, 65522, 65523, 65524]
# needed for test_messaging
LEMUR_REISSUE_NOTIFICATION_EXCLUDED_DESTINATIONS = ['excluded-destination']
# Database
# Modify this if you are not using a local database. Do not use any development or production DBs,
# as the unit tests drop the whole schema, recreate it, and drop everything again at the end.
SQLALCHEMY_DATABASE_URI = os.getenv(
"SQLALCHEMY_DATABASE_URI", "postgresql://lemur:lemur@localhost:5432/lemur"
)
SQLALCHEMY_TRACK_MODIFICATIONS = False
# AWS
LEMUR_INSTANCE_PROFILE = "Lemur"
# Issuers
# These will be dependent on which 3rd party that Lemur is
# configured to use.
# CLOUDCA_URL = ''
# CLOUDCA_PEM_PATH = ''
# CLOUDCA_BUNDLE = ''
# number of years to issue if not specified
# CLOUDCA_DEFAULT_VALIDITY = 2
DIGICERT_URL = "mock://www.digicert.com"
DIGICERT_ORDER_TYPE = "ssl_plus"
DIGICERT_API_KEY = "api-key"
DIGICERT_ORG_ID = 111111
DIGICERT_ROOT = "ROOT"
DIGICERT_CIS_URL = "mock://www.digicert.com"
DIGICERT_CIS_PROFILE_NAMES = {"sha2-rsa-ecc-root": "ssl_plus"}
DIGICERT_CIS_API_KEY = "api-key"
DIGICERT_CIS_ROOTS = {"root": "ROOT"}
VERISIGN_URL = "http://example.com"
VERISIGN_PEM_PATH = "~/"
VERISIGN_FIRST_NAME = "Jim"
VERISIGN_LAST_NAME = "Bob"
VERSIGN_EMAIL = "jim@example.com"
ACME_AWS_ACCOUNT_NUMBER = "11111111111"
ACME_PRIVATE_KEY = """
-----BEGIN RSA PRIVATE KEY-----
MIIJJwIBAAKCAgEA0+jySNCc1i73LwDZEuIdSkZgRYQ4ZQVIioVf38RUhDElxy51
4gdWZwp8/TDpQ8cVXMj6QhdRpTVLluOz71hdvBAjxXTISRCRlItzizTgBD9CLXRh
vPLIMPvAJH7JZxp9xW5oVYUcHBveQJ5tQvnP7RgPykejl7DPKm/SGKYealnoGPcP
U9ipz2xXlVlx7ZKivLbaijh2kD/QE9pC//CnP31g3QFCsxOTLAWtICz5VbvaWuTT
whqFs5cT3kKYAW/ccPcty573AX/9Y/UZ4+B3wxXY3/6GYPMcINRuu/7Srs3twlNu
udoTNdM9SztWMYUzz1SMYad9v9LLGTrv+5Tog4YsqMFxyKrBBBz8/bf1lKwyfAW+
okvVe+1bUY8iSDuDx1O0iMyHe5w8lxsoTy91ujjr1cQDyJR70TKQpeBmfNtBVnW+
D8E6Xw2yCuL9XTyBApldzQ/J1ObPd1Hv+yzhEx4VD9QOmQPn7doiapTDYfW51o1O
Mo+zuZgsclhePvzqN4/6VYXZnPE68uqx982u0W82tCorRUtzfFoO0plNRCjmV7cw
0fp0ie3VczUOH9gj4emmdQd1tVA/Esuh3XnzZ2ANwohtPytn+I3MX0Q+5k7AcRlt
AyI80x8CSiDStI6pj3BlPJgma9G8u7r3E2aqW6qXCexElTCaH2t8A7JWI80CAwEA
AQKCAgBDXLyQGwiQKXPYFDvs/cXz03VNA9/tdQV/SzCT8FQxhXIN5B4DEPQNY08i
KUctjX6j9RtgoQsKKmvx9kY/omaBntvQK/RzDXpJrx62tMM1dmpyCpn7N24d7BlD
QK6DQO+UMCmobdzmrpEzF2mCLelD5C84zRca5FCmm888mKn4gsX+EaNksu4gCr+4
sSs/KyriNHo6EALYjgB2Hx7HP1fbHd8JwhnS1TkmeFN1c/Z6o3GhDTancEjqMu9U
6vRpGIcJvflnzguVBXumJ8boInXPpQVBBybucLmTUhQ1XKbafInFCUKcf881gAXv
AVi/+yjiEm1hqZ2WucpoJc0du1NBz/MP+/MxHGQ/5eaEMIz5X2QcXzQ4xn5ym0sk
Hy0SmH3v/9by1GkK5eH/RTV/8bmtb8Qt0+auLQ6/ummFDjPw866Or4FdL3tx2gug
fONjaZqypee+EmlLG1UmMejjCblmh0bymAHnFkf7tAJsLGd8I00PQiObEqaqd03o
xiYUvrbDpCHah4gB7Uv3AgrHVTbcHsEWmXuNDooD0sSXCFMf3cA81M8vGfkypqi/
ixxZtxtdTU5oCFwI9zEjnQvdA1IZMUAmz8vLwn/fKgENek9PAV3voQr1c0ctZPvy
S/k7HgJt+2Wj7Pqb4mwPgxeYVSBEM7ygOq6Gdisyhi8DP0A2fQKCAQEA6iIrSqQM
pVDqhQsk9Cc0b4kdsG/EM66M7ND5Q2GLiPPFrR59Hm7ViG6h2DhwqSnSRigiO+TN
jIuvD/O0kbmCUZSar19iKPiJipENN+AX3MBm1cS5Oxp6jgY+3jj4KgDQPYmL49fJ
CojnmLKjrAPoUi4f/7s4O1rEAghXPrf5/9coaRPORiNi+bZK0bReJwf1GE/9CPqs
FiZrQNz+/w/1MwFisG6+g0/58fp9j9r6l8JXETjpyO5F+8W8bg8M4V7aoYt5Ec2X
+BG6Gq06Tvm2UssYa6iEVNSKF39ssBzKKALi4we/fcfwjq4bCTKMCjV0Tp3zY/FG
1VyDtMGKrlPnOwKCAQEA57Nw+qdh2wbihz1uKffcoDoW6Q3Ws0mu8ml+UvBn48Ur
41PKrvIb8lhVY7ZiF2/iRyodua9ztE4zvgGs7UqyHaSYHR+3mWeOAE2Hb/XiNVgu
JVupTXLpx3y7d9FxvrU/27KUxhJgcbVpIGRiMn5dmY2S86EYKX1ObjZKmwvFc6+n
1YWgtI2+VOKe5+0ttig6CqzL9qJLZfL6QeAy0yTp/Wz+G1c06XTL87QNeU7CXN00
rB7I4n1Xn422rZnE64MOsARVChyE2fUC9syfimoryR9yIL2xor9QdjL2tK6ziyPq
WgedY4bDjZLM5KbcHcRng0j5WCJV+pX9Hh1c4n5AlwKCAQAxjun68p56n5YEc0dv
Jp1CvpM6NW4iQmAyAEnCqXMPmgnNixaQyoUIS+KWEdxG8kM/9l7IrrWTej2j8sHV
1p5vBjV3yYjNg04ZtnpFyXlDkLYzqWBL0l7+kPPdtdFRkrqBTAwAPjyfrjrXZ3id
gHY8bub3CnnsllnG1F0jOW4BaVl0ZGzVC8h3cs6DdNo5CMYoT0YQEH88cQVixWR0
OLx9/10UW1yYDuWpAoxxVriURt6HFrTlgwntMP2hji37xkggyZTm3827BIWP//rH
nLOq8rJIl3LrQdG5B4/J904TCglcZNdzmE6i5Nd0Ku7ZelcUDPrnvLpxjxORvyXL
oJbhAoIBAD7QV9WsIQxG7oypa7828foCJYni9Yy/cg1H6jZD9HY8UuybH7yT6F2n
8uZIYIloDJksYsifNyfvd3mQbLgb4vPEVnS2z4hoGYgdfJUuvLeng0MfeWOEvroV
J6GRB1wjOP+vh0O3YawR+UEN1c1Iksl5JxijWLCOxv97+nfUFiCJw19QjcPFFY9f
rKLFmvniJ/IS7GydjQFDgPLw+/Zf8IuCy9TPrImJ32zfKDP11R1l3sy2v9EfF+0q
dxbTNB6A9i9jzUYjeyS3lqkfyjS1Gc+5lbAonQq5APA6WsWbAxO6leL4Y4PC2ir8
XE20qsHrKADgfLCXBmYb2XYbkb3ZalsCggEAfOuB9/eLMSmtney3vDdZNF8fvEad
DF+8ss8yITNQQuC0nGdXioRuvSyejOxtjHplMT5GXsgLp1vAujDQmGTv/jK+EXsU
cRe4df5/EbRiUOyx/ZBepttB1meTnsH6cGPN0JnmTMQHQvanL3jjtjrC13408ONK
1yK2S4xJjKYFLT86SjKvV6g5k49ntLYk59nviqHl8bYzAVMoEjb62Z+hERwd/2hx
omsEEjDt4qVqGvSyy+V/1EhqGPzm9ri3zapnorf69rscuXYYsMBZ8M6AtSio4ldB
LjCRNS1lR6/mV8AqUNR9Kn2NLQyJ76yDoEVLulKZqGUsC9STN4oGJLUeFw==
-----END RSA PRIVATE KEY-----
"""
ACME_ROOT = """
-----BEGIN CERTIFICATE-----
MIIFjTCCA3WgAwIBAgIRANOxciY0IzLc9AUoUSrsnGowDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTYxMDA2MTU0MzU1
WhcNMjExMDA2MTU0MzU1WjBKMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg
RW5jcnlwdDEjMCEGA1UEAxMaTGV0J3MgRW5jcnlwdCBBdXRob3JpdHkgWDMwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCc0wzwWuUuR7dyXTeDs2hjMOrX
NSYZJeG9vjXxcJIvt7hLQQWrqZ41CFjssSrEaIcLo+N15Obzp2JxunmBYB/XkZqf
89B4Z3HIaQ6Vkc/+5pnpYDxIzH7KTXcSJJ1HG1rrueweNwAcnKx7pwXqzkrrvUHl
Npi5y/1tPJZo3yMqQpAMhnRnyH+lmrhSYRQTP2XpgofL2/oOVvaGifOFP5eGr7Dc
Gu9rDZUWfcQroGWymQQ2dYBrrErzG5BJeC+ilk8qICUpBMZ0wNAxzY8xOJUWuqgz
uEPxsR/DMH+ieTETPS02+OP88jNquTkxxa/EjQ0dZBYzqvqEKbbUC8DYfcOTAgMB
AAGjggFnMIIBYzAOBgNVHQ8BAf8EBAMCAYYwEgYDVR0TAQH/BAgwBgEB/wIBADBU
BgNVHSAETTBLMAgGBmeBDAECATA/BgsrBgEEAYLfEwEBATAwMC4GCCsGAQUFBwIB
FiJodHRwOi8vY3BzLnJvb3QteDEubGV0c2VuY3J5cHQub3JnMB0GA1UdDgQWBBSo
SmpjBH3duubRObemRWXv86jsoTAzBgNVHR8ELDAqMCigJqAkhiJodHRwOi8vY3Js
LnJvb3QteDEubGV0c2VuY3J5cHQub3JnMHIGCCsGAQUFBwEBBGYwZDAwBggrBgEF
BQcwAYYkaHR0cDovL29jc3Aucm9vdC14MS5sZXRzZW5jcnlwdC5vcmcvMDAGCCsG
AQUFBzAChiRodHRwOi8vY2VydC5yb290LXgxLmxldHNlbmNyeXB0Lm9yZy8wHwYD
VR0jBBgwFoAUebRZ5nu25eQBc4AIiMgaWPbpm24wDQYJKoZIhvcNAQELBQADggIB
ABnPdSA0LTqmRf/Q1eaM2jLonG4bQdEnqOJQ8nCqxOeTRrToEKtwT++36gTSlBGx
A/5dut82jJQ2jxN8RI8L9QFXrWi4xXnA2EqA10yjHiR6H9cj6MFiOnb5In1eWsRM
UM2v3e9tNsCAgBukPHAg1lQh07rvFKm/Bz9BCjaxorALINUfZ9DD64j2igLIxle2
DPxW8dI/F2loHMjXZjqG8RkqZUdoxtID5+90FgsGIfkMpqgRS05f4zPbCEHqCXl1
eO5HyELTgcVlLXXQDgAWnRzut1hFJeczY1tjQQno6f6s+nMydLN26WuU4s3UYvOu
OsUxRlJu7TSRHqDC3lSE5XggVkzdaPkuKGQbGpny+01/47hfXXNB7HntWNZ6N2Vw
p7G6OfY+YQrZwIaQmhrIqJZuigsrbe3W+gdn5ykE9+Ky0VgVUsfxo52mwFYs1JKY
2PGDuWx8M6DlS6qQkvHaRUo0FMd8TsSlbF0/v965qGFKhSDeQoMpYnwcmQilRh/0
ayLThlHLN81gSkJjVrPI0Y8xCVPB4twb1PFUd2fPM3sA1tJ83sZ5v8vgFv2yofKR
PB0t6JzUA81mSqM3kxl5e+IZwhYAyO0OTg3/fs8HqGTNKd9BqoUwSRBzp06JMg5b
rUCGwbCUDI0mxadJ3Bz4WxR6fyNpBK2yAinWEsikxqEt
-----END CERTIFICATE-----
"""
ACME_URL = "https://acme-v01.api.letsencrypt.org"
ACME_EMAIL = "jim@example.com"
ACME_TEL = "4088675309"
ACME_DIRECTORY_URL = "https://acme-v01.api.letsencrypt.org"
ACME_DISABLE_AUTORESOLVE = True
ACME_PREFERRED_ISSUER = "R3"
LDAP_AUTH = True
LDAP_BIND_URI = "ldap://localhost"
LDAP_BASE_DN = "dc=example,dc=com"
LDAP_EMAIL_DOMAIN = "example.com"
LDAP_REQUIRED_GROUP = "Lemur Access"
LDAP_DEFAULT_ROLE = "role1"
ALLOW_CERT_DELETION = True
ENTRUST_API_CERT = "api-cert"
ENTRUST_API_KEY = get_random_secret(32)
ENTRUST_API_USER = "user"
ENTRUST_API_PASS = get_random_secret(32)
ENTRUST_URL = "https://api.entrust.net/enterprise/v2"
ENTRUST_ROOT = """
-----BEGIN CERTIFICATE-----
MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC
VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50
cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs
IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz
dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy
NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu
dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt
dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0
aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj
YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T
RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN
cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW
wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1
U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0
jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN
BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/
jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ
Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v
1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R
nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH
VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g==
-----END CERTIFICATE-----
"""
ENTRUST_NAME = "lemur"
ENTRUST_EMAIL = "lemur@example.com"
ENTRUST_PHONE = "123456"
ENTRUST_ISSUING = ""
ENTRUST_PRODUCT_ENTRUST = "ADVANTAGE_SSL"
| 41.973384
| 110
| 0.875079
|
94ebc71450e564b8f4a69951afee87f0f3a93673
| 405
|
py
|
Python
|
experiments/fdtd-2d/tmp_files/8010.py
|
LoopTilingBenchmark/benchmark
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
[
"BSD-2-Clause"
] | null | null | null |
experiments/fdtd-2d/tmp_files/8010.py
|
LoopTilingBenchmark/benchmark
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
[
"BSD-2-Clause"
] | null | null | null |
experiments/fdtd-2d/tmp_files/8010.py
|
LoopTilingBenchmark/benchmark
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
[
"BSD-2-Clause"
] | null | null | null |
from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/8010.c')
procedure('kernel_fdtd_2d')
loop(0)
known(' nx > 1 ')
known(' ny > 1 ')
tile(1,2,8,2)
tile(1,4,8,4)
tile(2,2,8,2)
tile(2,4,8,4)
tile(3,2,8,2)
tile(3,4,8,4)
| 22.5
| 116
| 0.716049
|
f72d894c5dd643cc66f3cf18f2330569c6a1b5c9
| 6,672
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/pantoearwandensis.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/pantoearwandensis.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/pantoearwandensis.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Pantoea rwandensis.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 22:19:45.558653
The undirected graph Pantoea rwandensis has 3765 nodes and 306976 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.04332 and has 12 connected components, where the component with most
nodes has 3741 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 128, the mean node degree is 163.07, and
the node degree mode is 1. The top 5 most central nodes are 1076550.LH22_12305
(degree 1310), 1076550.LH22_16485 (degree 1299), 1076550.LH22_02530 (degree
1292), 1076550.LH22_19995 (degree 1166) and 1076550.LH22_07950 (degree
1066).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import PantoeaRwandensis
# Then load the graph
graph = PantoeaRwandensis()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def PantoeaRwandensis(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Pantoea rwandensis graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
Whether to show loading bars during the retrieval and building
Wether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of the Pantoea rwandensis graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 22:19:45.558653
The undirected graph Pantoea rwandensis has 3765 nodes and 306976 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.04332 and has 12 connected components, where the component with most
nodes has 3741 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 128, the mean node degree is 163.07, and
the node degree mode is 1. The top 5 most central nodes are 1076550.LH22_12305
(degree 1310), 1076550.LH22_16485 (degree 1299), 1076550.LH22_02530 (degree
1292), 1076550.LH22_19995 (degree 1166) and 1076550.LH22_07950 (degree
1066).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import PantoeaRwandensis
# Then load the graph
graph = PantoeaRwandensis()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="PantoeaRwandensis",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 34.931937
| 223
| 0.702938
|
f46add89112802971419bce983a111ed3621f685
| 1,297
|
py
|
Python
|
test/cpython/test_xmllib.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2020-02-06T14:28:45.000Z
|
2020-02-06T14:28:45.000Z
|
test/cpython/test_xmllib.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
test/cpython/test_xmllib.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2020-02-06T14:29:00.000Z
|
2020-02-06T14:29:00.000Z
|
'''Test module to test the xmllib module.
Sjoerd Mullender
'''
testdoc = """\
<?xml version="1.0" encoding="UTF-8" standalone='yes' ?>
<!-- comments aren't allowed before the <?xml?> tag,
but they are allowed before the <!DOCTYPE> tag -->
<?processing instructions are allowed in the same places as comments ?>
<!DOCTYPE greeting [
<!ELEMENT greeting (#PCDATA)>
]>
<greeting>Hello, world!</greeting>
"""
nsdoc = "<foo xmlns='URI' attr='val'/>"
from test import test_support
import unittest
# Silence Py3k warning
xmllib = test_support.import_module('xmllib', deprecated=True)
class XMLParserTestCase(unittest.TestCase):
def test_simple(self):
parser = xmllib.XMLParser()
for c in testdoc:
parser.feed(c)
parser.close()
def test_default_namespace(self):
class H(xmllib.XMLParser):
def unknown_starttag(self, name, attr):
self.name, self.attr = name, attr
h=H()
h.feed(nsdoc)
h.close()
# The default namespace applies to elements...
self.assertEqual(h.name, "URI foo")
# but not to attributes
self.assertEqual(h.attr, {'attr':'val'})
def test_main():
test_support.run_unittest(XMLParserTestCase)
if __name__ == "__main__":
test_main()
| 26.469388
| 71
| 0.643793
|
1849d6bbbc33afae0f34ffbeba3eb895383fbf85
| 1,040
|
py
|
Python
|
web/judge/migrations/0004_auto_20160225_2110.py
|
jc-hiroto/esoe-oop-judge-github
|
76e9ba90001d6d3eeee64e9209c50586c5aa766b
|
[
"MIT"
] | 1
|
2020-03-15T05:47:32.000Z
|
2020-03-15T05:47:32.000Z
|
web/judge/migrations/0004_auto_20160225_2110.py
|
jc-hiroto/esoe-oop-judge-github
|
76e9ba90001d6d3eeee64e9209c50586c5aa766b
|
[
"MIT"
] | 3
|
2020-03-15T06:17:12.000Z
|
2021-06-10T22:39:28.000Z
|
web/judge/migrations/0004_auto_20160225_2110.py
|
jc-hiroto/esoe-oop-judge-github
|
76e9ba90001d6d3eeee64e9209c50586c5aa766b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-25 21:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('judge', '0003_auto_20160223_2222'),
]
operations = [
migrations.AlterField(
model_name='submission',
name='running_time',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='submission',
name='score',
field=models.IntegerField(db_index=True, default=0),
),
migrations.AlterField(
model_name='submission',
name='status',
field=models.CharField(choices=[('SU', 'Submitting'), ('SE', 'Submission Error'), ('JU', 'Judging'), ('AC', 'Accepted'), ('PA', 'Partially Accepted'), ('TL', 'Time Limit Exceeded'), ('ML', 'Memory Limit Exceeded'), ('RE', 'Runtime Error'), ('CE', 'Compile Error')], default='SU', max_length=2),
),
]
| 33.548387
| 306
| 0.584615
|
70e24749170e918510099ad864529e0ceda16cae
| 8,194
|
py
|
Python
|
vega/trainer/callbacks/metrics_evaluator.py
|
jie311/vega
|
1bba6100ead802697e691403b951e6652a99ccae
|
[
"MIT"
] | null | null | null |
vega/trainer/callbacks/metrics_evaluator.py
|
jie311/vega
|
1bba6100ead802697e691403b951e6652a99ccae
|
[
"MIT"
] | null | null | null |
vega/trainer/callbacks/metrics_evaluator.py
|
jie311/vega
|
1bba6100ead802697e691403b951e6652a99ccae
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""ProgressLogger call defination."""
import vega
from copy import deepcopy
from .callback import Callback
from vega.common import ClassFactory, ClassType
@ClassFactory.register(ClassType.CALLBACK)
class MetricsEvaluator(Callback):
"""Callback that shows the progress of evaluating metrics."""
def __init__(self):
"""Initialize MetricsEvaluator callback."""
super(MetricsEvaluator, self).__init__()
self.priority = 230
def before_train(self, logs=None):
"""Be called before the training process."""
self.do_validation = self.params.get('do_validation', False)
self.cur_loss = None
self.loss_avg = None
self.cur_train_perfs = None
self.best_train_perfs = None
self.cur_valid_perfs = None
self.best_valid_perfs = None
self.best_valid_changed = False
self.summary_perfs = None
self.perfs_cmp_mode = self.trainer.config.perfs_cmp_mode
self.perfs_cmp_key = self.trainer.config.perfs_cmp_key
# get_train_metric_after_epoch: detector or no need to get train_metrics after epoch
self.get_train_metric_after_epoch = self.trainer.config.get_train_metric_after_epoch
def before_epoch(self, epoch, logs=None):
"""Be called before each epoach."""
self.train_metrics = self.trainer.train_metrics
self.valid_metrics = self.trainer.valid_metrics
self.counted_steps = 0
self.total_loss = 0
if self.train_metrics is not None:
self.train_metrics.reset()
if self.do_validation and self.valid_metrics is not None:
self.valid_metrics.reset()
def before_train_step(self, batch_index, logs=None):
"""Be called before a batch training."""
self.train_batch = logs['train_batch']
def after_train_step(self, batch_index, logs=None):
"""Be called after each train batch."""
if isinstance(self.train_batch, list) and isinstance(self.train_batch[0], dict):
input, target = self.train_batch, None
else:
input, target = self.train_batch
if isinstance(logs['lr'], list):
self.lr = logs['lr'][0]
else:
self.lr = logs['lr']
if self.trainer.config.is_detection_trainer:
self.cur_loss = logs['loss']
self.loss_avg = self.cur_loss
else:
if isinstance(input, dict):
batch_size = 1
elif isinstance(input, list):
batch_size = len(input)
else:
batch_size = input.size(0)
self.cur_loss = logs['loss']
self.loss_avg = self._average_loss(batch_size, self.cur_loss)
logs.update({'cur_loss': self.cur_loss, 'loss_avg': self.loss_avg, 'lr': self.lr})
def before_valid_step(self, batch_index, logs=None):
"""Be called before a batch validation."""
self.valid_batch = logs['valid_batch']
def after_valid_step(self, batch_index, logs=None):
"""Be called after each batch of validation."""
if self.do_validation and self.valid_metrics is not None:
if isinstance(self.valid_batch, list) and isinstance(self.valid_batch[0], dict):
target = self.valid_batch
else:
target = self.valid_batch[1]
output = logs['valid_batch_output']
self.valid_metrics(output, target)
def after_valid(self, logs=None):
"""Be called after validation."""
if self.do_validation and self.valid_metrics is not None:
# Get the summary of valid metrics
metrics_results = self.valid_metrics.results
if vega.is_torch_backend() and self.trainer.distributed:
for key, value in metrics_results.items():
metrics_results[key] = self.trainer._metric_average(value, key)
if 'loss' in metrics_results:
metrics_results.pop('loss')
if 'global_step' in metrics_results:
metrics_results.pop('global_step')
self.cur_valid_perfs = metrics_results
logs.update({'cur_valid_perfs': self.cur_valid_perfs})
# update best valid perfs based on the current valid perfs
if self.best_valid_perfs is None:
self.best_valid_changed = True
self.best_valid_perfs = self.cur_valid_perfs
else:
self.best_valid_changed = self._update_best_perfs(self.cur_valid_perfs,
self.best_valid_perfs)
logs.update({'cur_valid_perfs': self.cur_valid_perfs,
'best_valid_perfs': self.best_valid_perfs,
'best_valid_perfs_changed': self.best_valid_changed})
def after_epoch(self, epoch, logs=None):
"""Be called after each epoch."""
self.summary_perfs = logs.get('summary_perfs', {})
self.summary_perfs.update({'loss_avg': self.loss_avg})
if self.train_metrics is not None and self.get_train_metric_after_epoch:
# Get the summary of train metrics
metrics_results = self.train_metrics.results
self.cur_train_perfs = metrics_results
# update best train perfs based on current train perfs
if self.best_train_perfs is None:
self.best_train_perfs = deepcopy(self.cur_train_perfs)
else:
self._update_best_perfs(self.cur_train_perfs,
self.best_train_perfs)
self.summary_perfs.update({'cur_train_perfs': self.cur_train_perfs,
'best_train_perfs': self.best_train_perfs})
if self.do_validation and self.valid_metrics is not None:
self.summary_perfs.update({'cur_valid_perfs': self.cur_valid_perfs,
'best_valid_perfs': self.best_valid_perfs,
'best_valid_perfs_changed': self.best_valid_changed})
logs.update({'summary_perfs': self.summary_perfs})
def after_train(self, logs=None):
"""Be called before training."""
self.after_epoch(self.trainer.epochs, logs)
def _update_best_perfs(self, cur_perfs, best_perfs):
best_changed = False
if self.perfs_cmp_key is None:
# Select the first key as the default for comparison
self.perfs_cmp_key = list(cur_perfs.keys())[0]
# Get the values for comparison based on the key
if isinstance(best_perfs[self.perfs_cmp_key], list):
best_val = best_perfs[self.perfs_cmp_key][0]
cur_val = cur_perfs[self.perfs_cmp_key][0]
else:
best_val = best_perfs[self.perfs_cmp_key]
cur_val = cur_perfs[self.perfs_cmp_key]
# Store the perfs after comparison based on mode
if self.perfs_cmp_mode is None:
self.perfs_cmp_mode = self.valid_metrics.objectives.get(self.perfs_cmp_key)
if self.perfs_cmp_mode == 'MAX':
if cur_val > best_val:
best_perfs.update(deepcopy(cur_perfs))
best_changed = True
elif self.perfs_cmp_mode == 'MIN':
if cur_val < best_val:
best_perfs.update(deepcopy(cur_perfs))
best_changed = True
else:
best_perfs.update(deepcopy(cur_perfs))
best_changed = True
return best_changed
def _average_loss(self, batch_size, cur_loss):
self.counted_steps += batch_size
self.total_loss += cur_loss * batch_size
averaged_loss = self.total_loss / self.counted_steps
return averaged_loss
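# Hedged illustration (not part of the original callback): a standalone sketch of the
# best-performance update rule implemented in `_update_best_perfs` above, reduced to a
# single metric whose objective mode is 'MAX'.
def _example_best_perf_update(best_perfs, cur_perfs, key="accuracy", mode="MAX"):
    best_val, cur_val = best_perfs[key], cur_perfs[key]
    improved = cur_val > best_val if mode == "MAX" else cur_val < best_val
    if improved:
        best_perfs.update(deepcopy(cur_perfs))
    return improved
# e.g. _example_best_perf_update({"accuracy": 0.71}, {"accuracy": 0.74}) returns True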
| 45.270718
| 92
| 0.631316
|
de46ca6dd5ff959d02ea01656a89eb1105dd5065
| 2,193
|
py
|
Python
|
pcbs/kicad_db/kicad_StepUp_tools/kicadStepUpMod/Init.py
|
Roboy/bldc_testbed
|
a52860d40627feb2c4b40d96fb973122e41c464f
|
[
"BSD-3-Clause"
] | 2
|
2019-11-27T03:54:12.000Z
|
2020-07-19T09:02:06.000Z
|
pcbs/kicad_db/kicad_StepUp_tools/kicadStepUpMod/Init.py
|
Roboy/bldc_testbed
|
a52860d40627feb2c4b40d96fb973122e41c464f
|
[
"BSD-3-Clause"
] | null | null | null |
pcbs/kicad_db/kicad_StepUp_tools/kicadStepUpMod/Init.py
|
Roboy/bldc_testbed
|
a52860d40627feb2c4b40d96fb973122e41c464f
|
[
"BSD-3-Clause"
] | 2
|
2020-09-30T13:40:09.000Z
|
2020-10-14T21:35:23.000Z
|
# Idf init module
# (c) 2003 Juergen Riegel
#
# Gathering all the information to start FreeCAD
# This is the second one of three init scripts, the third one
# runs when the gui is up
#***************************************************************************
#* (c) Juergen Riegel (juergen.riegel@web.de) 2002 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
# two options for IDF added by Milos Koutny (12-Feb-2010)
FreeCAD.addImportType("Kicad pcb board/mod File Type (*.kicad_pcb *.emn *.kicad_mod)","kicadStepUptools")
#FreeCAD.addImportType("IDF emp File Type (*.emp)","Import_Emp")
| 57.710526
| 106
| 0.45098
|
480e0eeb14fec032c8baf838bf916ca87ca12ac0
| 1,082
|
py
|
Python
|
setup.py
|
ohahlev/ahlev-django-location
|
7d6060ab7b21509f53790f5863b596f2b95c286a
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
ohahlev/ahlev-django-location
|
7d6060ab7b21509f53790f5863b596f2b95c286a
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
ohahlev/ahlev-django-location
|
7d6060ab7b21509f53790f5863b596f2b95c286a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import location
setup(
name='ahlev-django-location',
version=location.__version__,
description='location app using django framework',
long_description='location app using django framework',
long_description_content_type='text/x-rst',
author='ahlev',
author_email='ohahlev@gmail.com',
include_package_data=True,
url='https://github.com/ohahlev/ahlev-django-location/tree/%s' % location.__version__,
packages=find_packages(),
install_requires=[
'django-tinymce',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
zip_safe=False,
)
# Usage of setup.py:
# $> python setup.py register # registering package on PYPI
# $> python setup.py build sdist upload # build, make source dist and upload to PYPI
| 32.787879
| 90
| 0.666359
|
74571d6bb03c639a683afd4b035d536ff41ec503
| 3,929
|
py
|
Python
|
bitmovin_api_sdk/models/ad_analytics_greater_than_or_equal_filter.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 11
|
2019-07-03T10:41:16.000Z
|
2022-02-25T21:48:06.000Z
|
bitmovin_api_sdk/models/ad_analytics_greater_than_or_equal_filter.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 8
|
2019-11-23T00:01:25.000Z
|
2021-04-29T12:30:31.000Z
|
bitmovin_api_sdk/models/ad_analytics_greater_than_or_equal_filter.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 13
|
2020-01-02T14:58:18.000Z
|
2022-03-26T12:10:30.000Z
|
# coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.ad_analytics_abstract_filter import AdAnalyticsAbstractFilter
from bitmovin_api_sdk.models.ad_analytics_attribute import AdAnalyticsAttribute
import pprint
import six
class AdAnalyticsGreaterThanOrEqualFilter(AdAnalyticsAbstractFilter):
@poscheck_model
def __init__(self,
name=None,
value=None):
# type: (AdAnalyticsAttribute, object) -> None
super(AdAnalyticsGreaterThanOrEqualFilter, self).__init__(name=name)
self._value = None
self.discriminator = None
if value is not None:
self.value = value
@property
def openapi_types(self):
types = {}
if hasattr(super(AdAnalyticsGreaterThanOrEqualFilter, self), 'openapi_types'):
types = getattr(super(AdAnalyticsGreaterThanOrEqualFilter, self), 'openapi_types')
types.update({
'value': 'object'
})
return types
@property
def attribute_map(self):
attributes = {}
if hasattr(super(AdAnalyticsGreaterThanOrEqualFilter, self), 'attribute_map'):
attributes = getattr(super(AdAnalyticsGreaterThanOrEqualFilter, self), 'attribute_map')
attributes.update({
'value': 'value'
})
return attributes
@property
def value(self):
# type: () -> object
"""Gets the value of this AdAnalyticsGreaterThanOrEqualFilter.
:return: The value of this AdAnalyticsGreaterThanOrEqualFilter.
:rtype: object
"""
return self._value
@value.setter
def value(self, value):
# type: (object) -> None
"""Sets the value of this AdAnalyticsGreaterThanOrEqualFilter.
:param value: The value of this AdAnalyticsGreaterThanOrEqualFilter.
:type: object
"""
if value is not None:
if not isinstance(value, object):
raise TypeError("Invalid type for `value`, type has to be `object`")
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
if hasattr(super(AdAnalyticsGreaterThanOrEqualFilter, self), "to_dict"):
result = super(AdAnalyticsGreaterThanOrEqualFilter, self).to_dict()
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if value is None:
continue
if isinstance(value, list):
if len(value) == 0:
continue
result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]]
elif hasattr(value, "to_dict"):
result[self.attribute_map.get(attr)] = value.to_dict()
elif isinstance(value, Enum):
result[self.attribute_map.get(attr)] = value.value
elif isinstance(value, dict):
result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()}
else:
result[self.attribute_map.get(attr)] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AdAnalyticsGreaterThanOrEqualFilter):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
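# Hedged usage sketch (not part of the generated SDK above): constructing the filter and
# serializing it with `to_dict`. The threshold 42 is an arbitrary placeholder, and any
# extra keys contributed by AdAnalyticsAbstractFilter are not shown here.
def _example_filter_dict():
    flt = AdAnalyticsGreaterThanOrEqualFilter(name=None, value=42)
    return flt.to_dict()  # includes {'value': 42}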
| 32.471074
| 164
| 0.62255
|
738d8af85050022696ea7bea03e265239930ec89
| 2,561
|
py
|
Python
|
hprof/_special_cases.py
|
SonyMobile/py-hyprof
|
932f559640da450c39966c35072464ce4b003d1a
|
[
"MIT"
] | 11
|
2020-04-09T05:58:20.000Z
|
2022-02-27T16:59:19.000Z
|
hprof/_special_cases.py
|
SonyMobile/py-hyprof
|
932f559640da450c39966c35072464ce4b003d1a
|
[
"MIT"
] | null | null | null |
hprof/_special_cases.py
|
SonyMobile/py-hyprof
|
932f559640da450c39966c35072464ce4b003d1a
|
[
"MIT"
] | 3
|
2020-04-15T07:10:16.000Z
|
2021-12-09T08:47:32.000Z
|
# Copyright (C) 2020 Sony Mobile Communications Inc.
# Licensed under the LICENSE.
'''
This module contains code for handling some Java classes in specific ways, e.g.
making str() return the actual text of a java.lang.String.
'''
import codecs
def _jstr_to_str(self):
''' get the string contents of a java.lang.String '''
data = getattr(self, 'value', None)
if data is not None:
# OpenJDK has a 'coder' attribute that tells us the encoding:
# https://github.com/openjdk/jdk/blob/6c9d6507/src/java.base/share/classes/java/lang/String.java#L163
coder = getattr(self, 'coder', None)
if len(data) == 0:
return ''
elif isinstance(data[0], bytes) or isinstance(data[0], int):
# Could be ART/Android 'compressed' ascii bytes, or OpenJDK bytes
static_latin1 = getattr(self, 'LATIN1', None)
static_utf16 = getattr(self, 'UTF16', None)
if coder is None:
# could be Android/ART 'compressed'
bytes_encoding = 'ascii'
elif coder == static_latin1:
bytes_encoding = 'latin-1'
elif coder == static_utf16:
# big- or little-endian? May depend on the machine the hprof came from.
# Let's guess little-endian.
bytes_encoding = 'utf-16-le'
else:
raise ValueError('unknown string class encoding')
return bytes(b&0xff for b in data).decode(bytes_encoding)
elif coder is None and isinstance(data[0], str):
# Looks like ART/Android 'uncompressed' utf16 chars.
# char arrays may have surrogate pairs that should be merged into
# real unicode characters. The simplest solution is to flatten them
# to bytes, then decode them properly.
joined = ''.join(data)
flattened = codecs.encode(joined, 'utf-16-be', 'surrogatepass')
return flattened.decode('utf-16-be')
# alright, let the wrapper handle it.
raise TypeError('unknown string class layout')
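# Hedged illustration (not part of the original module): the surrogate-pair flattening
# trick used above, shown on a hand-built char array containing one astral-plane character.
def _example_surrogate_flatten():
    chars = ['H', 'i', ' ', '\ud83d', '\ude00']  # 'Hi ' plus the surrogate pair for U+1F600
    joined = ''.join(chars)
    flattened = codecs.encode(joined, 'utf-16-be', 'surrogatepass')
    return flattened.decode('utf-16-be')  # 'Hi 😀'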
def _wrap_with_fallback(old, new):
def fallback_wrapper(*args, **kwargs):
''' calls the replacement function; if it fails, calls the original. '''
try:
return new(*args, **kwargs)
except Exception: # pylint: disable=broad-except
if old is None:
raise
return old(*args, **kwargs)
return fallback_wrapper
def add(hprof_file, clsname, method_name, func):
''' add a special function onto a class. '''
for heap in hprof_file.heaps:
for cls in heap.classes.get(clsname, ()):
old = getattr(cls, method_name, None)
wrapper = _wrap_with_fallback(old, func)
setattr(cls, method_name, wrapper)
def setup_builtins(hf):
''' setup all special case builtins. '''
add(hf, 'java.lang.String', '__str__', _jstr_to_str)
| 34.146667
| 103
| 0.705974
|
ce38b23f0def48d6b313dbf6dc93e8db0579e153
| 5,470
|
py
|
Python
|
train_cnn.py
|
damaha/iasi-atmosphere
|
e54ef3f0ca57afdaff43a6c6915c7cabbd430ee4
|
[
"MIT"
] | null | null | null |
train_cnn.py
|
damaha/iasi-atmosphere
|
e54ef3f0ca57afdaff43a6c6915c7cabbd430ee4
|
[
"MIT"
] | null | null | null |
train_cnn.py
|
damaha/iasi-atmosphere
|
e54ef3f0ca57afdaff43a6c6915c7cabbd430ee4
|
[
"MIT"
] | null | null | null |
import json, os
import numpy as np
import pandas as pd
from data_load import get_datafiles
from generators import iasi_generator
from models import *
from keras.callbacks import ModelCheckpoint, TensorBoard, CSVLogger, ReduceLROnPlateau
from keras.utils import multi_gpu_model
from keras.models import model_from_json
from tensorflow.python.client import device_lib
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
### Parameters ###
params = {"ModelName" : "models/model_firsttry",
"ModelType" : simple_model_1,
"n_comp" : 128,
"f_spec" : 5,
"batchsize" : 16,
"epochs" : 52,
"period_weights" : 10,
"seed" : 18374,
"outputs" : np.arange(47, 137).astype(int),
"path" : ".../IASI/data_v3/",
"train_code" : open("train_cnn.py", "r").read(),
"model_code" : open("models.py", "r").read(),
"generator_code" : open("generators.py", "r").read()}
np.random.seed(params['seed'])
### Split data and make generators ###
train_files, valid_files = get_datafiles(params["path"])
X_shape = np.load(valid_files[0][0]).shape
Y_shape = np.load(valid_files[0][1]).shape
indexs = [None, params["outputs"]]
dc = json.load(open("scaling_coeffs.json"))
train_generator = iasi_generator(train_files, batch_size=params['batchsize'], selected_channels=indexs, norm_coeffs=[dc['mean'], dc['variance']])
valid_generator = iasi_generator(valid_files, batch_size=params['batchsize'], selected_channels=indexs, norm_coeffs=[dc['mean'], dc['variance']])
model = params["ModelType"](X_shape, params["outputs"].size, n_comp=params["n_comp"], f_spec=params["n_comp"])
if os.path.isfile(params["ModelName"]+"_config.json") and os.path.isfile(params["ModelName"]+".h5"):
if os.path.isfile(params["ModelName"]+"_history.json"):
json_hist = json.load(open(params["ModelName"]+"_history.json","r"))
else:
json_hist = {'loss':[],'val_loss':[]}
json_str = json.load(open(params["ModelName"]+"_config.json","r"))
model = model_from_json(json_str)
model.load_weights(params["ModelName"]+".h5")
log = np.genfromtxt(params["ModelName"]+".log", delimiter=",",skip_header=1)
e_init = int(log[-1,0] // params["period_weights"] * params["period_weights"])
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse'])
print("Continuing training process from epoch %d" % e_init)
else:
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse'])
json_string = model.to_json()
json.dump(json_string, open(params["ModelName"]+"_config.json", "w"))
model_object = model
e_init = 0
json_hist = {'loss':[],'val_loss':[]}
nb_gpus = len(get_available_gpus())
if nb_gpus > 1:
m_model = multi_gpu_model(model, gpus=nb_gpus)
m_model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse'])
model_object = m_model
else:
model_object = model
import time
start = time.time()
history = model_object.fit_generator(train_generator,
callbacks=[ModelCheckpoint(params["ModelName"].split('/')[-1]+".{epoch:02d}.h5",
monitor='loss',
verbose=1,
period=params["period_weights"],
save_weights_only=True),
# TensorBoard(log_dir='tmp/logs/'),
CSVLogger(params["ModelName"]+'.log'),
ReduceLROnPlateau(monitor='loss',
factor=0.2,
patience=5,
min_lr=0.0001,
min_delta=0.001)],
validation_data=valid_generator,
epochs=params["epochs"],
max_queue_size=5,
verbose=2,
initial_epoch=e_init)
seconds = time.time()-start
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
print('It took %d:%d:%02d:%02d to train.' % (d, h, m, s))
model.save_weights(params["ModelName"]+".h5")
import socket
dct = history.history
dct['loss'] = json_hist['loss']+dct['loss']
dct['val_loss'] = json_hist['val_loss']+dct['val_loss']
dct["number_of_gpus"] = nb_gpus
dct["hostname"] = socket.gethostname()
dct["training_files"] = str(train_files)
dct["test_files"] = str(valid_files)
dct["training_time"] = '%d:%d:%02d:%02d' % (d, h, m, s)
dct.update(params)
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
json.dump(dct, open(params["ModelName"]+"_history.json", 'w'), cls=NpEncoder)
| 41.755725
| 145
| 0.568556
|
b443357f110ebd075f6ea689480bb84b32219729
| 3,733
|
py
|
Python
|
utils/scripts/c64/C64Charset.py
|
xahmol/8bit-Unity
|
b4f3bee00e012ca1755afba550a5270dce0a1054
|
[
"BSD-2-Clause"
] | 42
|
2018-12-12T01:00:59.000Z
|
2022-03-27T07:32:29.000Z
|
utils/scripts/c64/C64Charset.py
|
xahmol/8bit-Unity
|
b4f3bee00e012ca1755afba550a5270dce0a1054
|
[
"BSD-2-Clause"
] | 13
|
2020-11-06T13:50:45.000Z
|
2022-01-25T07:17:37.000Z
|
utils/scripts/c64/C64Charset.py
|
xahmol/8bit-Unity
|
b4f3bee00e012ca1755afba550a5270dce0a1054
|
[
"BSD-2-Clause"
] | 8
|
2020-11-14T04:30:26.000Z
|
2021-01-16T17:55:19.000Z
|
"""
* Copyright (c) 2018 Anthony Beaucamp.
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software in a
* product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not
* be misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any distribution.
*
* 4. The names of this software and/or its copyright holders may not be
* used to endorse or promote products derived from this software without
* specific prior written permission.
"""
import io, os, sys, csv
from PIL import Image
charFile = sys.argv[1]
output = sys.argv[2]
try:
sharedColors = [int(n) for n in sys.argv[3].split(',')]
except:
sharedColors = []
flagFile = charFile.replace('-c64.png', '.csv')
#############################
# Read char and font bitmaps
charImg = Image.open(charFile)
charRaw = list(charImg.getdata())
print "Charset size: {%i,%i}; Colors: %i" % (charImg.size[0], charImg.size[1], max(charRaw))
#########################
# Allocate shared colors
if sharedColors == []:
distrib = [0] * 16
for row in range(0, charImg.size[1], 8):
for col in range(0, charImg.size[0], 4):
# Collect block data
block = []
for j in range(0, 8):
index = (row+j)*charImg.size[0]+col
for i in range(0, 4):
block.append(charRaw[index+i])
for i in range(0, 16):
if i in block:
distrib[i] += 1
popular = sorted(range(len(distrib)), key=distrib.__getitem__)[13:16]
order = sorted(range(len(popular)), key=popular.__getitem__)
sharedColors = [popular[i] for i in order]
colData = [chr(c) for c in sharedColors]
############################
# Rearrange into 4*8 blocks
charBlocks = []
attrData = [chr(0x0f)] * 128
for row in range(0, charImg.size[1], 8):
for col in range(0, charImg.size[0], 4):
for j in range(0, 8):
for i in range(0, 4):
color = charRaw[(row+j)*charImg.size[0]+col+i]
if color in sharedColors:
charBlocks.append(sharedColors.index(color))
else:
attrData[row*4+col/4] = chr(color%8+8)
charBlocks.append(3)
############################################
# Convert char and font data to C64 format
charData = [chr(0)] * (128*8)
for i in range(0, len(charBlocks), 4):
charData[i/4] = chr((charBlocks[i+0]<<6) + (charBlocks[i+1]<<4) + (charBlocks[i+2]<<2) + (charBlocks[i+3]<<0))
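# Worked example (illustrative, not from the original script): four 2-bit colour
# indices [3, 1, 0, 2] pack MSB-first into a single bitmap byte:
# (3<<6) + (1<<4) + (0<<2) + (2<<0) == 0b11010010 == 210.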
#######################
# Read character flags
flagData = [chr(0)] * 128
with open(flagFile) as csvfile:
i = 0
rows = csv.reader(csvfile, delimiter=',')
for row in rows:
for elt in row:
flagData[i] = chr(int(elt))
i += 1
############################
# Write output binary file
f2 = io.open(output, 'wb')
f2.write(''.join(colData))
f2.write(''.join(charData))
f2.write(''.join(attrData))
f2.write(''.join(flagData))
f2.close()
| 35.552381
| 115
| 0.574873
|
5854eb450a0117586b28899963e92f6b24d51f1d
| 7,956
|
py
|
Python
|
app/grandchallenge/core/views.py
|
kant/grand-challenge.org
|
608266ae3376448fc56c3bb4e34138ab81a45e2a
|
[
"Apache-2.0"
] | null | null | null |
app/grandchallenge/core/views.py
|
kant/grand-challenge.org
|
608266ae3376448fc56c3bb4e34138ab81a45e2a
|
[
"Apache-2.0"
] | null | null | null |
app/grandchallenge/core/views.py
|
kant/grand-challenge.org
|
608266ae3376448fc56c3bb4e34138ab81a45e2a
|
[
"Apache-2.0"
] | null | null | null |
from django.conf import settings
from django.core.files.storage import DefaultStorage
from django.http import Http404
from django.shortcuts import render
from django.template import Template, TemplateSyntaxError, RequestContext
from django.utils._os import safe_join
from grandchallenge.challenges.models import Challenge
from grandchallenge.subdomains.utils import reverse
from grandchallenge.pages.models import Page, ErrorPage
def site(request):
site = request.challenge
pages = site.page_set.all()
if len(pages) == 0:
currentpage = ErrorPage(
challenge=site,
title="no_pages_found",
html="No pages found for this site. Please log in and add some pages.",
)
else:
currentpage = pages[0]
currentpage = getRenderedPageIfAllowed(currentpage, request)
return render(
request,
"page.html",
{"site": site, "currentpage": currentpage, "pages": pages},
)
def renderTags(request, p, recursecount=0):
""" render page contents using django template system
This makes it possible to use tags like '{% dataset %}' in page content.
If a rendered tag results in another tag, this can be rendered recursively
as long as recurse limit is not exceeded.
"""
recurselimit = 2
try:
t = Template("{% load grandchallenge_tags %}" + p.html)
except TemplateSyntaxError as e:
# when page contents cannot be rendered, just display raw contents and include error message on page
errormsg = (
'<span class="pageError"> Error rendering template: %s </span>' % e
)
pagecontents = p.html + errormsg
return pagecontents
# pass page to context here to be able to render tags based on which page does the rendering
context = RequestContext(request, {"currentpage": p})
pagecontents = t.render(context)
if (
"{%" in pagecontents or "{{" in pagecontents
): # if rendered tags results in another tag, try to render this as well
if recursecount < recurselimit:
p2 = copy_page(p)
p2.html = pagecontents
return renderTags(request, p2, recursecount + 1)
else:
# when page contents cannot be rendered, just display raw contents and include error message on page
errormsg = (
'<span class="pageError"> Error rendering template: rendering recursed further than'
+ str(recurselimit)
+ " </span>"
)
pagecontents = p.html + errormsg
return pagecontents
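# Illustrative flow (hypothetical page content): if a page's html contains a tag
# such as '{% dataset %}' whose rendered output itself contains another tag,
# renderTags calls itself on a copy of the page until no tags remain or the
# recurselimit of 2 re-renders is reached, at which point an error span is appended.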
def permissionMessage(request, p):
if request.user.is_authenticated:
msg = """ <div class="system_message">
<h2> Restricted page</h2>
                <p>This page can only be viewed by participants of this project. To view this page, please make sure of the following:</p>
<ul>
<li>First, log in to {} by using the 'Sign in' button at the top right.</li>
<li>Second, you need to join / register with the specific project you are interested in as a participant.
The link to do this is provided by the project organizers on the project website.</li>
</ul>
                </div>
""".format(
settings.MAIN_PROJECT_NAME
)
title = p.title
else:
msg = (
"The page '"
+ p.title
+ "' can only be viewed by registered users. Please sign in to view this page."
)
title = p.title
return ErrorPage(challenge=request.challenge, title=title, html=msg)
# TODO: could a decorator be better then all these ..IfAllowed pages?
def getRenderedPageIfAllowed(page_or_page_title, request):
""" check permissions and render tags in page. If string title is given page is looked for
return nice message if not allowed to view"""
if isinstance(page_or_page_title, bytes):
page_or_page_title = page_or_page_title.decode()
if isinstance(page_or_page_title, str):
page_title = page_or_page_title
try:
p = request.challenge.page_set.get(title__iexact=page_title)
except Page.DoesNotExist:
raise Http404
else:
p = page_or_page_title
if p.can_be_viewed_by(request.user):
p.html = renderTags(request, p)
currentpage = p
else:
currentpage = permissionMessage(request, p)
return currentpage
def get_data_folder_path(challenge_short_name):
""" Returns physical base path to the root of the folder where all files for
this project are kept """
return safe_join(settings.MEDIA_ROOT, challenge_short_name)
def get_dirnames(path):
""" Get all directory names in path as list of strings
Raises: OSError if directory can not be found
"""
storage = DefaultStorage()
dirnames = storage.listdir(path)[0]
dirnames.sort()
return dirnames
def comicmain(request, page_title=""):
""" show content as main page item. Loads pages from the main project """
challenge_short_name = settings.MAIN_PROJECT_NAME
try:
site = Challenge.objects.get(short_name__iexact=challenge_short_name)
except Challenge.DoesNotExist:
link = reverse("challenges:create")
link = link + "?short_name=%s" % challenge_short_name
link_html = create_HTML_a(
link, "Create project '%s'" % challenge_short_name
)
html = """I'm trying to show the first page for main project '%s' here,
but '%s' does not exist. %s.""" % (
challenge_short_name,
challenge_short_name,
link_html,
)
page = create_temp_page(title="no_pages_found", html=html)
return render(
request,
"temppage.html",
{"site": page.challenge, "currentpage": page},
)
pages = site.page_set.all()
if len(pages) == 0:
link = reverse(
"pages:list", kwargs={"challenge_short_name": challenge_short_name}
)
link_html = create_HTML_a(link, "admin interface")
html = """I'm trying to show the first page for main project '%s' here,
but '%s' contains no pages. Please add
some in the %s.""" % (
challenge_short_name,
challenge_short_name,
link_html,
)
page = create_temp_page(title="no_pages_found", html=html)
return render(
request,
"temppage.html",
{"site": page.challenge, "currentpage": page},
)
if page_title:
pages = [p for p in pages if p.title.lower() == page_title.lower()]
if len(pages) != 1:
raise ValueError(
f"{len(pages)} pages with title {page_title} were found for {site}"
)
page = pages[0]
page.html = renderTags(request, page)
return render(request, "page.html", {"currentpage": page})
# ======================================== not called directly from urls.py ==
def create_HTML_a(link_url, link_text):
return '<a href="' + link_url + '">' + link_text + "</a>"
def create_HTML_a_img(link_url, image_url):
""" create a linked image """
img = '<img src="' + image_url + '">'
linked_image = create_HTML_a(link_url, img)
return linked_image
def copy_page(page):
return Page(challenge=page.challenge, title=page.title, html=page.html)
def create_temp_page(title="temp_page", html=""):
""" Create a quick mockup page which you can show, without needing to read
anything from database
"""
site = Challenge() # any page requires a site, create on the fly here.
site.short_name = "Temp"
site.name = "Temporary page"
site.skin = ""
return Page(challenge=site, title=title, html=html)
| 34
| 138
| 0.620161
|
6658ed3ef1023a8e42b737c37eff5491771e23b9
| 1,081
|
py
|
Python
|
Curso_Em_Video_Python/ex036.py
|
ThallesTorres/Curso_Em_Video_Python
|
95ffbff5a03f11fee41df746604dfe435f385a3b
|
[
"MIT"
] | null | null | null |
Curso_Em_Video_Python/ex036.py
|
ThallesTorres/Curso_Em_Video_Python
|
95ffbff5a03f11fee41df746604dfe435f385a3b
|
[
"MIT"
] | null | null | null |
Curso_Em_Video_Python/ex036.py
|
ThallesTorres/Curso_Em_Video_Python
|
95ffbff5a03f11fee41df746604dfe435f385a3b
|
[
"MIT"
] | null | null | null |
# Ex: 036 - Write a program to approve a bank loan for buying a house.
# The program asks for the house price, the buyer's salary and over how many
# years the loan will be paid. Compute the monthly installment, knowing that it
# cannot exceed 30% of the salary, otherwise the loan is denied.
print('''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
--Seja bem-vindo!
--Exercício 036
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
''')
print('-=-Preencha os Dados-=- ')
valor_casa = int(input('Valor da Casa: R$'))
salario = int(input('Salário do Comprador: R$'))
anos = int(input('Em quantos Anos: '))
meses = anos * 12
prestacao = valor_casa / meses
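# Worked example (illustrative): a 200000 house paid over 20 years gives
# 240 months and an installment of 200000 / 240, roughly 833.33 per month.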
print('-=-Dados Finais-=-')
print(f'Valor da Casa: R${valor_casa} \nAnos: {anos} \nMeses: {meses} \nPrestações: R${prestacao:.2f}')
if prestacao >= salario * 30 / 100:
    print('Empréstimo Não Aprovado!!')
else:
print('Empréstimo Aprovado!!')
print('''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
--Obrigado pelo uso!
--Desenvolvido por Thalles Torres
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-''')
| 34.870968
| 104
| 0.580944
|
c3432f85181dd70d2d23e9283aa3fe38f031b3ac
| 290
|
py
|
Python
|
CS Practical/A21.py
|
adityaruplaha/adityaruplaha-school-projects
|
b19d06f356fafdbe4189bade59d4a4d1ffec567f
|
[
"MIT"
] | null | null | null |
CS Practical/A21.py
|
adityaruplaha/adityaruplaha-school-projects
|
b19d06f356fafdbe4189bade59d4a4d1ffec567f
|
[
"MIT"
] | null | null | null |
CS Practical/A21.py
|
adityaruplaha/adityaruplaha-school-projects
|
b19d06f356fafdbe4189bade59d4a4d1ffec567f
|
[
"MIT"
] | null | null | null |
# Maximum of 3 numbers & sum of non-equal numbers
print("Enter 3 no.s:")
a = float(input())
b = float(input())
c = float(input())
m = a
S = 0
L = [a, b, c]
for e in L:
if e > m:
m = e
if L.count(e) == 1:
S += e
print("Maximum:", m)
print("Sum of non-equal numbers:", S)
| 15.263158
| 42
| 0.506897
|
ea2dc0bab98cb4b1c1866b9525684e070969166c
| 9,138
|
py
|
Python
|
audio_to_midi/converter.py
|
surajpaib/audio-to-midi
|
a58c23b8b5b1083e9d993a1b666258d22c861986
|
[
"MIT"
] | null | null | null |
audio_to_midi/converter.py
|
surajpaib/audio-to-midi
|
a58c23b8b5b1083e9d993a1b666258d22c861986
|
[
"MIT"
] | null | null | null |
audio_to_midi/converter.py
|
surajpaib/audio-to-midi
|
a58c23b8b5b1083e9d993a1b666258d22c861986
|
[
"MIT"
] | 1
|
2021-01-25T14:54:06.000Z
|
2021-01-25T14:54:06.000Z
|
import cmath
import numpy
import math
from audio_to_midi import midi_writer, notes
class Converter(object):
"""
An object which takes in a list of audio samples and transforms
them into midi data which is written out to a .mid file.
"""
def __init__(
self,
samples=None,
samplerate=None,
channels=0,
time_window=None,
activation_level=None,
condense=None,
single_note=None,
outfile=None,
progress_callback=None,
):
"""
FFT object constructor.
samples is a list of raw audio samples.
rate is the sampling frequency of the audio samples.
time_window is the interval (in ms) over which to compute the fft's.
activation_level is the volume cutoff percentage for frequencies.
outfile is the MIDI file to be written to.
"""
self.samples = samples
self.channels = channels
self.length = len(samples)
self.samplerate = samplerate
self.time_window = time_window
self.activation_level = activation_level
self.condense = condense
self.single_note = single_note
self.outfile = outfile
self.progress_callback = progress_callback
self.notes = notes.generate()
self.bpm = int((60 * 1000) / self.time_window)
# Get the number of samples per time_window
self.step_size = self.time_window_to_step_size(
self.time_window, self.samplerate
)
def time_window_to_step_size(self, time_window, rate):
"""
time_window is the time in ms over which to compute fft's.
rate is the audio sampling rate in samples/sec.
Transforms the time window into an index step size and
returns the result.
"""
# rate/1000(samples/ms) * time_window(ms) = step_size(samples)
rate_per_ms = rate / 1000
step_size = rate_per_ms * time_window
return int(step_size)
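        # Worked example (illustrative values): at rate=44100 Hz with a 5 ms
        # window, 44100 / 1000 * 5 = 220.5, so step_size == 220 samples.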
def reduce_freqs(self, freqs):
"""
freqs is a list of amplitudes produced by fft_to_frequencies().
Reduces the list of frequencies to a list of notes and their
respective volumes by determining what note each frequency
is closest to. It then reduces the list of amplitudes for each
note to a single amplitude by summing them together.
"""
reduced_freqs = {}
for freq in freqs:
for key, val in self.notes.items():
key = key - 7
# Find the freq's equivalence class, adding the amplitudes.
if val[0] <= freq[0] <= val[2]:
if key in reduced_freqs.keys():
prev = reduced_freqs[key]
prev += freq[1]
reduced_freqs.update({key: prev})
else:
reduced_freqs.update({key: freq[1]})
return reduced_freqs
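        # Illustrative case (hypothetical bins): if FFT bins at 439.2 Hz and
        # 440.7 Hz both fall inside the band [val[0], val[2]] for the same note
        # key, their amplitudes are summed into a single reduced_freqs entry.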
def freqs_to_midi(self, freq_list, channel):
"""
freq_list is a list of frequencies with normalized amplitudes.
Takes a list of notes and transforms the amplitude to a
midi volume as well as adding track and channel info.
"""
activation_level = self.activation_level / 100.0
midi_list = []
for freqs in freq_list:
midi_notes = {}
for key, val in freqs.items():
if val >= activation_level:
# The key is the midi note.
midi_notes.update(
{
key: {
"track": 0,
"channel": channel,
"volume": int(127 * val),
"duration": 1,
}
}
)
if self.single_note:
max_note = None
index = None
for note, info in midi_notes.items():
                    if max_note == None:
                        max_note = midi_notes[note]
                        index = note
elif info["volume"] > max_note["volume"]:
max_note = midi_notes[note]
index = note
if max_note == None:
midi_notes = {}
else:
midi_notes = {index: max_note}
midi_list.append(midi_notes)
return midi_list
def fft_to_frequencies(self, amplitudes):
"""
amplitudes is a list of amplitudes produced by the fft.
Takes a list of amplitudes and transforms it into a list
of midi notes by passing the list through reduce_freqs()
and freqs_to_midi().
"""
size = len(amplitudes)
freqs = []
# Determine the true amplitudes
for i in range(size // 2):
re = amplitudes[i].real
im = amplitudes[i].imag
amplitude = math.sqrt(re ** 2 + im ** 2)
# Determine the frequency in Hz
freq = i * float(self.samplerate) / size
if freq > 20000.0:
break
else:
freqs.append([freq, amplitude])
# Transform the frequency info into midi compatible data.
return self.reduce_freqs(freqs[1:])
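        # Illustrative numbers: with samplerate=44100 and an FFT of size 2205
        # (a 50 ms window), bin i corresponds to i * 44100 / 2205 = i * 20 Hz,
        # and bins above 20 kHz are discarded.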
def normalize_freqs(self, freq_list):
"""
freq list is a list of dicts containing all of the frequency
data from the wav file.
Normalizes the amplitudes of every frequency in every time step.
"""
max_amplitude = 0.0
for freqs in freq_list:
for key, freq in freqs.items():
if freq > max_amplitude:
max_amplitude = freq
for freqs in freq_list:
for key, amplitude in freqs.items():
new_amplitude = amplitude / max_amplitude
freqs.update({key: new_amplitude})
return freq_list
def condense_midi_notes(self, midi_list):
"""
midi_list is a list of dicts containing midi compatible data.
Combines consecutive notes accross time steps, using the maximum
volume seen in the list as the resulting note's volume.
"""
for i in range(len(midi_list)):
if i < len(midi_list) - 1:
cur_midi = midi_list[i]
for note, info in cur_midi.items():
j = i + 1
while j < len(midi_list) - 1:
next_midi = midi_list[j]
if note in next_midi.keys():
if next_midi[note]["volume"] > cur_midi[note]["volume"]:
new_volume = next_midi[note]["volume"]
else:
new_volume = cur_midi[note]["volume"]
info.update(
{"duration": info["duration"] + 1, "volume": new_volume}
)
cur_midi.update({note: info})
next_midi.pop(note, None)
else:
break
j += 1
return midi_list
def convert(self):
"""
Performs the fft for each time step and uses fft_to_frequencies
to transform the result into midi compatible data. This data
is then passed to a midi file writer to be written out.
"""
steps = int(len(self.samples) / self.step_size)
writer = midi_writer.MidiWriter(self.outfile, self.time_window, self.bpm)
freqs = []
samples = []
for i in range(self.channels):
samples.append([s[i] for s in self.samples])
self.samples = samples
current = 0
total = self.channels * steps
for channel in range(self.channels):
freqs = []
writer.reset_time()
for i in range(steps):
current += 1
if self.progress_callback:
self.progress_callback(current, total)
if i < steps - 1:
amplitudes = numpy.fft.fft(
self.samples[channel][
self.step_size * i : (self.step_size * i + self.step_size)
]
)
else:
amplitudes = numpy.fft.fft(
self.samples[channel][self.step_size * i :]
)
freqs.append(self.fft_to_frequencies(amplitudes))
freqs = self.normalize_freqs(freqs)
if self.condense:
midi_list = self.condense_midi_notes(self.freqs_to_midi(freqs, channel))
else:
midi_list = self.freqs_to_midi(freqs, channel)
writer.add_notes(midi_list)
writer.write_file()
| 33.472527
| 88
| 0.513789
|
3a1c276ca55878250d87fff12000e5c1f3562b56
| 4,711
|
py
|
Python
|
calc.py
|
dokalanyi/Pascal-Interpreter
|
1719eadc289cac28a1bb742e5990f0bd2e8efce0
|
[
"MIT"
] | null | null | null |
calc.py
|
dokalanyi/Pascal-Interpreter
|
1719eadc289cac28a1bb742e5990f0bd2e8efce0
|
[
"MIT"
] | null | null | null |
calc.py
|
dokalanyi/Pascal-Interpreter
|
1719eadc289cac28a1bb742e5990f0bd2e8efce0
|
[
"MIT"
] | null | null | null |
# Token types
#
# EOF (end-of-file) token is used to indicate that
# there is no more input left for lexical analysis
INTEGER, PLUS, MINUS, MUL, DIV, EOF = 'INTEGER', 'PLUS', 'MINUS', 'MUL', 'DIV', 'EOF'
class Token(object):
def __init__(self, type, value):
        # token type: INTEGER, PLUS, MINUS, MUL, DIV, or EOF
        self.type = type
        # token value: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, '+', '-', '*', '/', or None
self.value = value
def __str__(self):
"""String representation of the class instance.
Examples:
Token(INTEGER, 3)
            Token(PLUS, '+')
"""
return 'Token({type}, {value})'.format(
type=self.type,
value=repr(self.value)
)
def __repr__(self):
return self.__str__()
class Interpreter(object):
def __init__(self, text):
# client string input, e.g. "3+5"
self.text = text
# self.pos is an index into self.text
self.pos = 0
# current token instance
self.current_token = None
def error(self):
raise Exception('Error parsing input')
def get_next_token(self):
"""Lexical analyzer (also known as scanner or tokenizer)
This method is responsible for breaking a sentence
apart into tokens. One token at a time.
"""
text = self.text
# is self.pos index past the end of the self.text ?
# if so, then return EOF token because there is no more
# input left to convert into tokens
if self.pos > len(text) - 1:
return Token(EOF, None)
# get a character at the position self.pos and decide
# what token to create based on the single character
current_char = text[self.pos]
#eliminate white space
while current_char == ' ':
self.pos += 1
current_char = text[self.pos]
# if the character is a digit then convert it to
# integer, create an INTEGER token, increment self.pos
# index to point to the next character after the digit,
# and return the INTEGER token
if current_char.isdigit():
token = Token(INTEGER, int(current_char))
self.pos += 1
return token
if current_char == '+':
token = Token(PLUS, current_char)
self.pos += 1
return token
if current_char == '*':
token = Token(MUL, current_char)
self.pos += 1
return token
if current_char == '/':
token = Token(DIV, current_char)
self.pos += 1
return token
if current_char == '-':
token = Token(MINUS, current_char)
self.pos += 1
return token
self.error()
def eat(self, token_type):
# compare the current token type with the passed token
# type and if they match then "eat" the current token
# and assign the next token to the self.current_token,
# otherwise raise an exception.
if self.current_token.type == token_type:
self.current_token = self.get_next_token()
else:
self.error()
def expr(self):
"""expr -> INTEGER PLUS INTEGER"""
# set current token to the first token taken from the input
self.current_token = self.get_next_token()
# we expect the current token to be a single-digit integer
left = ''
while self.current_token.type != PLUS and self.current_token.type != EOF and self.current_token.type != MINUS and self.current_token.type != DIV and self.current_token.type != MUL:
left = left + str(self.current_token.value)
self.eat(INTEGER)
# we expect the current token to be a '+' token
op = self.current_token
if self.current_token.type == PLUS:
self.eat(PLUS)
elif self.current_token.type == MINUS:
self.eat(MINUS)
elif self.current_token.type == MUL:
self.eat(MUL)
elif self.current_token.type == DIV:
self.eat(DIV)
# we expect the current token to be a single-digit integer
right = ''
while self.current_token.type != PLUS and self.current_token.type != EOF and self.current_token.type != MINUS and self.current_token.type != DIV and self.current_token.type != MUL:
right = right + str(self.current_token.value)
self.eat(INTEGER)
# after the above call the self.current_token is set to
# EOF token
# at this point INTEGER PLUS INTEGER sequence of tokens
# has been successfully found and the method can just
# return the result of adding two integers, thus
# effectively interpreting client input
result = self.doOp(op, left, right)
return result
def doOp(self, op, left, right):
if op.type == PLUS:
result = int(left) + int(right)
elif op.type == MINUS:
result = int(left) - int(right)
elif op.type == MUL:
result = int(left) * int(right)
elif op.type == DIV:
result = int(left) / int(right)
return result
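    # Walk-through (illustrative input): for "12+3", expr() accumulates the
    # digits into left == "12", eats PLUS, accumulates right == "3", and
    # doOp() returns 12 + 3 == 15.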
def main():
while True:
try:
# To run under Python3 replace 'raw_input' call
# with 'input'
text = raw_input('calc> ')
except EOFError:
break
if not text:
continue
interpreter = Interpreter(text)
result = interpreter.expr()
print(result)
if __name__ == '__main__':
main()
| 28.041667
| 182
| 0.683082
|
939c36ccf6618151f365d70182964ee06d8ac61e
| 729
|
py
|
Python
|
recursive_optimizer/__init__.py
|
yick2232/google-research
|
99021ebda945e232abdcc592f2cea1375b3c84f7
|
[
"Apache-2.0"
] | 11
|
2020-01-29T07:25:04.000Z
|
2022-03-05T16:01:21.000Z
|
recursive_optimizer/__init__.py
|
RubensZimbres/google-research
|
562c7c6ef959cb3cb382b1b660ccc45e8f5289c4
|
[
"Apache-2.0"
] | 13
|
2020-01-28T22:19:53.000Z
|
2022-02-10T00:39:26.000Z
|
recursive_optimizer/__init__.py
|
RubensZimbres/google-research
|
562c7c6ef959cb3cb382b1b660ccc45e8f5289c4
|
[
"Apache-2.0"
] | 2
|
2019-12-07T19:01:03.000Z
|
2020-03-19T16:53:04.000Z
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Nothing to do. Existence of this file marks the directory as
# importable package; required by Python 3.2 and earlier.
| 40.5
| 74
| 0.76406
|
ce518a4cbeb47b5c82d369ead45612b3497b8a6f
| 855
|
py
|
Python
|
riberry/app/backends/impl/pool/tasks/external_task_receiver.py
|
srafehi/riberry
|
2ffa48945264177c6cef88512c1bc80ca4bf1d5e
|
[
"MIT"
] | 2
|
2019-12-09T10:24:36.000Z
|
2019-12-09T10:26:56.000Z
|
riberry/app/backends/impl/pool/tasks/external_task_receiver.py
|
srafehi/riberry
|
2ffa48945264177c6cef88512c1bc80ca4bf1d5e
|
[
"MIT"
] | 2
|
2018-06-11T11:34:28.000Z
|
2018-08-22T12:00:19.000Z
|
riberry/app/backends/impl/pool/tasks/external_task_receiver.py
|
srafehi/riberry
|
2ffa48945264177c6cef88512c1bc80ca4bf1d5e
|
[
"MIT"
] | null | null | null |
from typing import List
import riberry
from riberry.app import current_context as ctx
from ..task_queue import TaskQueue
def ready_external_tasks() -> List[riberry.model.job.JobExecutionExternalTask]:
return riberry.model.conn.query(
riberry.model.job.JobExecutionExternalTask
).filter_by(
status='READY',
).join(riberry.model.job.JobExecution).filter_by(
status='ACTIVE',
).join(riberry.model.job.Job).filter_by(
instance=ctx.current.riberry_app_instance,
).all()
def queue_receiver_tasks(queue: TaskQueue):
with queue.lock:
if not queue.limit_reached():
with riberry.model.conn:
external_tasks = ready_external_tasks()
while external_tasks and not queue.limit_reached():
queue.submit_receiver_task(external_tasks.pop())
| 31.666667
| 79
| 0.690058
|
ad6fff3a54b188437bd925df50e52c3fa72b81d0
| 1,837
|
py
|
Python
|
projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/scatter_gather.py
|
ESOGU-SRLAB/opendr
|
f2eb5a6d7a070d3534d470987c3abc69eec53905
|
[
"Apache-2.0"
] | 217
|
2020-04-10T16:39:36.000Z
|
2022-03-30T15:39:04.000Z
|
projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/scatter_gather.py
|
ESOGU-SRLAB/opendr
|
f2eb5a6d7a070d3534d470987c3abc69eec53905
|
[
"Apache-2.0"
] | 46
|
2021-12-16T13:23:04.000Z
|
2022-03-30T11:36:29.000Z
|
projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/scatter_gather.py
|
ESOGU-SRLAB/opendr
|
f2eb5a6d7a070d3534d470987c3abc69eec53905
|
[
"Apache-2.0"
] | 29
|
2021-12-16T09:26:13.000Z
|
2022-03-29T15:19:18.000Z
|
import torch
from torch.nn.parallel._functions import Scatter
def scatter(inputs, target_gpus, dim=0, chunk_size=None):
r"""
Slices tensors into approximately equal chunks and
distributes them across given GPUs. Duplicates
references to objects that are not tensors.
"""
def scatter_map(obj):
if isinstance(obj, torch.Tensor):
return Scatter.apply(target_gpus, chunk_size, dim, obj)
if isinstance(obj, tuple) and len(obj) > 0:
return list(zip(*map(scatter_map, obj)))
if isinstance(obj, list) and len(obj) > 0:
return list(map(list, zip(*map(scatter_map, obj))))
if isinstance(obj, dict) and len(obj) > 0:
return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
return [obj for targets in target_gpus]
# After scatter_map is called, a scatter_map cell will exist. This cell
# has a reference to the actual function scatter_map, which has references
# to a closure that has a reference to the scatter_map cell (because the
# fn is recursive). To avoid this reference cycle, we set the function to
# None, clearing the cell
try:
res = scatter_map(inputs)
finally:
scatter_map = None
return res
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0, chunk_size=None):
r"""Scatter with support for kwargs dictionary"""
inputs = scatter(inputs, target_gpus, dim, chunk_size) if inputs else []
kwargs = scatter(kwargs, target_gpus, dim, chunk_size) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
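# Illustrative behaviour (assuming two target GPUs): a positional tensor of
# shape [8, C] is split by scatter() into two [4, C] chunks, while non-tensor
# arguments are duplicated per GPU; scatter_kwargs() then pads inputs/kwargs
# with empty tuples/dicts so both returned tuples have the same length.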
| 40.822222
| 78
| 0.666848
|
476b947c306085d748a829bf44be6a10610b06f6
| 938
|
py
|
Python
|
examples/redis/cache.py
|
PeterPZhang/open-box
|
987f91edf0f502d678459f7cc50070cae34accd9
|
[
"MIT"
] | null | null | null |
examples/redis/cache.py
|
PeterPZhang/open-box
|
987f91edf0f502d678459f7cc50070cae34accd9
|
[
"MIT"
] | null | null | null |
examples/redis/cache.py
|
PeterPZhang/open-box
|
987f91edf0f502d678459f7cc50070cae34accd9
|
[
"MIT"
] | null | null | null |
from open_box.redis import get_redis_client
from open_box.redis.cache import WrapperCache
startup_nodes = [
{'host': '<host1>', 'port': '<port1>'},
{'host': '<host2>', 'port': '<port2>'},
{'host': '<host3>', 'port': '<port3>'},
]
rc = get_redis_client(startup_nodes)
cache = WrapperCache(rc)
def cache_context_usage(**kwargs):
with cache.CacheContext(key_prefix='good', timeout=5, **kwargs) as ctx:
if ctx.val:
print('[from cache]{}: {}'.format(ctx.key, ctx.val))
else:
ctx.val = 100
print('[to cache]{}: {}'.format(ctx.key, ctx.val))
@cache.CacheDecorator(key_prefix='hnf', timeout=5)
def cache_decorator_usage(a, b=3, *args, **kwargs):
return {'a': a, 'b': b, 'args': args, 'kwargs': kwargs}
# return 1
# return datetime.now()
if __name__ == '__main__':
cache_context_usage(name='hnf', age=28)
print(cache_decorator_usage('lx', 299, age=28))
| 30.258065
| 75
| 0.615139
|
705e5e75732ca7e8ec91e23fa2351c83b1eb9234
| 19,885
|
py
|
Python
|
tensorflow/python/keras/layers/dense_attention.py
|
bubblebooy/tensorflow
|
585de3969452255a1f8cd6d295b87a2e2d8298e9
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/keras/layers/dense_attention.py
|
bubblebooy/tensorflow
|
585de3969452255a1f8cd6d295b87a2e2d8298e9
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/keras/layers/dense_attention.py
|
bubblebooy/tensorflow
|
585de3969452255a1f8cd6d295b87a2e2d8298e9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Attention layers that can be used in sequence DNN/CNN models.
This file follows the terminology of https://arxiv.org/abs/1706.03762 Figure 2.
Attention is formed by three tensors: Query, Key and Value.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import keras_export
class BaseDenseAttention(Layer):
"""Base Attention class for Dense networks.
This class is suitable for Dense or CNN networks, and not for RNN networks.
Implementations of attention mechanisms should inherit from this class, and
reuse the `apply_attention_scores()` method.
Args:
causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such
that position `i` cannot attend to positions `j > i`. This prevents the
flow of information from the future towards the past.
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores.
Call Arguments:
inputs: List of the following tensors:
* query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
* value: Value `Tensor` of shape `[batch_size, Tv, dim]`.
* key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not
given, will use `value` for both `key` and `value`, which is the
most common case.
mask: List of the following tensors:
* query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.
If given, the output will be zero at the positions where
`mask==False`.
* value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
If given, will apply the mask such that values at positions where
`mask==False` do not contribute to the result.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
Output shape:
Attention outputs of shape `[batch_size, Tq, dim]`.
"""
def __init__(self, causal=False, dropout=0.0, **kwargs):
super(BaseDenseAttention, self).__init__(**kwargs)
self.causal = causal
self.dropout = dropout
self.supports_masking = True
def _calculate_scores(self, query, key):
"""Calculates attention scores.
Args:
query: Query tensor of shape `[batch_size, Tq, dim]`.
key: Key tensor of shape `[batch_size, Tv, dim]`.
Returns:
Tensor of shape `[batch_size, Tq, Tv]`.
"""
    raise NotImplementedError
def _apply_scores(self, scores, value, scores_mask=None, training=None):
"""Applies attention scores to the given value tensor.
To use this method in your attention layer, follow the steps:
* Use `query` tensor of shape `[batch_size, Tq]` and `key` tensor of shape
`[batch_size, Tv]` to calculate the attention `scores`.
* Pass `scores` and `value` tensors to this method. The method applies
`scores_mask`, calculates `attention_distribution = softmax(scores)`, then
      returns `matmul(attention_distribution, value)`.
* Apply `query_mask` and return the result.
Args:
scores: Scores float tensor of shape `[batch_size, Tq, Tv]`.
value: Value tensor of shape `[batch_size, Tv, dim]`.
scores_mask: A boolean mask `Tensor` of shape `[batch_size, 1, Tv]` or
`[batch_size, Tq, Tv]`. If given, scores at positions where
`scores_mask==False` do not contribute to the result. It must contain
at least one `True` value in each line along the last dimension.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
Returns:
Tensor of shape `[batch_size, Tq, dim]`.
"""
if scores_mask is not None:
padding_mask = math_ops.logical_not(scores_mask)
# Bias so padding positions do not contribute to attention distribution.
scores -= 1.e9 * math_ops.cast(padding_mask, dtype=K.floatx())
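      # Illustrative effect: a masked logit of, say, 3.0 becomes roughly -1e9,
      # so softmax assigns it a numerically zero weight and the corresponding
      # value rows do not contribute to the output.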
if training is None:
training = K.learning_phase()
weights = nn.softmax(scores)
def dropped_weights():
return nn.dropout(weights, rate=self.dropout)
weights = tf_utils.smart_cond(
training,
dropped_weights,
lambda: array_ops.identity(weights))
return math_ops.matmul(weights, value)
# TODO(b/125916026): Consider exposing a __call__ method with named args.
def call(self, inputs, mask=None, training=None):
self._validate_call_args(inputs=inputs, mask=mask)
q = inputs[0]
v = inputs[1]
k = inputs[2] if len(inputs) > 2 else v
q_mask = mask[0] if mask else None
v_mask = mask[1] if mask else None
scores = self._calculate_scores(query=q, key=k)
if v_mask is not None:
# Mask of shape [batch_size, 1, Tv].
v_mask = array_ops.expand_dims(v_mask, axis=-2)
if self.causal:
# Creates a lower triangular mask, so position i cannot attend to
# positions j>i. This prevents the flow of information from the future
# into the past.
scores_shape = array_ops.shape(scores)
# causal_mask_shape = [1, Tq, Tv].
causal_mask_shape = array_ops.concat(
[array_ops.ones_like(scores_shape[:-2]), scores_shape[-2:]],
axis=0)
causal_mask = _lower_triangular_mask(causal_mask_shape)
else:
causal_mask = None
scores_mask = _merge_masks(v_mask, causal_mask)
result = self._apply_scores(
scores=scores, value=v, scores_mask=scores_mask, training=training)
if q_mask is not None:
# Mask of shape [batch_size, Tq, 1].
q_mask = array_ops.expand_dims(q_mask, axis=-1)
result *= math_ops.cast(q_mask, dtype=result.dtype)
return result
def compute_mask(self, inputs, mask=None):
self._validate_call_args(inputs=inputs, mask=mask)
if mask:
q_mask = mask[0]
if q_mask is None:
return None
return ops.convert_to_tensor_v2(q_mask)
return None
def _validate_call_args(self, inputs, mask):
"""Validates arguments of the call method."""
class_name = self.__class__.__name__
if not isinstance(inputs, list):
raise ValueError(
'{} layer must be called on a list of inputs, namely [query, value] '
'or [query, value, key].'.format(class_name))
if len(inputs) < 2 or len(inputs) > 3:
raise ValueError(
'{} layer accepts inputs list of length 2 or 3, '
'namely [query, value] or [query, value, key]. '
'Given length: {}'.format(class_name, len(inputs)))
if mask:
if not isinstance(mask, list):
raise ValueError(
'{} layer mask must be a list, '
'namely [query_mask, value_mask].'.format(class_name))
if len(mask) < 2 or len(mask) > 3:
raise ValueError(
'{} layer mask must be a list of length 2 or 3, namely [query_mask, '
'value_mask] or [query_mask, value_mask, key_mask]. Given length: {}'.format(class_name, len(mask)))
def get_config(self):
config = {
'causal': self.causal,
'dropout': self.dropout,
}
base_config = super(BaseDenseAttention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Attention')
class Attention(BaseDenseAttention):
"""Dot-product attention layer, a.k.a. Luong-style attention.
Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor of
shape `[batch_size, Tv, dim]` and `key` tensor of shape
`[batch_size, Tv, dim]`. The calculation follows the steps:
1. Calculate scores with shape `[batch_size, Tq, Tv]` as a `query`-`key` dot
product: `scores = tf.matmul(query, key, transpose_b=True)`.
2. Use scores to calculate a distribution with shape
`[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`.
3. Use `distribution` to create a linear combination of `value` with
shape `[batch_size, Tq, dim]`:
`return tf.matmul(distribution, value)`.
Args:
use_scale: If `True`, will create a scalar variable to scale the attention
scores.
causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such
that position `i` cannot attend to positions `j > i`. This prevents the
flow of information from the future towards the past.
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores.
Call Arguments:
inputs: List of the following tensors:
* query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
* value: Value `Tensor` of shape `[batch_size, Tv, dim]`.
* key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not
given, will use `value` for both `key` and `value`, which is the
most common case.
mask: List of the following tensors:
* query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.
If given, the output will be zero at the positions where
`mask==False`.
* value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
If given, will apply the mask such that values at positions where
`mask==False` do not contribute to the result.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
Output shape:
Attention outputs of shape `[batch_size, Tq, dim]`.
  The meaning of `query`, `value` and `key` depends on the application. In the
case of text similarity, for example, `query` is the sequence embeddings of
the first piece of text and `value` is the sequence embeddings of the second
piece of text. `key` is usually the same tensor as `value`.
Here is a code example for using `Attention` in a CNN+Attention network:
```python
# Variable-length int sequences.
query_input = tf.keras.Input(shape=(None,), dtype='int32')
value_input = tf.keras.Input(shape=(None,), dtype='int32')
# Embedding lookup.
token_embedding = tf.keras.layers.Embedding(max_tokens, dimension)
# Query embeddings of shape [batch_size, Tq, dimension].
query_embeddings = token_embedding(query_input)
# Value embeddings of shape [batch_size, Tv, dimension].
value_embeddings = token_embedding(value_input)
# CNN layer.
cnn_layer = tf.keras.layers.Conv1D(
filters=100,
kernel_size=4,
# Use 'same' padding so outputs have the same shape as inputs.
padding='same')
# Query encoding of shape [batch_size, Tq, filters].
query_seq_encoding = cnn_layer(query_embeddings)
# Value encoding of shape [batch_size, Tv, filters].
value_seq_encoding = cnn_layer(value_embeddings)
# Query-value attention of shape [batch_size, Tq, filters].
query_value_attention_seq = tf.keras.layers.Attention()(
[query_seq_encoding, value_seq_encoding])
# Reduce over the sequence axis to produce encodings of shape
# [batch_size, filters].
query_encoding = tf.keras.layers.GlobalAveragePooling1D()(
query_seq_encoding)
query_value_attention = tf.keras.layers.GlobalAveragePooling1D()(
query_value_attention_seq)
# Concatenate query and document encodings to produce a DNN input layer.
input_layer = tf.keras.layers.Concatenate()(
[query_encoding, query_value_attention])
# Add DNN layers, and create Model.
# ...
```
"""
def __init__(self, use_scale=False, **kwargs):
super(Attention, self).__init__(**kwargs)
self.use_scale = use_scale
def build(self, input_shape):
"""Creates scale variable if use_scale==True."""
if self.use_scale:
self.scale = self.add_weight(
name='scale',
shape=(),
initializer=init_ops.ones_initializer(),
dtype=self.dtype,
trainable=True)
else:
self.scale = None
super(Attention, self).build(input_shape)
def _calculate_scores(self, query, key):
"""Calculates attention scores as a query-key dot product.
Args:
query: Query tensor of shape `[batch_size, Tq, dim]`.
key: Key tensor of shape `[batch_size, Tv, dim]`.
Returns:
Tensor of shape `[batch_size, Tq, Tv]`.
"""
scores = math_ops.matmul(query, key, transpose_b=True)
if self.scale is not None:
scores *= self.scale
return scores
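    # Shape sketch (illustrative): query [B, Tq, dim] matmul key^T [B, dim, Tv]
    # yields scores of shape [B, Tq, Tv]; with use_scale=True each score is
    # multiplied by the learned scalar `scale`.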
def get_config(self):
config = {'use_scale': self.use_scale}
base_config = super(Attention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.AdditiveAttention')
class AdditiveAttention(BaseDenseAttention):
"""Additive attention layer, a.k.a. Bahdanau-style attention.
Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor of
shape `[batch_size, Tv, dim]` and `key` tensor of shape
`[batch_size, Tv, dim]`. The calculation follows the steps:
1. Reshape `query` and `value` into shapes `[batch_size, Tq, 1, dim]`
and `[batch_size, 1, Tv, dim]` respectively.
2. Calculate scores with shape `[batch_size, Tq, Tv]` as a non-linear
sum: `scores = tf.reduce_sum(tf.tanh(query + value), axis=-1)`
3. Use scores to calculate a distribution with shape
`[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`.
4. Use `distribution` to create a linear combination of `value` with
     shape `[batch_size, Tq, dim]`:
`return tf.matmul(distribution, value)`.
Args:
use_scale: If `True`, will create a variable to scale the attention scores.
causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such
that position `i` cannot attend to positions `j > i`. This prevents the
flow of information from the future towards the past.
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores.
Call Arguments:
inputs: List of the following tensors:
* query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
* value: Value `Tensor` of shape `[batch_size, Tv, dim]`.
* key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not
given, will use `value` for both `key` and `value`, which is the
most common case.
mask: List of the following tensors:
* query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.
If given, the output will be zero at the positions where
`mask==False`.
* value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
If given, will apply the mask such that values at positions where
`mask==False` do not contribute to the result.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
Output shape:
Attention outputs of shape `[batch_size, Tq, dim]`.
  The meaning of `query`, `value` and `key` depends on the application. In the
case of text similarity, for example, `query` is the sequence embeddings of
the first piece of text and `value` is the sequence embeddings of the second
piece of text. `key` is usually the same tensor as `value`.
Here is a code example for using `AdditiveAttention` in a CNN+Attention
network:
```python
# Variable-length int sequences.
query_input = tf.keras.Input(shape=(None,), dtype='int32')
value_input = tf.keras.Input(shape=(None,), dtype='int32')
# Embedding lookup.
token_embedding = tf.keras.layers.Embedding(max_tokens, dimension)
# Query embeddings of shape [batch_size, Tq, dimension].
query_embeddings = token_embedding(query_input)
# Value embeddings of shape [batch_size, Tv, dimension].
value_embeddings = token_embedding(value_input)
# CNN layer.
cnn_layer = tf.keras.layers.Conv1D(
filters=100,
kernel_size=4,
# Use 'same' padding so outputs have the same shape as inputs.
padding='same')
# Query encoding of shape [batch_size, Tq, filters].
query_seq_encoding = cnn_layer(query_embeddings)
# Value encoding of shape [batch_size, Tv, filters].
value_seq_encoding = cnn_layer(value_embeddings)
# Query-value attention of shape [batch_size, Tq, filters].
query_value_attention_seq = tf.keras.layers.AdditiveAttention()(
[query_seq_encoding, value_seq_encoding])
# Reduce over the sequence axis to produce encodings of shape
# [batch_size, filters].
query_encoding = tf.keras.layers.GlobalAveragePooling1D()(
query_seq_encoding)
query_value_attention = tf.keras.layers.GlobalAveragePooling1D()(
query_value_attention_seq)
# Concatenate query and document encodings to produce a DNN input layer.
input_layer = tf.keras.layers.Concatenate()(
[query_encoding, query_value_attention])
# Add DNN layers, and create Model.
# ...
```
"""
def __init__(self, use_scale=True, **kwargs):
super(AdditiveAttention, self).__init__(**kwargs)
self.use_scale = use_scale
def build(self, input_shape):
v_shape = tensor_shape.TensorShape(input_shape[1])
dim = v_shape[-1]
if isinstance(dim, tensor_shape.Dimension):
dim = dim.value
if self.use_scale:
self.scale = self.add_weight(
name='scale',
shape=[dim],
initializer=init_ops.glorot_uniform_initializer(),
dtype=self.dtype,
trainable=True)
else:
self.scale = None
super(AdditiveAttention, self).build(input_shape)
def _calculate_scores(self, query, key):
"""Calculates attention scores as a nonlinear sum of query and key.
Args:
query: Query tensor of shape `[batch_size, Tq, dim]`.
key: Key tensor of shape `[batch_size, Tv, dim]`.
Returns:
Tensor of shape `[batch_size, Tq, Tv]`.
"""
# Reshape tensors to enable broadcasting.
# Reshape into [batch_size, Tq, 1, dim].
q_reshaped = array_ops.expand_dims(query, axis=-2)
# Reshape into [batch_size, 1, Tv, dim].
k_reshaped = array_ops.expand_dims(key, axis=-3)
if self.use_scale:
scale = self.scale
else:
scale = 1.
return math_ops.reduce_sum(
scale * math_ops.tanh(q_reshaped + k_reshaped), axis=-1)
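    # Shape sketch (illustrative): [B, Tq, 1, dim] + [B, 1, Tv, dim] broadcasts
    # to [B, Tq, Tv, dim]; reduce_sum over the last axis gives scores of shape
    # [B, Tq, Tv].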
def get_config(self):
config = {'use_scale': self.use_scale}
base_config = super(AdditiveAttention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _lower_triangular_mask(shape):
"""Creates a lower-triangular boolean mask over the last 2 dimensions."""
row_index = math_ops.cumsum(
array_ops.ones(shape=shape, dtype=dtypes.int32), axis=-2)
col_index = math_ops.cumsum(
array_ops.ones(shape=shape, dtype=dtypes.int32), axis=-1)
return math_ops.greater_equal(row_index, col_index)
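  # Illustrative result for shape [1, 3, 3]: the [3, 3] slice (shown as 0/1) is
  # [[1, 0, 0],
  #  [1, 1, 0],
  #  [1, 1, 1]]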
def _merge_masks(x, y):
if x is None:
return y
if y is None:
return x
return math_ops.logical_and(x, y)
| 39.454365
| 112
| 0.687956
|
15044c9ff40d8ee69d3e66a461c632f30038d67f
| 42,592
|
py
|
Python
|
nion/ui/DrawingContext.py
|
dhauge/nionui
|
7b86b70f9a3048b071684bb841963e02b492fdac
|
[
"Apache-2.0"
] | null | null | null |
nion/ui/DrawingContext.py
|
dhauge/nionui
|
7b86b70f9a3048b071684bb841963e02b492fdac
|
[
"Apache-2.0"
] | null | null | null |
nion/ui/DrawingContext.py
|
dhauge/nionui
|
7b86b70f9a3048b071684bb841963e02b492fdac
|
[
"Apache-2.0"
] | null | null | null |
"""
DrawingContext module contains classes related to drawing context.
DrawingContexts can be handled directly by the UI system or can produce
JavaScript or SVG output to do the drawing.
"""
# standard libraries
import base64
import collections
from contextlib import contextmanager
import copy
import io
import logging
import math
import re
import struct
import sys
import time
import threading
import typing
import xml.sax.saxutils
# third party libraries
import imageio
import numpy
# local libraries
# None
# pylint: disable=star-args
def get_rgba_view_from_rgba_data(rgba_data):
return rgba_data.view(numpy.uint8).reshape(rgba_data.shape + (4,))
def get_rgba_data_from_rgba(rgba_image):
return rgba_image.view(numpy.uint32).reshape(rgba_image.shape[:-1])
def get_byte_view(rgba_image):
return rgba_image.view(numpy.uint8).reshape(rgba_image.shape + (-1, ))
def get_red_view(rgba_image, byteorder=None):
if byteorder is None:
byteorder = sys.byteorder
bytes = get_byte_view(rgba_image)
assert bytes.shape[-1] == 4
if byteorder == 'little':
return bytes[..., 2] # strip A off BGRA
else:
return bytes[..., 1] # strip A off ARGB
def get_green_view(rgba_image, byteorder=None):
if byteorder is None:
byteorder = sys.byteorder
bytes = get_byte_view(rgba_image)
assert bytes.shape[-1] == 4
if byteorder == 'little':
return bytes[..., 1] # strip A off BGRA
else:
return bytes[..., 2] # strip A off ARGB
def get_blue_view(rgba_image, byteorder=None):
if byteorder is None:
byteorder = sys.byteorder
bytes = get_byte_view(rgba_image)
assert bytes.shape[-1] == 4
if byteorder == 'little':
return bytes[..., 0] # strip A off BGRA
else:
return bytes[..., 3] # strip A off ARGB
def get_alpha_view(rgba_image, byteorder=None):
if byteorder is None:
byteorder = sys.byteorder
bytes = get_byte_view(rgba_image)
assert bytes.shape[-1] == 4
if byteorder == 'little':
return bytes[..., 3] # A of BGRA
else:
return bytes[..., 0] # A of ARGB
class DrawingContext:
"""
Path commands (begin_path, close_path, move_to, line_to, etc.) should not be intermixed
with transform commands (translate, scale, rotate).
"""
# TODO: stroke_fill
# TODO: circle
__image_id = 0
__image_id_lock = threading.RLock()
def __init__(self):
self.commands = []
self.binary_commands = bytearray()
self.save_count = 0
self.images = dict()
def copy_from(self, drawing_context):
assert self.save_count == 0
assert drawing_context.save_count == 0
self.commands = drawing_context.commands
self.binary_commands = drawing_context.binary_commands
self.images = drawing_context.images
def add(self, drawing_context):
self.commands.extend(drawing_context.commands)
self.binary_commands.extend(drawing_context.binary_commands)
self.images.update(drawing_context.images)
def clear(self):
self.commands = []
self.binary_commands = []
self.save_count = 0
self.images = dict()
def to_js(self):
js = ""
for command in self.commands:
command_id = command[0]
command_args = command[1:]
if command_id == "save":
js += "ctx.save();"
elif command_id == "restore":
js += "ctx.restore();"
elif command_id == "beginPath":
js += "ctx.beginPath();"
elif command_id == "closePath":
js += "ctx.closePath();"
elif command_id == "clip":
js += "ctx.beginPath();"
js += "ctx.rect({0}, {1}, {2}, {3});".format(*command_args)
js += "ctx.clip();"
elif command_id == "translate":
js += "ctx.translate({0}, {1});".format(*command_args)
elif command_id == "scale":
js += "ctx.scale({0}, {1});".format(*command_args)
elif command_id == "rotate":
js += "ctx.rotate({0});".format(*command_args)
elif command_id == "moveTo":
js += "ctx.moveTo({0}, {1});".format(*command_args)
elif command_id == "lineTo":
js += "ctx.lineTo({0}, {1});".format(*command_args)
elif command_id == "rect":
js += "ctx.rect({0}, {1}, {2}, {3});".format(*command_args)
elif command_id == "arc":
x, y, r, sa, ea, ac = command_args
js += "ctx.arc({0}, {1}, {2}, {3}, {4}, {5});".format(x, y, r, sa, ea, "true" if ac else "false")
elif command_id == "arcTo":
x1, y1, x2, y2, r = command_args
js += "ctx.arcTo({0}, {1}, {2}, {3}, {4});".format(x1, y1, x2, y2, r)
elif command_id == "cubicTo":
x1, y1, x2, y2, x, y = command_args
js += "ctx.bezierCurveTo({0}, {1}, {2}, {3}, {4}, {5});".format(x1, y1, x2, y2, x, y)
elif command_id == "quadraticTo":
x1, y1, x, y = command_args
js += "ctx.quadraticCurveTo({0}, {1}, {2}, {3});".format(x1, y1, x, y)
elif command_id == "image":
w, h, image, image_id, a, b, c, d = command_args
js += "ctx.rect({0}, {1}, {2}, {3});".format(a, b, c, d)
elif command_id == "data":
w, h, data, data_id, a, b, c, d, low, high, color_table = command_args
js += "ctx.rect({0}, {1}, {2}, {3});".format(a, b, c, d)
elif command_id == "stroke":
js += "ctx.stroke();"
elif command_id == "sleep":
pass # used for performance testing
elif command_id == "fill":
js += "ctx.fill();"
elif command_id == "fillText":
text, x, y, max_width = command_args
js += "ctx.fillText('{0}', {1}, {2}{3});".format(xml.sax.saxutils.escape(text), x, y, ", {0}".format(max_width) if max_width else "")
elif command_id == "fillStyleGradient":
command_var = command_args[0]
js += "ctx.fillStyle = {0};".format("grad" + str(command_var))
elif command_id == "fillStyle":
js += "ctx.fillStyle = '{0}';".format(*command_args)
elif command_id == "font":
js += "ctx.font = '{0}';".format(*command_args)
elif command_id == "textAlign":
js += "ctx.textAlign = '{0}';".format(*command_args)
elif command_id == "textBaseline":
js += "ctx.textBaseline = '{0}';".format(*command_args)
elif command_id == "strokeStyle":
js += "ctx.strokeStyle = '{0}';".format(*command_args)
elif command_id == "lineWidth":
js += "ctx.lineWidth = {0};".format(*command_args)
elif command_id == "lineDash":
js += "ctx.lineDash = {0};".format(*command_args)
elif command_id == "lineCap":
js += "ctx.lineCap = '{0}';".format(*command_args)
elif command_id == "lineJoin":
js += "ctx.lineJoin = '{0}';".format(*command_args)
elif command_id == "gradient":
command_var, width, height, x1, y1, x2, y2 = command_args # pylint: disable=invalid-name
js_var = "grad" + str(command_var)
js += "var {0} = ctx.createLinearGradient({1}, {2}, {3}, {4});".format(js_var, x1, y1, x2 - x1, y2 - y1)
elif command_id == "colorStop":
command_var, x, color = command_args
js_var = "grad" + str(command_var)
js += "{0}.addColorStop({1}, '{2}');".format(js_var, x, color)
return js
def to_svg(self, size, viewbox):
svg = ""
defs = ""
path = ""
next_clip_id = 1
transform = list()
closers = list()
fill_style = None
fill_opacity = 1.0
stroke_style = None
stroke_opacity = 1.0
line_cap = "square"
line_join = "bevel"
line_width = 1.0
line_dash = None
text_anchor = "start"
text_baseline = "alphabetic"
font_style = None
font_weight = None
font_size = None
font_unit = None
font_family = None
contexts = collections.deque()
gradient_start = None
gradient_stops = list()
# make a SVG 1.1 compatible color, opacity tuple
def parse_color(color_str: str) -> typing.Tuple[str, float]:
color_str = ''.join(color_str.split())
if color_str.startswith("rgba"):
c = re.split("rgba\((\d+),(\d+),(\d+),([\d.]+)\)", color_str)
return f"rgb({c[1]}, {c[2]}, {c[3]})", float(c[4])
return color_str, 1.0
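        # Illustrative conversions: parse_color("rgba(255, 0, 0, 0.5)") gives
        # ("rgb(255, 0, 0)", 0.5); a plain "#ff0000" is returned unchanged with
        # opacity 1.0.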
for command in self.commands:
command_id = command[0]
#logging.debug(command_id)
command_args = command[1:]
if command_id == "save":
context = dict()
context["path"] = path
context["transform"] = copy.deepcopy(transform)
context["fill_style"] = fill_style
context["fill_opacity"] = fill_opacity
context["stroke_style"] = stroke_style
context["stroke_opacity"] = stroke_opacity
context["line_cap"] = line_cap
context["line_join"] = line_join
context["line_width"] = line_width
context["line_dash"] = line_dash
context["font_style"] = font_style
context["font_weight"] = font_weight
context["font_size"] = font_size
context["font_unit"] = font_unit
context["font_family"] = font_family
context["text_anchor"] = text_anchor
context["text_baseline"] = text_baseline
context["closers"] = copy.deepcopy(closers)
closers = list()
contexts.append(context)
elif command_id == "restore":
svg += "".join(closers)
context = contexts.pop()
path = context["path"]
transform = context["transform"]
fill_style = context["fill_style"]
fill_opacity = context["fill_opacity"]
font_style = context["font_style"]
font_weight = context["font_weight"]
font_size = context["font_size"]
font_unit = context["font_unit"]
font_family = context["font_family"]
text_anchor = context["text_anchor"]
text_baseline = context["text_baseline"]
stroke_style = context["stroke_style"]
stroke_opacity = context["stroke_opacity"]
line_cap = context["line_cap"]
line_join = context["line_join"]
line_width = context["line_width"]
line_dash = context["line_dash"]
closers = context["closers"]
elif command_id == "beginPath":
path = ""
elif command_id == "closePath":
path += " Z"
elif command_id == "moveTo":
path += " M {0} {1}".format(*command_args)
elif command_id == "lineTo":
path += " L {0} {1}".format(*command_args)
elif command_id == "rect":
x, y, w, h = command_args
path += " M {0} {1}".format(x, y)
path += " L {0} {1}".format(x + w, y)
path += " L {0} {1}".format(x + w, y + h)
path += " L {0} {1}".format(x, y + h)
path += " Z"
elif command_id == "arc":
x, y, r, sa, ea, ac = command_args
# js += "ctx.arc({0}, {1}, {2}, {3}, {4}, {5});".format(x, y, r, sa, ea, "true" if ac else "false")
elif command_id == "arcTo":
x1, y1, x2, y2, r = command_args
# js += "ctx.arcTo({0}, {1}, {2}, {3}, {4});".format(x1, y1, x2, y2, r)
elif command_id == "cubicTo":
path += " C {0} {1}, {2} {3}, {4} {5}".format(*command_args)
elif command_id == "quadraticTo":
path += " Q {0} {1}, {2} {3}".format(*command_args)
elif command_id == "clip":
x, y, w, h = command_args
clip_id = "clip" + str(next_clip_id)
next_clip_id += 1
transform_str = " transform='{0}'".format(" ".join(transform)) if len(transform) > 0 else ""
defs_format_str = "<clipPath id='{0}'><rect x='{1}' y='{2}' width='{3}' height='{4}'{5} /></clipPath>"
defs += defs_format_str.format(clip_id, x, y, w, h, transform_str)
svg += "<g style='clip-path: url(#{0});'>".format(clip_id)
closers.append("</g>")
elif command_id == "translate":
transform.append("translate({0},{1})".format(*command_args))
elif command_id == "scale":
transform.append("scale({0},{1})".format(*command_args))
elif command_id == "rotate":
transform.append("rotate({0})".format(*command_args))
elif command_id == "image":
w, h, image, image_id, a, b, c, d = command_args
png_file = io.BytesIO()
rgba_data = get_rgba_view_from_rgba_data(image)
# image compression is time consuming. pass parameters to make this step as fast as possible.
# see nionswift-642.
imageio.imwrite(png_file, rgba_data[..., (2,1,0,3)], "png", optimize=False, compress_level=1)
                png_encoded = base64.b64encode(png_file.getvalue()).decode('utf-8')
transform_str = " transform='{0}'".format(" ".join(transform)) if len(transform) > 0 else ""
svg_format_str = "<image x='{0}' y='{1}' width='{2}' height='{3}' xlink:href='data:image/png;base64,{4}'{5} />"
svg += svg_format_str.format(a, b, c, d, png_encoded, transform_str)
elif command_id == "data":
w, h, data, data_id, a, b, c, d, low, high, color_table, color_table_image_id = command_args
m = 255.0 / (high - low) if high != low else 1
image = numpy.empty(data.shape, numpy.uint32)
if color_table is not None:
adj_color_table = numpy.empty(color_table.shape, numpy.uint32)
# ordering of color_table is BGRA
# ordering of adj_color_table is RGBA
get_byte_view(adj_color_table)[:, 0] = get_byte_view(color_table)[:, 2]
get_byte_view(adj_color_table)[:, 1] = get_byte_view(color_table)[:, 1]
get_byte_view(adj_color_table)[:, 2] = get_byte_view(color_table)[:, 0]
get_byte_view(adj_color_table)[:, 3] = get_byte_view(color_table)[:, 3]
clipped_array = numpy.clip((m * (data - low)).astype(int), 0, 255).astype(numpy.uint8)
image[:] = adj_color_table[clipped_array]
else:
clipped_array = numpy.clip(data, low, high)
numpy.subtract(clipped_array, low, out=clipped_array)
numpy.multiply(clipped_array, m, out=clipped_array)
get_red_view(image)[:] = clipped_array
get_green_view(image)[:] = clipped_array
get_blue_view(image)[:] = clipped_array
get_alpha_view(image)[:] = 255
png_file = io.BytesIO()
# image compression is time consuming. pass parameters to make this step as fast as possible.
# see nionswift-642.
imageio.imwrite(png_file, get_rgba_view_from_rgba_data(image), "png", optimize=False, compress_level=1)
                png_encoded = base64.b64encode(png_file.getvalue()).decode('utf-8')
transform_str = " transform='{0}'".format(" ".join(transform)) if len(transform) > 0 else ""
svg_format_str = "<image x='{0}' y='{1}' width='{2}' height='{3}' xlink:href='data:image/png;base64,{4}'{5} />"
svg += svg_format_str.format(a, b, c, d, png_encoded, transform_str)
elif command_id == "stroke":
if stroke_style is not None:
transform_str = " transform='{0}'".format(" ".join(transform)) if len(transform) > 0 else ""
dash_str = " stroke-dasharray='{0}, {1}'".format(line_dash, line_dash) if line_dash else ""
svg += f"<path d='{path}' fill='none' stroke='{stroke_style}' stroke-opacity='{stroke_opacity}' stroke-width='{line_width}' stroke-linejoin='{line_join}' stroke-linecap='{line_cap}'{dash_str}{transform_str} />"
elif command_id == "sleep":
pass # used for performance testing
elif command_id == "fill":
if fill_style is not None:
transform_str = " transform='{0}'".format(" ".join(transform)) if len(transform) > 0 else ""
svg += f"<path d='{path}' fill='{fill_style}' fill-opacity='{fill_opacity}' stroke='none'{transform_str} />"
elif command_id == "fillText":
text, x, y, max_width = command_args
transform_str = " transform='{0}'".format(" ".join(transform)) if len(transform) > 0 else ""
font_str = ""
if font_style:
font_str += " font-style='{0}'".format(font_style)
if font_weight:
font_str += " font-weight='{0}'".format(font_weight)
if font_size:
font_str += " font-size='{0}{1}'".format(font_size, font_unit)
if font_family:
font_str += " font-family='{0}'".format(font_family)
if fill_style:
font_str += " fill='{0}'".format(fill_style)
if fill_opacity < 1.0:
font_str += " fill-opacity='{0}'".format(fill_opacity)
svg_format_str = "<text x='{0}' y='{1}' text-anchor='{3}' alignment-baseline='{4}'{5}{6}>{2}</text>"
svg += svg_format_str.format(x, y, xml.sax.saxutils.escape(text), text_anchor, text_baseline, font_str,
transform_str)
elif command_id == "fillStyleGradient":
command_var = command_args[0]
defs += gradient_start + "".join(gradient_stops) + "</linearGradient>"
fill_style = "url(#{0})".format("grad" + str(command_var))
elif command_id == "fillStyle":
fill_style, fill_opacity = parse_color(command_args[0])
elif command_id == "font":
font_style = None
font_weight = None
font_size = None
font_unit = None
font_family = None
for font_part in [s for s in command_args[0].split(" ") if s]:
if font_part == "italic":
font_style = "italic"
elif font_part == "bold":
font_weight = "bold"
elif font_part.endswith("px") and int(font_part[0:-2]) > 0:
font_size = int(font_part[0:-2])
font_unit = "px"
elif font_part.endswith("pt") and int(font_part[0:-2]) > 0:
font_size = int(font_part[0:-2])
font_unit = "pt"
else:
font_family = font_part
elif command_id == "textAlign":
text_anchors = {"start": "start", "end": "end", "left": "start", "center": "middle", "right": "end"}
text_anchor = text_anchors.get(command_args[0], "start")
elif command_id == "textBaseline":
text_baselines = {"top": "hanging", "hanging": "hanging", "middle": "middle",
"alphabetic": "alphabetic", "ideaographic": "ideaographic", "bottom": "bottom"}
text_baseline = text_baselines.get(command_args[0], "alphabetic")
elif command_id == "strokeStyle":
stroke_style, stroke_opacity = parse_color(command_args[0])
elif command_id == "lineWidth":
line_width = command_args[0]
elif command_id == "lineDash":
line_dash = command_args[0]
elif command_id == "lineCap":
line_caps = {"square": "square", "round": "round", "butt": "butt"}
line_cap = line_caps.get(command_args[0], "square")
elif command_id == "lineJoin":
line_joins = {"round": "round", "miter": "miter", "bevel": "bevel"}
line_join = line_joins.get(command_args[0], "bevel")
elif command_id == "gradient":
                # assumes that the gradient will be used immediately after being
                # declared and its stops defined. this is currently enforced by
                # the way the commands are generated in drawing context.
command_var, w, h, x1, y1, x2, y2 = command_args
grad_id = "grad" + str(command_var)
gradient_start = "<linearGradient id='{0}' x1='{1}' y1='{2}' x2='{3}' y2='{4}'>".format(grad_id,
float(x1 / w),
float(y1 / h),
float(x2 / w),
float(y2 / h))
elif command_id == "colorStop":
command_var, x, color = command_args
gradient_stops.append("<stop offset='{0}%' stop-color='{1}' />".format(int(x * 100), color))
else:
logging.debug("Unknown command %s", command)
xmlns = "xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'"
viewbox_str = "{0} {1} {2} {3}".format(viewbox.left, viewbox.top, viewbox.width, viewbox.height)
result = "<svg version='1.1' baseProfile='full' width='{0}' height='{1}' viewBox='{2}' {3}>".format(size.width,
size.height,
viewbox_str,
xmlns)
result += "<defs>" + defs + "</defs>"
result += svg
result += "</svg>"
return result
@contextmanager
def saver(self):
self.save()
try:
yield
finally:
self.restore()
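    # A short illustrative sketch of the saver() helper above ("dc" is a
    # hypothetical DrawingContext instance); state changed inside the "with"
    # block is restored when the block exits:
    #
    #   with dc.saver():
    #       dc.translate(10, 10)
    #       dc.fill_style = "#888"
    #       dc.fill_text("hello", 0, 0)
    #   # the translation and fill style are reverted here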
def save(self):
self.commands.append(("save", ))
self.binary_commands.extend(b"save")
self.save_count += 1
def restore(self):
self.commands.append(("restore", ))
self.binary_commands.extend(b"rest")
self.save_count -= 1
def begin_layer(self, layer_id: int, layer_seed: int, a, b, c, d) -> None:
self.commands.append(("begin_layer", int(layer_id), int(layer_seed), float(a), float(b), float(c), float(d)))
self.binary_commands.extend(struct.pack("4siiffff", b"bgly", int(layer_id), int(layer_seed), float(a), float(b), float(c), float(d)))
def end_layer(self, layer_id: int, layer_seed: int, a, b, c, d) -> None:
self.commands.append(("end_layer", int(layer_id), int(layer_seed), float(a), float(b), float(c), float(d)))
self.binary_commands.extend(struct.pack("4siiffff", b"enly", int(layer_id), int(layer_seed), float(a), float(b), float(c), float(d)))
def begin_path(self):
self.commands.append(("beginPath", ))
self.binary_commands.extend(b"bpth")
def close_path(self):
self.commands.append(("closePath", ))
self.binary_commands.extend(b"cpth")
def clip_rect(self, a, b, c, d):
self.commands.append(("clip", float(a), float(b), float(c), float(d)))
self.binary_commands.extend(struct.pack("4sffff", b"clip", float(a), float(b), float(c), float(d)))
def translate(self, x, y):
self.commands.append(("translate", float(x), float(y)))
self.binary_commands.extend(struct.pack("4sff", b"tran", float(x), float(y)))
def scale(self, x, y):
self.commands.append(("scale", float(x), float(y)))
self.binary_commands.extend(struct.pack("4sff", b"scal", float(x), float(y)))
def rotate(self, radians):
self.commands.append(("rotate", math.degrees(float(radians))))
self.binary_commands.extend(struct.pack("4sf", b"rota", math.degrees(float(radians))))
def move_to(self, x, y):
self.commands.append(("moveTo", float(x), float(y)))
self.binary_commands.extend(struct.pack("4sff", b"move", float(x), float(y)))
def line_to(self, x, y):
self.commands.append(("lineTo", float(x), float(y)))
self.binary_commands.extend(struct.pack("4sff", b"line", float(x), float(y)))
def rect(self, l, t, w, h):
self.commands.append(("rect", float(l), float(t), float(w), float(h)))
self.binary_commands.extend(struct.pack("4sffff", b"rect", float(l), float(t), float(w), float(h)))
def round_rect(self, x, y, w, h, r):
self.move_to(x + r, y)
self.arc_to(x + w, y, x + w, y + r, r)
self.arc_to(x + w, y + h, x + w - r, y + h, r)
self.arc_to(x, y + h, x, y + h - r, r)
self.arc_to(x, y, x + r, y, r)
self.close_path()
def arc(self, x, y, r, sa, ea, ac=False):
self.commands.append(("arc", float(x), float(y), float(r), float(sa), float(ea), bool(ac)))
self.binary_commands.extend(struct.pack("4sfffffi", b"arc ", float(x), float(y), float(r), float(sa), float(ea), bool(ac)))
def arc_to(self, x1, y1, x2, y2, r):
self.commands.append(("arcTo", float(x1), float(y1), float(x2), float(y2), float(r)))
self.binary_commands.extend(struct.pack("4sfffff", b"arct", float(x1), float(y1), float(x2), float(y2), float(r)))
def bezier_curve_to(self, x1, y1, x2, y2, x, y):
self.commands.append(("cubicTo", float(x1), float(y1), float(x2), float(y2), float(x), float(y)))
self.binary_commands.extend(struct.pack("4sffffff", b"cubc", float(x1), float(y1), float(x2), float(y2), float(x), float(y)))
def quadratic_curve_to(self, x1, y1, x, y):
self.commands.append(("quadraticTo", float(x1), float(y1), float(x), float(y)))
self.binary_commands.extend(struct.pack("4sffff", b"quad", float(x1), float(y1), float(x), float(y)))
def draw_image(self, img, x, y, width, height):
# img should be rgba pack, uint32
assert img.dtype == numpy.uint32
with DrawingContext.__image_id_lock:
DrawingContext.__image_id += 1
image_id = DrawingContext.__image_id
self.commands.append(
("image", img.shape[1], img.shape[0], img, int(image_id), float(x), float(y), float(width), float(height)))
self.images[str(image_id)] = img
self.binary_commands.extend(struct.pack("4siiiffff", b"imag", img.shape[1], img.shape[0], int(image_id), float(x), float(y), float(width), float(height)))
def draw_data(self, img, x, y, width, height, low, high, color_map_data):
# img should be float
assert img.dtype == numpy.float32
with DrawingContext.__image_id_lock:
DrawingContext.__image_id += 1
image_id = DrawingContext.__image_id
if color_map_data is not None:
DrawingContext.__image_id += 1
color_map_image_id = DrawingContext.__image_id
else:
color_map_image_id = 0
self.images[str(image_id)] = img
if color_map_data is not None:
self.images[str(color_map_image_id)] = color_map_data
self.commands.append(
("data", img.shape[1], img.shape[0], img, int(image_id), float(x), float(y), float(width), float(height), float(low), float(high), color_map_data, int(color_map_image_id)))
self.binary_commands.extend(struct.pack("4siiiffffffi", b"data", img.shape[1], img.shape[0], int(image_id), float(x), float(y), float(width), float(height), float(low), float(high), int(color_map_image_id)))
def stroke(self):
self.commands.append(("stroke", ))
self.binary_commands.extend(b"strk")
def sleep(self, duration):
self.commands.append(("sleep", float(duration)))
self.binary_commands.extend(struct.pack("4sf", b"slep", float(duration)))
def mark_latency(self):
self.commands.append(("latency", time.perf_counter()))
self.binary_commands.extend(struct.pack("<4sd", b"latn", time.perf_counter()))
def message(self, text):
self.commands.append(("message", text))
text_encoded = text.encode("utf-8")
self.binary_commands.extend(struct.pack("4si{}s0i".format(len(text_encoded)), b"mesg", len(text_encoded), text_encoded))
def timestamp(self, timestamp):
self.commands.append(("timestamp", timestamp))
timestamp_encoded = timestamp.encode("utf-8")
self.binary_commands.extend(struct.pack("4si{}s0i".format(len(timestamp_encoded)), b"time", len(timestamp_encoded), timestamp_encoded))
def fill(self):
self.commands.append(("fill", ))
self.binary_commands.extend(b"fill")
def fill_text(self, text, x, y, max_width=None):
text = str(text) if text is not None else str()
self.commands.append(("fillText", text, float(x), float(y), float(max_width) if max_width else 0))
text_encoded = text.encode("utf-8")
self.binary_commands.extend(struct.pack("4si{}sfff".format(len(text_encoded)), b"text", len(text_encoded), text_encoded, float(x), float(y), float(max_width) if max_width else 0))
@property
def fill_style(self):
raise NotImplementedError()
@fill_style.setter
def fill_style(self, a):
a = a or "rgba(0, 0, 0, 0.0)"
if isinstance(a, DrawingContext.LinearGradient):
self.commands.extend(a.commands)
self.commands.append(("fillStyleGradient", int(a.command_var)))
self.binary_commands.extend(a.binary_commands)
self.binary_commands.extend(struct.pack("4si", b"flsg", int(a.command_var)))
else:
self.commands.append(("fillStyle", str(a)))
a_encoded = a.encode("utf-8")
self.binary_commands.extend(struct.pack("4si{}s0i".format(len(a_encoded)), b"flst", len(a_encoded), a_encoded))
@property
def font(self):
raise NotImplementedError()
@font.setter
def font(self, a):
"""
Set the text font.
Supports 'normal', 'bold', 'italic', size specific as '14px', and font-family.
"""
self.commands.append(("font", str(a)))
a_encoded = a.encode("utf-8")
self.binary_commands.extend(struct.pack("4si{}s0i".format(len(a_encoded)), b"font", len(a_encoded), a_encoded))
def __get_text_align(self):
raise NotImplementedError()
def __set_text_align(self, a):
"""
Set text alignment.
        Valid values are 'start', 'end', 'left', 'center', 'right'. Default is 'start'.
"""
self.commands.append(("textAlign", str(a)))
a_encoded = a.encode("utf-8")
self.binary_commands.extend(struct.pack("4si{}s0i".format(len(a_encoded)), b"algn", len(a_encoded), a_encoded))
text_align = property(__get_text_align, __set_text_align)
def __get_text_baseline(self):
raise NotImplementedError()
def __set_text_baseline(self, a):
"""
Set the text baseline.
Valid values are 'top', 'hanging', 'middle', 'alphabetic', 'ideographic', and 'bottom'.
Default is 'alphabetic'.
"""
self.commands.append(("textBaseline", str(a)))
a_encoded = a.encode("utf-8")
self.binary_commands.extend(struct.pack("4si{}s0i".format(len(a_encoded)), b"tbas", len(a_encoded), a_encoded))
text_baseline = property(__get_text_baseline, __set_text_baseline)
def __get_stroke_style(self):
raise NotImplementedError()
def __set_stroke_style(self, a):
a = a or "rgba(0, 0, 0, 0.0)"
self.commands.append(("strokeStyle", str(a)))
a_encoded = a.encode("utf-8")
self.binary_commands.extend(struct.pack("4si{}s0i".format(len(a_encoded)), b"stst", len(a_encoded), a_encoded))
stroke_style = property(__get_stroke_style, __set_stroke_style)
def __get_line_width(self):
raise NotImplementedError()
def __set_line_width(self, a):
self.commands.append(("lineWidth", float(a)))
self.binary_commands.extend(struct.pack("4sf", b"linw", float(a)))
line_width = property(__get_line_width, __set_line_width)
def __get_line_dash(self):
raise NotImplementedError()
def __set_line_dash(self, a):
""" Set the line dash. Takes a single value with the length of the dash. """
self.commands.append(("lineDash", float(a)))
self.binary_commands.extend(struct.pack("4sf", b"ldsh", float(a)))
line_dash = property(__get_line_dash, __set_line_dash)
def __get_line_cap(self):
raise NotImplementedError()
def __set_line_cap(self, a):
""" Set the line join. Valid values are 'square', 'round', 'butt'. Default is 'square'. """
self.commands.append(("lineCap", str(a)))
a_encoded = a.encode("utf-8")
self.binary_commands.extend(struct.pack("4si{}s0i".format(len(a_encoded)), b"lcap", len(a_encoded), a_encoded))
line_cap = property(__get_line_cap, __set_line_cap)
def __get_line_join(self):
raise NotImplementedError()
def __set_line_join(self, a):
""" Set the line join. Valid values are 'round', 'miter', 'bevel'. Default is 'bevel'. """
self.commands.append(("lineJoin", str(a)))
a_encoded = a.encode("utf-8")
self.binary_commands.extend(struct.pack("4si{}s0i".format(len(a_encoded)), b"lnjn", len(a_encoded), a_encoded))
line_join = property(__get_line_join, __set_line_join)
class LinearGradient:
next = 1
def __init__(self, width, height, x1, y1, x2, y2): # pylint: disable=invalid-name
self.commands = []
self.binary_commands = []
self.command_var = DrawingContext.LinearGradient.next
self.commands.append(("gradient", self.command_var, float(width), float(height), float(x1), float(y1), float(x2), float(y2)))
self.binary_commands.extend(struct.pack("4siffffff", b"grad", self.command_var, float(width), float(height), float(x1), float(y1), float(x2), float(y2)))
DrawingContext.LinearGradient.next += 1
def add_color_stop(self, x, color):
self.commands.append(("colorStop", self.command_var, float(x), str(color)))
color_encoded = color.encode("utf-8")
self.binary_commands.extend(struct.pack("4sifi{}s0i".format(len(color_encoded)), b"grcs", self.command_var, float(x), len(color_encoded), color_encoded))
def create_linear_gradient(self, width, height, x1, y1, x2, y2): # pylint: disable=invalid-name
gradient = DrawingContext.LinearGradient(width, height, x1, y1, x2, y2)
return gradient
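    # Illustrative use of the gradient API above ("dc" stands for a hypothetical
    # DrawingContext instance; width/height describe the area being filled):
    #
    #   grad = dc.create_linear_gradient(width, height, 0, 0, 0, height)
    #   grad.add_color_stop(0.0, "#FFFFFF")
    #   grad.add_color_stop(1.0, "#888888")
    #   dc.fill_style = grad  # routed through the fillStyleGradient command
    #   dc.fill()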
def statistics(self, stat_id):
self.commands.append(("statistics", str(stat_id)))
stat_id_encoded = stat_id.encode("utf-8")
self.binary_commands.extend(struct.pack("4si{}s0i".format(len(stat_id_encoded)), b"stat", len(stat_id_encoded), stat_id_encoded))
# https://www.w3.org/TR/SVG11/types.html#ColorKeywords
# processed with:
"""
import re
with open("colors.txt", "r") as f:
while True:
color_line = f.readline()
if not color_line:
break
color_line = color_line.strip()
rgb_line = f.readline().strip().replace(" ", "")
c = re.split("rgb\((\d+),(\d+),(\d+)\)", rgb_line)
print(f"\t\"{color_line}\": \"#{int(c[1]):02x}{int(c[2]):02x}{int(c[3]):02x}\",")
"""
svg_color_map = {
"aliceblue": "#f0f8ff",
"antiquewhite": "#faebd7",
"aqua": "#00ffff",
"aquamarine": "#7fffd4",
"azure": "#f0ffff",
"beige": "#f5f5dc",
"bisque": "#ffe4c4",
"black": "#000000",
"blanchedalmond": "#ffebcd",
"blue": "#0000ff",
"blueviolet": "#8a2be2",
"brown": "#a52a2a",
"burlywood": "#deb887",
"cadetblue": "#5f9ea0",
"chartreuse": "#7fff00",
"chocolate": "#d2691e",
"coral": "#ff7f50",
"cornflowerblue": "#6495ed",
"cornsilk": "#fff8dc",
"crimson": "#dc143c",
"cyan": "#00ffff",
"darkblue": "#00008b",
"darkcyan": "#008b8b",
"darkgoldenrod": "#b8860b",
"darkgray": "#a9a9a9",
"darkgreen": "#006400",
"darkgrey": "#a9a9a9",
"darkkhaki": "#bdb76b",
"darkmagenta": "#8b008b",
"darkolivegreen": "#556b2f",
"darkorange": "#ff8c00",
"darkorchid": "#9932cc",
"darkred": "#8b0000",
"darksalmon": "#e9967a",
"darkseagreen": "#8fbc8f",
"darkslateblue": "#483d8b",
"darkslategray": "#2f4f4f",
"darkslategrey": "#2f4f4f",
"darkturquoise": "#00ced1",
"darkviolet": "#9400d3",
"deeppink": "#ff1493",
"deepskyblue": "#00bfff",
"dimgray": "#696969",
"dimgrey": "#696969",
"dodgerblue": "#1e90ff",
"firebrick": "#b22222",
"floralwhite": "#fffaf0",
"forestgreen": "#228b22",
"fuchsia": "#ff00ff",
"gainsboro": "#dcdcdc",
"ghostwhite": "#f8f8ff",
"gold": "#ffd700",
"goldenrod": "#daa520",
"gray": "#808080",
"grey": "#808080",
"green": "#008000",
"greenyellow": "#adff2f",
"honeydew": "#f0fff0",
"hotpink": "#ff69b4",
"indianred": "#cd5c5c",
"indigo": "#4b0082",
"ivory": "#fffff0",
"khaki": "#f0e68c",
"lavender": "#e6e6fa",
"lavenderblush": "#fff0f5",
"lawngreen": "#7cfc00",
"lemonchiffon": "#fffacd",
"lightblue": "#add8e6",
"lightcoral": "#f08080",
"lightcyan": "#e0ffff",
"lightgoldenrodyellow": "#fafad2",
"lightgray": "#d3d3d3",
"lightgreen": "#90ee90",
"lightgrey": "#d3d3d3",
"lightpink": "#ffb6c1",
"lightsalmon": "#ffa07a",
"lightseagreen": "#20b2aa",
"lightskyblue": "#87cefa",
"lightslategray": "#778899",
"lightslategrey": "#778899",
"lightsteelblue": "#b0c4de",
"lightyellow": "#ffffe0",
"lime": "#00ff00",
"limegreen": "#32cd32",
"linen": "#faf0e6",
"magenta": "#ff00ff",
"maroon": "#800000",
"mediumaquamarine": "#66cdaa",
"mediumblue": "#0000cd",
"mediumorchid": "#ba55d3",
"mediumpurple": "#9370db",
"mediumseagreen": "#3cb371",
"mediumslateblue": "#7b68ee",
"mediumspringgreen": "#00fa9a",
"mediumturquoise": "#48d1cc",
"mediumvioletred": "#c71585",
"midnightblue": "#191970",
"mintcream": "#f5fffa",
"mistyrose": "#ffe4e1",
"moccasin": "#ffe4b5",
"navajowhite": "#ffdead",
"navy": "#000080",
"oldlace": "#fdf5e6",
"olive": "#808000",
"olivedrab": "#6b8e23",
"orange": "#ffa500",
"orangered": "#ff4500",
"orchid": "#da70d6",
"palegoldenrod": "#eee8aa",
"palegreen": "#98fb98",
"paleturquoise": "#afeeee",
"palevioletred": "#db7093",
"papayawhip": "#ffefd5",
"peachpuff": "#ffdab9",
"peru": "#cd853f",
"pink": "#ffc0cb",
"plum": "#dda0dd",
"powderblue": "#b0e0e6",
"purple": "#800080",
"red": "#ff0000",
"rosybrown": "#bc8f8f",
"royalblue": "#4169e1",
"saddlebrown": "#8b4513",
"salmon": "#fa8072",
"sandybrown": "#f4a460",
"seagreen": "#2e8b57",
"seashell": "#fff5ee",
"sienna": "#a0522d",
"silver": "#c0c0c0",
"skyblue": "#87ceeb",
"slateblue": "#6a5acd",
"slategray": "#708090",
"slategrey": "#708090",
"snow": "#fffafa",
"springgreen": "#00ff7f",
"steelblue": "#4682b4",
"tan": "#d2b48c",
"teal": "#008080",
"thistle": "#d8bfd8",
"tomato": "#ff6347",
"turquoise": "#40e0d0",
"violet": "#ee82ee",
"wheat": "#f5deb3",
"white": "#ffffff",
"whitesmoke": "#f5f5f5",
"yellow": "#ffff00",
"yellowgreen": "#9acd32",
}
svg_color_reverse_map = {v: k for k, v in svg_color_map.items()}
def color_without_alpha(color: typing.Optional[str]) -> typing.Optional[str]:
if not color:
return None
color = color.strip().replace(" ", "")
c = re.split("rgba\((\d+),(\d+),(\d+),([\d.]+)\)", color)
if len(c) > 1:
return f"#{int(c[1]):02x}{int(c[2]):02x}{int(c[3]):02x}"
c = re.split("rgb\((\d+),(\d+),(\d+)\)", color)
if len(c) > 1:
return f"#{int(c[1]):02x}{int(c[2]):02x}{int(c[3]):02x}"
if color.startswith("#"):
if len(color) == 9:
return f"#{color.lower()[3:]}"
if len(color) == 7:
return color.lower()
return color
def named_color_without_alpha(color: str) -> typing.Optional[str]:
color_with_alpha = color_without_alpha(color)
return svg_color_reverse_map.get(color_with_alpha, color_with_alpha) if color_with_alpha else None
def hex_color(color: str) -> typing.Optional[str]:
if not color:
return None
color = color.strip().replace(" ", "")
c = re.split("rgba\((\d+),(\d+),(\d+),([\d.]+)\)", color)
if len(c) > 1:
return f"#{int(255 * float(c[4])):02x}{int(c[1]):02x}{int(c[2]):02x}{int(c[3]):02x}"
c = re.split("rgb\((\d+),(\d+),(\d+)\)", color)
if len(c) > 1:
return f"#{int(c[1]):02x}{int(c[2]):02x}{int(c[3]):02x}"
if color.startswith("#"):
if len(color) == 9 or len(color) == 7:
return color.lower()
return svg_color_map.get(color, color)
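# A small, never-called sketch exercising the color helpers above; the input
# strings are arbitrary examples chosen to match the SVG color table.
def _color_helper_examples():
    assert color_without_alpha("rgba(255, 0, 0, 0.5)") == "#ff0000"
    assert named_color_without_alpha("rgb(100, 149, 237)") == "cornflowerblue"
    assert hex_color("gold") == "#ffd700"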
| 43.684103
| 230
| 0.551183
|
e47554772e4cd7fe02f1fc31e4e1459b249a801f
| 5,134
|
py
|
Python
|
build_tools/precommit_checker.py
|
gabbar538/phani538
|
dff4c48ede8cc813596a9660784fea181b787ee0
|
[
"BSD-3-Clause"
] | 5
|
2017-06-19T02:03:11.000Z
|
2019-05-13T06:27:57.000Z
|
build_tools/precommit_checker.py
|
gabbar538/phani538
|
dff4c48ede8cc813596a9660784fea181b787ee0
|
[
"BSD-3-Clause"
] | 60
|
2016-03-19T16:01:27.000Z
|
2016-06-23T16:26:10.000Z
|
build_tools/precommit_checker.py
|
gabbar538/phani538
|
dff4c48ede8cc813596a9660784fea181b787ee0
|
[
"BSD-3-Clause"
] | 5
|
2017-06-14T08:06:46.000Z
|
2019-05-13T06:28:28.000Z
|
#!/usr/local/fbcode/gcc-4.8.1-glibc-2.17-fb/bin/python2.7
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import commands
import subprocess
import sys
import re
import os
import time
#
# Simple logger
#
class Log:
LOG_FILE = "/tmp/precommit-check.log"
def __init__(self):
self.filename = Log.LOG_FILE
self.f = open(self.filename, 'w+', 0)
def caption(self, str):
line = "\n##### %s #####\n" % str
if self.f:
self.f.write("%s \n" % line)
else:
print(line)
def error(self, str):
data = "\n\n##### ERROR ##### %s" % str
if self.f:
self.f.write("%s \n" % data)
else:
print(data)
def log(self, str):
if self.f:
self.f.write("%s \n" % str)
else:
print(str)
#
# Shell Environment
#
class Env(object):
def __init__(self, tests):
self.tests = tests
self.log = Log()
def shell(self, cmd, path=os.getcwd()):
if path:
os.chdir(path)
self.log.log("==== shell session ===========================")
self.log.log("%s> %s" % (path, cmd))
status = subprocess.call("cd %s; %s" % (path, cmd), shell=True,
stdout=self.log.f, stderr=self.log.f)
self.log.log("status = %s" % status)
self.log.log("============================================== \n\n")
return status
def GetOutput(self, cmd, path=os.getcwd()):
if path:
os.chdir(path)
self.log.log("==== shell session ===========================")
self.log.log("%s> %s" % (path, cmd))
status, out = commands.getstatusoutput(cmd)
self.log.log("status = %s" % status)
self.log.log("out = %s" % out)
self.log.log("============================================== \n\n")
return status, out
#
# Pre-commit checker
#
class PreCommitChecker(Env):
def __init__(self, tests):
Env.__init__(self, tests)
#
# Get commands for a given job from the determinator file
#
def get_commands(self, test):
status, out = self.GetOutput(
"build_tools/rocksdb-lego-determinator %s" % test, ".")
return status, out
#
# Run a specific CI job
#
def run_test(self, test):
self.log.caption("Running test %s locally" % test)
# get commands for the CI job determinator
status, cmds = self.get_commands(test)
if status != 0:
self.log.error("Error getting commands for test %s" % test)
return False
# Parse the JSON to extract the commands to run
cmds = re.findall("'shell':'([^\']*)'", cmds)
if len(cmds) == 0:
self.log.log("No commands found")
return False
# Run commands
for cmd in cmds:
# Replace J=<..> with the local environment variable
if "J" in os.environ:
cmd = cmd.replace("J=1", "J=%s" % os.environ["J"])
cmd = cmd.replace("make ", "make -j%s " % os.environ["J"])
# Run the command
status = self.shell(cmd, ".")
if status != 0:
self.log.error("Error running command %s for test %s"
% (cmd, test))
return False
return True
#
# Run specified CI jobs
#
def run_tests(self):
if not self.tests:
self.log.error("Invalid args. Please provide tests")
return False
self.print_separator()
self.print_row("TEST", "RESULT")
self.print_separator()
for test in self.tests:
start_time = time.time()
self.print_test(test)
result = self.run_test(test)
elapsed_min = (time.time() - start_time) / 60
if not result:
self.log.error("Error running test %s" % test)
self.print_result("FAIL (%dm)" % elapsed_min)
return False
self.print_result("PASS (%dm)" % elapsed_min)
self.print_separator()
return True
#
# Print a line
#
def print_separator(self):
print("".ljust(60, "-"))
#
    # Print two columns
#
def print_row(self, c0, c1):
print("%s%s" % (c0.ljust(40), c1.ljust(20)))
def print_test(self, test):
print(test.ljust(40), end="")
sys.stdout.flush()
def print_result(self, result):
print(result.ljust(20))
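# A minimal, never-called usage sketch of the checker above; the job names are
# placeholders taken from the help text and must be understood by the
# rocksdb-lego-determinator script.
def _example_precommit_run():
    checker = PreCommitChecker(["unit", "asan"])
    return checker.run_tests()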
#
# Main
#
parser = argparse.ArgumentParser(description='RocksDB pre-commit checker.')
# <test ....>
parser.add_argument('test', nargs='+',
help='CI test(s) to run. e.g: unit punit asan tsan ubsan')
print("Please follow log %s" % Log.LOG_FILE)
args = parser.parse_args()
checker = PreCommitChecker(args.test)
if not checker.run_tests():
print("Error running tests. Please check log file %s" % Log.LOG_FILE)
sys.exit(1)
sys.exit(0)
| 25.798995
| 78
| 0.52688
|
dcd4e49bc12865f676c32b8621a702c29c0cda1a
| 6,579
|
py
|
Python
|
Server/src/virtualenv/Lib/site-packages/pip/basecommand.py
|
ppyordanov/HCI_4_Future_Cities
|
4dc7dc59acccf30357bde66524c2d64c29908de8
|
[
"MIT"
] | null | null | null |
Server/src/virtualenv/Lib/site-packages/pip/basecommand.py
|
ppyordanov/HCI_4_Future_Cities
|
4dc7dc59acccf30357bde66524c2d64c29908de8
|
[
"MIT"
] | null | null | null |
Server/src/virtualenv/Lib/site-packages/pip/basecommand.py
|
ppyordanov/HCI_4_Future_Cities
|
4dc7dc59acccf30357bde66524c2d64c29908de8
|
[
"MIT"
] | null | null | null |
"""Base Command class, and related routines"""
import os
import sys
import tempfile
import traceback
import time
import optparse
from pip import cmdoptions
from pip.locations import running_under_virtualenv
from pip.log import logger
from pip.download import PipSession
from pip.exceptions import (BadCommand, InstallationError, UninstallationError,
CommandError, PreviousBuildDirError)
from pip.backwardcompat import StringIO
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.status_codes import (SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,
PREVIOUS_BUILD_DIR_ERROR)
from pip.util import get_prog
__all__ = ['Command']
class Command(object):
name = None
usage = None
hidden = False
def __init__(self):
parser_kw = {
'usage': self.usage,
'prog': '%s %s' % (get_prog(), self.name),
'formatter': UpdatingDefaultsHelpFormatter(),
'add_help_option': False,
'name': self.name,
'description': self.__doc__,
}
self.parser = ConfigOptionParser(**parser_kw)
# Commands should add options to this option group
optgroup_name = '%s Options' % self.name.capitalize()
self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
# Add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, self.parser)
self.parser.add_option_group(gen_opts)
def _build_session(self, options):
session = PipSession()
# Handle custom ca-bundles from the user
if options.cert:
session.verify = options.cert
# Handle timeouts
if options.timeout:
session.timeout = options.timeout
# Handle configured proxies
if options.proxy:
session.proxies = {
"http": options.proxy,
"https": options.proxy,
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.no_input
return session
def setup_logging(self):
pass
def parse_args(self, args):
# factored out for testability
return self.parser.parse_args(args)
def main(self, args):
options, args = self.parse_args(args)
level = 1 # Notify
level += options.verbose
level -= options.quiet
level = logger.level_for_integer(4 - level)
complete_log = []
logger.add_consumers(
(level, sys.stdout),
(logger.DEBUG, complete_log.append),
)
if options.log_explicit_levels:
logger.explicit_levels = True
self.setup_logging()
# TODO: try to get these passing down from the command?
# without resorting to os.environ to hold these.
if options.no_input:
os.environ['PIP_NO_INPUT'] = '1'
if options.exists_action:
os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
if options.require_venv:
# If a venv is required check if it can really be found
if not running_under_virtualenv():
logger.fatal('Could not find an activated virtualenv (required).')
sys.exit(VIRTUALENV_NOT_FOUND)
if options.log:
log_fp = open_logfile(options.log, 'a')
logger.add_consumers((logger.DEBUG, log_fp))
else:
log_fp = None
exit = SUCCESS
store_log = False
try:
status = self.run(options, args)
# FIXME: all commands should return an exit status
# and when it is done, isinstance is not needed anymore
if isinstance(status, int):
exit = status
except PreviousBuildDirError:
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = PREVIOUS_BUILD_DIR_ERROR
except (InstallationError, UninstallationError):
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except BadCommand:
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except CommandError:
e = sys.exc_info()[1]
logger.fatal('ERROR: %s' % e)
logger.info('Exception information:\n%s' % format_exc())
exit = ERROR
except KeyboardInterrupt:
logger.fatal('Operation cancelled by user')
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except:
logger.fatal('Exception:\n%s' % format_exc())
store_log = True
exit = UNKNOWN_ERROR
if store_log:
log_file_fn = options.log_file
text = '\n'.join(complete_log)
try:
log_file_fp = open_logfile(log_file_fn, 'w')
except IOError:
temp = tempfile.NamedTemporaryFile(delete=False)
log_file_fn = temp.name
log_file_fp = open_logfile(log_file_fn, 'w')
logger.fatal('Storing debug log for failure in %s' % log_file_fn)
log_file_fp.write(text)
log_file_fp.close()
if log_fp is not None:
log_fp.close()
return exit
def format_exc(exc_info=None):
if exc_info is None:
exc_info = sys.exc_info()
out = StringIO()
traceback.print_exception(*exc_info, **dict(file=out))
return out.getvalue()
def open_logfile(filename, mode='a'):
"""Open the named log file in append mode.
If the file already exists, a separator will also be printed to
the file to separate past activity from current activity.
"""
filename = os.path.expanduser(filename)
filename = os.path.abspath(filename)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
exists = os.path.exists(filename)
log_fp = open(filename, mode)
if exists:
log_fp.write('%s\n' % ('-' * 60))
log_fp.write('%s run on %s\n' % (sys.argv[0], time.strftime('%c')))
return log_fp
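# A hypothetical sketch of how a concrete command builds on Command; real pip
# commands live in their own modules and register additional options before
# run() is invoked by main().
class _ExampleCommand(Command):
    """Illustrative no-op command."""
    name = 'example'
    usage = '%prog'

    def run(self, options, args):
        logger.info('example command invoked with args: %s' % (args,))
        return SUCCESS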
| 32.569307
| 86
| 0.599483
|
13b62d4511f4cc83a8c14532cd3a71cb9b3d69a0
| 3,720
|
py
|
Python
|
tfx/components/example_gen/custom_executors/avro_component_test.py
|
rtg0795/tfx
|
63c31b719896eef645df3850d0e6b946e44cd059
|
[
"Apache-2.0"
] | 1
|
2022-03-29T23:06:54.000Z
|
2022-03-29T23:06:54.000Z
|
tfx/components/example_gen/custom_executors/avro_component_test.py
|
rtg0795/tfx
|
63c31b719896eef645df3850d0e6b946e44cd059
|
[
"Apache-2.0"
] | null | null | null |
tfx/components/example_gen/custom_executors/avro_component_test.py
|
rtg0795/tfx
|
63c31b719896eef645df3850d0e6b946e44cd059
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for using avro_executor with example_gen component."""
import os
from unittest import mock
import tensorflow as tf
from tfx.components.example_gen.component import FileBasedExampleGen
from tfx.components.example_gen.custom_executors import avro_executor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.io import fileio
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration import publisher
from tfx.orchestration.launcher import in_process_component_launcher
from tfx.proto import example_gen_pb2
from tfx.utils import name_utils
from ml_metadata.proto import metadata_store_pb2
class ExampleGenComponentWithAvroExecutorTest(tf.test.TestCase):
def setUp(self):
super().setUp()
# Create input_base.
input_data_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'testdata')
self.avro_dir_path = os.path.join(input_data_dir, 'external')
# Create input_config.
self.input_config = example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(name='avro', pattern='avro/*.avro'),
])
# Create output_config.
self.output_config = example_gen_pb2.Output(
split_config=example_gen_pb2.SplitConfig(splits=[
example_gen_pb2.SplitConfig.Split(name='train', hash_buckets=2),
example_gen_pb2.SplitConfig.Split(name='eval', hash_buckets=1)
]))
@mock.patch.object(publisher, 'Publisher')
def testRun(self, mock_publisher):
mock_publisher.return_value.publish_execution.return_value = {}
example_gen = FileBasedExampleGen(
custom_executor_spec=executor_spec.ExecutorClassSpec(
avro_executor.Executor),
input_base=self.avro_dir_path,
input_config=self.input_config,
output_config=self.output_config).with_id('AvroExampleGen')
output_data_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
pipeline_root = os.path.join(output_data_dir, 'Test')
fileio.makedirs(pipeline_root)
pipeline_info = data_types.PipelineInfo(
pipeline_name='Test', pipeline_root=pipeline_root, run_id='123')
driver_args = data_types.DriverArgs(enable_cache=True)
connection_config = metadata_store_pb2.ConnectionConfig()
connection_config.sqlite.SetInParent()
metadata_connection = metadata.Metadata(connection_config)
launcher = in_process_component_launcher.InProcessComponentLauncher.create(
component=example_gen,
pipeline_info=pipeline_info,
driver_args=driver_args,
metadata_connection=metadata_connection,
beam_pipeline_args=[],
additional_pipeline_args={})
self.assertEqual(
launcher._component_info.component_type,
name_utils.get_full_name(FileBasedExampleGen))
launcher.launch()
mock_publisher.return_value.publish_execution.assert_called_once()
# Check output paths.
self.assertTrue(fileio.exists(os.path.join(pipeline_root, example_gen.id)))
if __name__ == '__main__':
tf.test.main()
| 37.2
| 80
| 0.755914
|
edda9bbdebf1499633c9f39e9c2f8d54767542a8
| 3,164
|
py
|
Python
|
supergnova.py
|
YiliangTracyZhang/SUPERGNOVA
|
a7f40c23a3e1a0ed7fefd5bed9ac55b8414a9e0f
|
[
"MIT"
] | 15
|
2020-05-11T01:10:47.000Z
|
2022-01-24T07:53:20.000Z
|
supergnova.py
|
YiliangTracyZhang/SUPERGNOVA
|
a7f40c23a3e1a0ed7fefd5bed9ac55b8414a9e0f
|
[
"MIT"
] | 6
|
2020-05-11T00:22:01.000Z
|
2021-09-08T12:32:09.000Z
|
supergnova.py
|
YiliangTracyZhang/SUPERGNOVA
|
a7f40c23a3e1a0ed7fefd5bed9ac55b8414a9e0f
|
[
"MIT"
] | 1
|
2020-09-07T15:26:42.000Z
|
2020-09-07T15:26:42.000Z
|
#!/usr/bin/env python
'''
Local genetic correlation estimation
SUPERGNOVA
Created on 2020-5-4
Happy birthday PKU!
@author: Yiliang
'''
import argparse, os.path, sys
from subprocess import call
import multiprocessing
import pandas as pd
import numpy as np
from prep import prep
from ldsc_thin import ldscore
from heritability import heritability
from pheno import pheno
from calculate import calculate
try:
x = pd.DataFrame({'A': [1, 2, 3]})
x.drop_duplicates(subset='A')
except TypeError:
raise ImportError('SUPERGNOVA requires pandas version > 0.15.2')
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('precision', 4)
pd.set_option('max_colwidth',1000)
np.set_printoptions(linewidth=1000)
np.set_printoptions(precision=4)
# returns whether the parent directory of path exists
def parent_dir_exists(path):
return os.path.exists(os.path.abspath(os.path.join(path, os.pardir)))
def pipeline(args):
pd.options.mode.chained_assignment = None
# Sanity check args
if not parent_dir_exists(args.out):
raise ValueError('--out flag points to an invalid path.')
print('Preparing files for analysis...')
gwas_snps, bed, N1, N2 = prep(args.bfile, args.partition, args.sumstats1, args.sumstats2, args.N1, args.N2)
print('Calculating LD scores...')
ld_scores = ldscore(args.bfile, gwas_snps)
gwas_snps = gwas_snps[gwas_snps['SNP'].isin(ld_scores['SNP'])]
print('{} SNPs included in our analysis...'.format(len(gwas_snps)))
print('Calculating heritability...')
h_1, h_2 = heritability(gwas_snps, ld_scores, N1, N2)
print('The genome-wide heritability of the first trait is {}.\nThe genome-wide heritability of the second trait is {}.'.format(h_1, h_2))
print('Calculating phenotypic correlation...')
pheno_corr, pheno_corr_var = pheno(gwas_snps, ld_scores, N1, N2, h_1, h_2)
print('Calculating local genetic covariance...')
out = calculate(args.bfile, bed, args.thread, gwas_snps, ld_scores, N1, N2, pheno_corr, pheno_corr_var)
out.to_csv(args.out, sep=' ', na_rep='NA', index=False)
parser = argparse.ArgumentParser()
parser.add_argument('sumstats1',
help='The first sumstats file.')
parser.add_argument('sumstats2',
help='The second sumstats file.')
parser.add_argument('--bfile', required=True, type=str,
help='Prefix for Plink .bed/.bim/.fam file.')
parser.add_argument('--partition', required=True, type=str,
help='Genome partition file in bed format')
parser.add_argument('--N1', type=int,
help='N of the sumstats1 file. If not provided, this value will be inferred '
'from the sumstats1 arg.')
parser.add_argument('--N2', type=int,
help='N of the sumstats2 file. If not provided, this value will be inferred '
'from the sumstats2 arg.')
parser.add_argument('--out', required=True, type=str,
help='Location to output results.')
parser.add_argument('--thread', default= multiprocessing.cpu_count(), type=int,
help='Thread numbers used for calculation. Default = CPU numbers.')
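# A never-called sketch of a programmatic invocation; every path below is a
# placeholder and is not shipped with this repository.
def _example_pipeline_call():
    args = parser.parse_args([
        "sumstats1.txt.gz", "sumstats2.txt.gz",
        "--bfile", "data/bfiles/eur_chr22_SNPmaf5",
        "--partition", "data/partition/eur_chr22.bed",
        "--out", "results/local_genetic_covariance.txt",
    ])
    return pipeline(args)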
if __name__ == '__main__':
pipeline(parser.parse_args())
| 33.305263
| 141
| 0.724399
|
017c03371cdbd061e19d38b5bb000475f49a7cad
| 21
|
py
|
Python
|
aliyun-python-sdk-nas/aliyunsdknas/__init__.py
|
DataDog/aliyun-openapi-python-sdk
|
5cbee29bce6416dd62f61f0c3786b1af6ea0d84f
|
[
"Apache-2.0"
] | 1
|
2021-07-27T13:23:27.000Z
|
2021-07-27T13:23:27.000Z
|
norminette/version.py
|
N0ich/norminette
|
a19fe6b057591ef3faca469c62d3454242ce3d77
|
[
"MIT"
] | null | null | null |
norminette/version.py
|
N0ich/norminette
|
a19fe6b057591ef3faca469c62d3454242ce3d77
|
[
"MIT"
] | 1
|
2021-02-23T11:27:54.000Z
|
2021-02-23T11:27:54.000Z
|
__version__ = "3.1.2"
| 21
| 21
| 0.666667
|
643380b5db062df9515193b5a0b4d2baa6f64478
| 10,223
|
py
|
Python
|
auto_changelog/repository.py
|
stdtom/auto-changelog
|
1e2d26671569555a3d03e70e897d6f12ad61a108
|
[
"MIT"
] | null | null | null |
auto_changelog/repository.py
|
stdtom/auto-changelog
|
1e2d26671569555a3d03e70e897d6f12ad61a108
|
[
"MIT"
] | 7
|
2020-10-03T21:03:51.000Z
|
2020-10-20T18:34:49.000Z
|
auto_changelog/repository.py
|
stdtom/auto-changelog
|
1e2d26671569555a3d03e70e897d6f12ad61a108
|
[
"MIT"
] | null | null | null |
import logging
import re
from datetime import date
from hashlib import sha256
from typing import Dict, List, Tuple, Any, Optional
from urllib.parse import urljoin
from git import Repo, Commit, TagReference
from auto_changelog.domain_model import RepositoryInterface, Changelog, default_tag_pattern
class GitRepository(RepositoryInterface):
def __init__(
self,
repository_path,
latest_version: Optional[str] = None,
skip_unreleased: bool = True,
tag_prefix: str = "",
tag_pattern: Optional[str] = None,
):
self.repository = Repo(repository_path)
self.tag_prefix = tag_prefix
self.tag_pattern = tag_pattern
self.commit_tags_index = self._init_commit_tags_index(self.repository, self.tag_prefix, self.tag_pattern)
        # if a latest version is explicitly provided, unreleased commits are grouped under that release
self._skip_unreleased = skip_unreleased and not bool(latest_version)
self._latest_version = latest_version or None
def generate_changelog(
self,
title: str = "Changelog",
description: str = "",
remote: str = "origin",
issue_pattern: Optional[str] = None,
issue_url: Optional[str] = None,
diff_url: Optional[str] = None,
starting_commit: str = "",
stopping_commit: str = "HEAD",
) -> Changelog:
locallogger = logging.getLogger("repository.generate_changelog")
issue_url = issue_url or self._issue_from_git_remote_url(remote)
diff_url = diff_url or self._diff_from_git_remote_url(remote)
changelog = Changelog(title, description, issue_pattern, issue_url, self.tag_prefix, self.tag_pattern)
if self._repository_is_empty():
locallogger.info("Repository is empty.")
return changelog
iter_rev = self._get_iter_rev(starting_commit, stopping_commit)
commits = self.repository.iter_commits(iter_rev)
        # Some thoughts here.
        # First we need to check whether all commits are "released". If not, we have to create our special
        # "Unreleased" release. Then we simply iterate over all commits, assigning each one to the current
        # release, or creating a new release whenever we reach a tagged commit.
first_commit = True
skip = self._skip_unreleased
locallogger.debug("Start iterating commits")
for commit in commits:
sha = commit.hexsha[0:7]
locallogger.debug("Found commit {}".format(sha))
if skip and commit not in self.commit_tags_index:
locallogger.debug("Skipping unreleased commit " + sha)
continue
else:
skip = False
if first_commit and commit not in self.commit_tags_index:
# if no last version specified by the user => consider HEAD
if not self._latest_version:
locallogger.debug("Adding release 'unreleased'")
changelog.add_release("Unreleased", "HEAD", date.today(), sha256())
else:
locallogger.debug("Adding release '{}'".format(self._latest_version))
changelog.add_release(self._latest_version, self._latest_version, date.today(), sha256())
first_commit = False
if commit in self.commit_tags_index:
attributes = self._extract_release_args(commit, self.commit_tags_index[commit])
locallogger.debug("Adding release '{}' with attributes {}".format(attributes[0], attributes))
changelog.add_release(*attributes)
attributes = self._extract_note_args(commit)
locallogger.debug("Adding commit {} with attributes {}".format(sha, attributes))
changelog.add_note(*attributes)
# create the compare url for each release
releases = changelog.releases
        # we are using len(changelog.releases) - 1 because there is no compare url for the oldest version
if diff_url is not None: # if links are off
for release_index in reversed(range(len(changelog.releases) - 1)):
releases[release_index].set_compare_url(diff_url, releases[release_index + 1].title)
return changelog
def _issue_from_git_remote_url(self, remote: str) -> Optional[str]:
""" Creates issue url with {id} format key """
try:
url = self._remote_url(remote)
return url + "/issues/{id}"
except ValueError as e:
logging.error("%s. Turning off issue links.", e)
return None
def _diff_from_git_remote_url(self, remote: str):
try:
url = self._remote_url(remote)
return urljoin(url + "/", "compare/{previous}...{current}")
except ValueError as e:
logging.error("%s. Turning off compare url links.", e)
return None
def _remote_url(self, remote: str) -> str:
""" Extract remote url from remote url """
url = self._get_git_url(remote=remote)
url = GitRepository._sanitize_remote_url(url)
return url
@staticmethod
def _sanitize_remote_url(remote: str) -> str:
# 'git@github.com:Michael-F-Bryan/auto-changelog.git' -> 'https://github.com/Michael-F-Bryan/auto-changelog'
# 'https://github.com/Michael-F-Bryan/auto-changelog.git' -> 'https://github.com/Michael-F-Bryan/auto-changelog'
return re.sub(r"^(https|git|ssh)(:\/\/|@)(.*@)?([^\/:]+)[\/:]([^\/:]+)\/(.+).git$", r"https://\4/\5/\6", remote)
# This part is hard to mock, separate method is nice approach how to overcome this problem
def _get_git_url(self, remote: str) -> str:
remote_config = self.repository.remote(name=remote).config_reader
        # remote url can be in one of these three options
        # Test if the option exists before accessing it, otherwise the program crashes
if remote_config.has_option("url"):
return remote_config.get("url")
elif remote_config.has_option("pushurl"):
return remote_config.get("pushurl")
elif remote_config.has_option("pullurl"):
return remote_config.get("pullurl")
else:
return ""
def _get_iter_rev(self, starting_commit: str, stopping_commit: str):
if starting_commit:
c = self.repository.commit(starting_commit)
if not c.parents:
# starting_commit is initial commit,
# treat as default
starting_commit = ""
else:
# iter_commits iters from the first rev to the second rev,
# but not contains the second rev.
# Here we set the second rev to its previous one then the
# second rev would be included.
starting_commit = "{}~1".format(starting_commit)
iter_rev = "{0}...{1}".format(stopping_commit, starting_commit) if starting_commit else stopping_commit
return iter_rev
def _repository_is_empty(self):
return not bool(self.repository.references)
@staticmethod
def _init_commit_tags_index(
repo: Repo, tag_prefix: str, tag_pattern: Optional[str] = None
) -> Dict[Commit, List[TagReference]]:
""" Create reverse index """
reverse_tag_index = {}
semver_regex = default_tag_pattern
for tagref in repo.tags:
tag_name = tagref.name
commit = tagref.commit
consider_tag = False
# consider & remove the prefix if we found one
if tag_name.startswith(tag_prefix):
tag_name = tag_name.replace(tag_prefix, "")
# if user specified a tag pattern => consider it
if tag_pattern is not None:
if re.fullmatch(tag_pattern, tag_name):
consider_tag = True
# no tag pattern specified by user => check semver semantic
elif re.fullmatch(semver_regex, tag_name):
consider_tag = True
# good format of the tag => consider it
if consider_tag:
if commit not in reverse_tag_index:
reverse_tag_index[commit] = []
reverse_tag_index[commit].append(tagref)
return reverse_tag_index
@staticmethod
def _extract_release_args(commit, tags) -> Tuple[str, str, Any, Any]:
""" Extracts arguments for release """
title = ", ".join(map(lambda tag: "{}".format(tag.name), tags))
date_ = commit.authored_datetime.date()
sha = commit.hexsha
        # TODO parse message, be careful about commit message and tags message
return title, title, date_, sha
@staticmethod
def _extract_note_args(commit) -> Tuple[str, str, str, str, str, str]:
""" Extracts arguments for release Note from commit """
sha = commit.hexsha
message = commit.message
type_, scope, description, body, footer = GitRepository._parse_conventional_commit(message)
return sha, type_, description, scope, body, footer
@staticmethod
def _parse_conventional_commit(message: str) -> Tuple[str, str, str, str, str]:
type_ = scope = description = body_footer = body = footer = ""
# TODO this is less restrictive version of re. I have somewhere more restrictive one, maybe as option?
match = re.match(r"^(\w+)(\(\w+\))?!?: (.*)(\n\n[\w\W]+)?$", message)
if match:
type_, scope, description, body_footer = match.groups(default="")
else:
locallogger = logging.getLogger("repository._parse_conventional_commit")
locallogger.debug("Commit message did not match expected pattern: {}".format(message))
if scope:
scope = scope[1:-1]
if body_footer:
bf_match = re.match(r"^(\n\n[\w\W]+?)?(\n\n([a-zA-Z-]+|BREAKING[- ]CHANGE)(: | #)[\w\W]+)$", body_footer)
if bf_match:
result = bf_match.groups(default="")
body = result[0][2:]
footer = result[1][2:]
else:
body = body_footer[2:]
return type_, scope, description, body, footer
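# A small, never-called sketch of the conventional-commit parsing above; the
# commit message is a made-up example.
def _example_conventional_commit_parse():
    message = "feat(parser): add tag pattern support\n\nLonger body text.\n\nCloses: #12"
    type_, scope, description, body, footer = GitRepository._parse_conventional_commit(message)
    # yields type_ == "feat", scope == "parser",
    # description == "add tag pattern support",
    # body == "Longer body text.", footer == "Closes: #12"
    return type_, scope, description, body, footer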
| 44.641921
| 120
| 0.618605
|
b3f29f81a1d42fbd0483bc103b184160732e777e
| 4,195
|
py
|
Python
|
evaluation_t5.py
|
nunziati/open-question-answering-newsqa
|
85c6acf43163e6e0de8975b5047ddfa3b09f03d1
|
[
"MIT"
] | 2
|
2022-03-15T17:48:25.000Z
|
2022-03-15T17:48:54.000Z
|
evaluation_t5.py
|
nunziati/open-question-answering-newsqa
|
85c6acf43163e6e0de8975b5047ddfa3b09f03d1
|
[
"MIT"
] | null | null | null |
evaluation_t5.py
|
nunziati/open-question-answering-newsqa
|
85c6acf43163e6e0de8975b5047ddfa3b09f03d1
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from tqdm import tqdm
import torch
from datasets import load_dataset
from transformers import T5ForConditionalGeneration, T5Tokenizer
from torch.utils.data import DataLoader
import argparse
from MyDataset import Dataset
import MyDataset
dataset_instruction = {
"duorc": {
"parser": MyDataset.DatasetMap.duorc,
"test_set": "test"
},
"squad": {
"parser": MyDataset.DatasetMap.squad,
"test_set": "validation"
}
}
def parse_command_line_arguments():
parser = argparse.ArgumentParser(
description='CLI for evaluating T5 T2T model')
parser.add_argument('--t5_model', type=str, default="results/t5-base/checkpoint-31",
help="What type of T5 model do you want use?")
parser.add_argument('--dataset', type=str, default='duorc-SelfRC',
help="Dataset to be used, if more level provided for the dataset use the '-' token, e.g. duorc-SelfRC")
parser.add_argument('--batch_size', type=int, default=16,
help='mini-batch size (default: 16)')
parser.add_argument('--workers', type=int, default=10,
help='number of working units used to load the data (default: 10)')
parser.add_argument('--device', default='cuda', type=str,
help='device to be used for computations (in {cpu, cuda:0, cuda:1, ...}, default: cpu)')
parser.add_argument('--max_input_length', type=int, default=512,
                        help='Maximum length of input text (default: 512, maximum admitted: 512)')
parser.add_argument('--seed', type=int, default=7,
help='Seed for random initialization (default: 7)')
parsed_arguments = parser.parse_args()
return parsed_arguments
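# A hypothetical command line mirroring the defaults above; the checkpoint
# path and dataset name are just examples:
#   python evaluation_t5.py --t5_model results/t5-base/checkpoint-31 \
#       --dataset duorc-SelfRC --batch_size 16 --device cuda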
if __name__ == '__main__':
args = parse_command_line_arguments()
for k, v in args.__dict__.items():
print(k + '=' + str(v))
dataset_info = args.dataset.split("-")
name = dataset_info[0]
_data = None
if len(dataset_info) == 1:
_data = load_dataset(name)
else:
_data = load_dataset(name, dataset_info[1])
model = T5ForConditionalGeneration.from_pretrained(args.t5_model)
tokenizer = T5Tokenizer.from_pretrained(args.t5_model)
_test_set = Dataset(_data[dataset_instruction[name]["test_set"]], tokenizer, parser=dataset_instruction[name]["parser"])
my_testset_dataloader = DataLoader(_test_set, batch_size=args.batch_size, num_workers=args.workers, collate_fn=lambda data: _test_set.pack_minibatch(data))
device = args.device
model.to(device)
model.eval()
with torch.no_grad():
model_predictions_encoded = []
target_encoded = []
for contexts, questions, answers in tqdm(my_testset_dataloader):
inputs = list(map(lambda tuple: f"question: {tuple[0]} context:{tuple[1]}", zip(
questions, contexts)))
encoded_inputs = tokenizer(
inputs,
padding="longest",
max_length=args.max_input_length,
truncation=True,
return_tensors="pt",
)
encoded_targets = tokenizer(
answers,
padding="longest",
max_length=args.max_input_length,
truncation=True,
return_tensors="pt",
)
encoded_inputs, attention_mask = encoded_inputs.input_ids, encoded_inputs.attention_mask
encoded_targets = encoded_targets.input_ids
encoded_inputs = encoded_inputs.to(device)
encoded_targets = encoded_targets.to(device)
attention_mask = attention_mask.to(device)
model_predictions = model.generate(
input_ids=encoded_inputs, attention_mask=attention_mask)
model_predictions_encoded += model_predictions.tolist()
target_encoded += encoded_targets.tolist()
f1, exact_match = _test_set.evaluate(
target_encoded, model_predictions_encoded)
print(f"\t F1 = {f1:.2f}, EM = {exact_match:.2f}")
| 37.455357
| 159
| 0.633135
|
8232b249ef72b53ec3ca5cc4fdc42ad6b50c9826
| 24,143
|
py
|
Python
|
cocoeval.py
|
gcardosoy/detectron2
|
a5e17ef84c1c511007670f886df631c074809726
|
[
"Apache-2.0"
] | null | null | null |
cocoeval.py
|
gcardosoy/detectron2
|
a5e17ef84c1c511007670f886df631c074809726
|
[
"Apache-2.0"
] | null | null | null |
cocoeval.py
|
gcardosoy/detectron2
|
a5e17ef84c1c511007670f886df631c074809726
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'tsungyi'
import numpy as np
import datetime
import time
from collections import defaultdict
from . import mask as maskUtils
import copy
class COCOeval:
# Interface for evaluating detection on the Microsoft COCO dataset.
#
# The usage for CocoEval is as follows:
# cocoGt=..., cocoDt=... # load dataset and results
# E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object
# E.params.recThrs = ...; # set parameters as desired
# E.evaluate(); # run per image evaluation
# E.accumulate(); # accumulate per image results
# E.summarize(); # display summary metrics of results
# For example usage see evalDemo.m and http://mscoco.org/.
#
# The evaluation parameters are as follows (defaults in brackets):
# imgIds - [all] N img ids to use for evaluation
# catIds - [all] K cat ids to use for evaluation
# iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation
# recThrs - [0:.01:1] R=101 recall thresholds for evaluation
# areaRng - [...] A=4 object area ranges for evaluation
# maxDets - [1 10 100] M=3 thresholds on max detections per image
# iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'
# iouType replaced the now DEPRECATED useSegm parameter.
# useCats - [1] if true use category labels for evaluation
# Note: if useCats=0 category labels are ignored as in proposal scoring.
# Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.
#
# evaluate(): evaluates detections on every image and every category and
# concats the results into the "evalImgs" with fields:
# dtIds - [1xD] id for each of the D detections (dt)
# gtIds - [1xG] id for each of the G ground truths (gt)
# dtMatches - [TxD] matching gt id at each IoU or 0
# gtMatches - [TxG] matching dt id at each IoU or 0
# dtScores - [1xD] confidence of each dt
# gtIgnore - [1xG] ignore flag for each gt
# dtIgnore - [TxD] ignore flag for each dt at each IoU
#
# accumulate(): accumulates the per-image, per-category evaluation
# results in "evalImgs" into the dictionary "eval" with fields:
# params - parameters used for evaluation
# date - date evaluation was performed
# counts - [T,R,K,A,M] parameter dimensions (see above)
# precision - [TxRxKxAxM] precision for every evaluation setting
# recall - [TxKxAxM] max recall for every evaluation setting
# Note: precision and recall==-1 for settings with no gt objects.
#
# See also coco, mask, pycocoDemo, pycocoEvalDemo
#
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
# Licensed under the Simplified BSD License [see coco/license.txt]
def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):
'''
Initialize CocoEval using coco APIs for gt and dt
:param cocoGt: coco object with ground truth annotations
:param cocoDt: coco object with detection results
:return: None
'''
if not iouType:
print('iouType not specified. use default iouType segm')
self.cocoGt = cocoGt # ground truth COCO API
self.cocoDt = cocoDt # detections COCO API
self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements
self.eval = {} # accumulated evaluation results
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self.params = Params(iouType=iouType) # parameters
self._paramsEval = {} # parameters for evaluation
self.stats = [] # result summarization
self.ious = {} # ious between all gts and dts
if not cocoGt is None:
self.params.imgIds = sorted(cocoGt.getImgIds())
self.params.catIds = sorted(cocoGt.getCatIds())
def _prepare(self):
'''
Prepare ._gts and ._dts for evaluation based on params
:return: None
'''
def _toMask(anns, coco):
# modify ann['segmentation'] by reference
for ann in anns:
rle = coco.annToRLE(ann)
ann['segmentation'] = rle
p = self.params
if p.useCats:
gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
else:
gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))
# convert ground truth to mask if iouType == 'segm'
if p.iouType == 'segm':
_toMask(gts, self.cocoGt)
_toMask(dts, self.cocoDt)
# set ignore flag
for gt in gts:
gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0
gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']
if p.iouType == 'keypoints':
gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
for gt in gts:
self._gts[gt['image_id'], gt['category_id']].append(gt)
for dt in dts:
self._dts[dt['image_id'], dt['category_id']].append(dt)
self.evalImgs = defaultdict(list) # per-image per-category evaluation results
self.eval = {} # accumulated evaluation results
def evaluate(self):
'''
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
'''
tic = time.time()
print('Running per image evaluation...')
p = self.params
# add backward compatibility if useSegm is specified in params
if not p.useSegm is None:
p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
print('Evaluate annotation type *{}*'.format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params=p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == 'segm' or p.iouType == 'bbox':
computeIoU = self.computeIoU
elif p.iouType == 'keypoints':
computeIoU = self.computeOks
self.ious = {(imgId, catId): computeIoU(imgId, catId) \
for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
self._paramsEval = copy.deepcopy(self.params)
toc = time.time()
print('DONE (t={:0.2f}s).'.format(toc-tic))
def computeIoU(self, imgId, catId):
p = self.params
if p.useCats:
gt = self._gts[imgId,catId]
dt = self._dts[imgId,catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
if len(gt) == 0 and len(dt) ==0:
return []
inds = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in inds]
if len(dt) > p.maxDets[-1]:
dt=dt[0:p.maxDets[-1]]
if p.iouType == 'segm':
g = [g['segmentation'] for g in gt]
d = [d['segmentation'] for d in dt]
elif p.iouType == 'bbox':
g = [g['bbox'] for g in gt]
d = [d['bbox'] for d in dt]
else:
raise Exception('unknown iouType for iou computation')
# compute iou between each dt and gt region
iscrowd = [int(o['iscrowd']) for o in gt]
ious = maskUtils.iou(d,g,iscrowd)
return ious
def computeOks(self, imgId, catId):
p = self.params
        # dimension here should be Nxm
gts = self._gts[imgId, catId]
dts = self._dts[imgId, catId]
inds = np.argsort([-d['score'] for d in dts], kind='mergesort')
dts = [dts[i] for i in inds]
if len(dts) > p.maxDets[-1]:
dts = dts[0:p.maxDets[-1]]
# if len(gts) == 0 and len(dts) == 0:
if len(gts) == 0 or len(dts) == 0:
return []
ious = np.zeros((len(dts), len(gts)))
sigmas = p.kpt_oks_sigmas
vars = (sigmas * 2)**2
k = len(sigmas)
# compute oks between each detection and ground truth object
for j, gt in enumerate(gts):
# create bounds for ignore regions(double the gt bbox)
g = np.array(gt['keypoints'])
xg = g[0::3]; yg = g[1::3]; vg = g[2::3]
k1 = np.count_nonzero(vg > 0)
bb = gt['bbox']
x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2
y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2
for i, dt in enumerate(dts):
d = np.array(dt['keypoints'])
xd = d[0::3]; yd = d[1::3]
if k1>0:
# measure the per-keypoint distance if keypoints visible
dx = xd - xg
dy = yd - yg
else:
# measure minimum distance to keypoints in (x0,y0) & (x1,y1)
z = np.zeros((k))
dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)
dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)
e = (dx**2 + dy**2) / vars / (gt['area']+np.spacing(1)) / 2
if k1 > 0:
e=e[vg > 0]
ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]
return ious
def evaluateImg(self, imgId, catId, aRng, maxDet):
'''
perform evaluation for single category and image
:return: dict (single image results)
'''
p = self.params
if p.useCats:
gt = self._gts[imgId,catId]
dt = self._dts[imgId,catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
if len(gt) == 0 and len(dt) ==0:
return None
for g in gt:
if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):
g['_ignore'] = 1
else:
g['_ignore'] = 0
# sort dt highest score first, sort gt ignore last
gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
gt = [gt[i] for i in gtind]
dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in dtind[0:maxDet]]
iscrowd = [int(o['iscrowd']) for o in gt]
# load computed ious
ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]
T = len(p.iouThrs)
G = len(gt)
D = len(dt)
gtm = np.zeros((T,G))
dtm = np.zeros((T,D))
gtIg = np.array([g['_ignore'] for g in gt])
dtIg = np.zeros((T,D))
if not len(ious)==0:
for tind, t in enumerate(p.iouThrs):
for dind, d in enumerate(dt):
# information about best match so far (m=-1 -> unmatched)
iou = min([t,1-1e-10])
m = -1
for gind, g in enumerate(gt):
# if this gt already matched, and not a crowd, continue
if gtm[tind,gind]>0 and not iscrowd[gind]:
continue
# if dt matched to reg gt, and on ignore gt, stop
if m>-1 and gtIg[m]==0 and gtIg[gind]==1:
break
# continue to next gt unless better match made
if ious[dind,gind] < iou:
continue
# if match successful and best so far, store appropriately
iou=ious[dind,gind]
m=gind
# if match made store id of match for both dt and gt
if m ==-1:
continue
dtIg[tind,dind] = gtIg[m]
dtm[tind,dind] = gt[m]['id']
gtm[tind,m] = d['id']
# set unmatched detections outside of area range to ignore
a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))
dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))
# store results for given image and category
return {
'image_id': imgId,
'category_id': catId,
'aRng': aRng,
'maxDet': maxDet,
'dtIds': [d['id'] for d in dt],
'gtIds': [g['id'] for g in gt],
'dtMatches': dtm,
'gtMatches': gtm,
'dtScores': [d['score'] for d in dt],
'gtIgnore': gtIg,
'dtIgnore': dtIg,
}
def accumulate(self, p = None):
'''
Accumulate per image evaluation results and store the result in self.eval
:param p: input params for evaluation
:return: None
'''
print('Accumulating evaluation results...')
tic = time.time()
if not self.evalImgs:
print('Please run evaluate() first')
# allows input customized parameters
if p is None:
p = self.params
p.catIds = p.catIds if p.useCats == 1 else [-1]
T = len(p.iouThrs)
R = len(p.recThrs)
K = len(p.catIds) if p.useCats else 1
A = len(p.areaRng)
M = len(p.maxDets)
precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories
recall = -np.ones((T,K,A,M))
scores = -np.ones((T,R,K,A,M))
# create dictionary for future indexing
_pe = self._paramsEval
catIds = _pe.catIds if _pe.useCats else [-1]
setK = set(catIds)
setA = set(map(tuple, _pe.areaRng))
setM = set(_pe.maxDets)
setI = set(_pe.imgIds)
# get inds to evaluate
k_list = [n for n, k in enumerate(p.catIds) if k in setK]
m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
I0 = len(_pe.imgIds)
A0 = len(_pe.areaRng)
# retrieve E at each category, area range, and max number of detections
for k, k0 in enumerate(k_list):
Nk = k0*A0*I0
for a, a0 in enumerate(a_list):
Na = a0*I0
for m, maxDet in enumerate(m_list):
E = [self.evalImgs[Nk + Na + i] for i in i_list]
E = [e for e in E if not e is None]
if len(E) == 0:
continue
dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])
# different sorting method generates slightly different results.
# mergesort is used to be consistent as Matlab implementation.
inds = np.argsort(-dtScores, kind='mergesort')
dtScoresSorted = dtScores[inds]
dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]
dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]
gtIg = np.concatenate([e['gtIgnore'] for e in E])
npig = np.count_nonzero(gtIg==0 )
if npig == 0:
continue
tps = np.logical_and( dtm, np.logical_not(dtIg) )
fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )
                    tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float64)
                    fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float64)
for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
tp = np.array(tp)
fp = np.array(fp)
nd = len(tp)
rc = tp / npig
pr = tp / (fp+tp+np.spacing(1))
q = np.zeros((R,))
ss = np.zeros((R,))
if nd:
recall[t,k,a,m] = rc[-1]
else:
recall[t,k,a,m] = 0
# numpy is slow without cython optimization for accessing elements
# use python array gets significant speed improvement
pr = pr.tolist(); q = q.tolist()
for i in range(nd-1, 0, -1):
if pr[i] > pr[i-1]:
pr[i-1] = pr[i]
inds = np.searchsorted(rc, p.recThrs, side='left')
try:
for ri, pi in enumerate(inds):
q[ri] = pr[pi]
ss[ri] = dtScoresSorted[pi]
                        except IndexError:
pass
precision[t,:,k,a,m] = np.array(q)
scores[t,:,k,a,m] = np.array(ss)
self.eval = {
'params': p,
'counts': [T, R, K, A, M],
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'precision': precision,
'recall': recall,
'scores': scores,
}
toc = time.time()
print('DONE (t={:0.2f}s).'.format( toc-tic))
def summarize(self):
'''
Compute and display summary metrics for evaluation results.
        Note this function can *only* be applied on the default parameter setting
'''
def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):
p = self.params
iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
typeStr = '(AP)' if ap==1 else '(AR)'
iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
if iouThr is None else '{:0.2f}'.format(iouThr)
aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
if ap == 1:
# dimension of precision: [TxRxKxAxM]
s = self.eval['precision']
# IoU
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:,:,:,aind,mind]
else:
# dimension of recall: [TxKxAxM]
s = self.eval['recall']
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:,:,aind,mind]
if len(s[s>-1])==0:
mean_s = -1
else:
mean_s = np.mean(s[s>-1])
print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
return mean_s
def _summarizeDets():
stats = np.zeros((12,))
stats[0] = _summarize(1)
stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])
stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])
stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])
stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])
stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])
stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])
stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])
stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])
return stats
def _summarizeKps():
stats = np.zeros((10,))
stats[0] = _summarize(1, maxDets=20)
stats[1] = _summarize(1, maxDets=20, iouThr=.5)
stats[2] = _summarize(1, maxDets=20, iouThr=.75)
stats[3] = _summarize(1, maxDets=20, areaRng='medium')
stats[4] = _summarize(1, maxDets=20, areaRng='large')
stats[5] = _summarize(0, maxDets=20)
stats[6] = _summarize(0, maxDets=20, iouThr=.5)
stats[7] = _summarize(0, maxDets=20, iouThr=.75)
stats[8] = _summarize(0, maxDets=20, areaRng='medium')
stats[9] = _summarize(0, maxDets=20, areaRng='large')
return stats
if not self.eval:
raise Exception('Please run accumulate() first')
iouType = self.params.iouType
if iouType == 'segm' or iouType == 'bbox':
summarize = _summarizeDets
elif iouType == 'keypoints':
summarize = _summarizeKps
self.stats = summarize()
    def __str__(self):
        self.summarize()
        return ''  # summarize() prints the metrics; return a string so print(self) does not fail
class Params:
'''
Params for coco evaluation api
'''
def setDetParams(self):
self.imgIds = []
self.catIds = []
# np.arange causes trouble. the data point on arange is slightly larger than the true value
self.iouThrs = np.linspace(.5, 0.75, int(np.round((0.75 - .5) / .05)) + 1, endpoint=True)
self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
self.maxDets = [1, 10, 100]
self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
self.areaRngLbl = ['all', 'small', 'medium', 'large']
self.useCats = 1
def setKpParams(self):
self.imgIds = []
self.catIds = []
# np.arange causes trouble. the data point on arange is slightly larger than the true value
self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
self.maxDets = [20]
self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
self.areaRngLbl = ['all', 'medium', 'large']
self.useCats = 1
self.kpt_oks_sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0
def __init__(self, iouType='segm'):
if iouType == 'segm' or iouType == 'bbox':
self.setDetParams()
elif iouType == 'keypoints':
self.setKpParams()
else:
raise Exception('iouType not supported')
self.iouType = iouType
# useSegm is deprecated
self.useSegm = None
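# A minimal usage sketch of the evaluation loop described in the class comment above.
# The annotation/result paths are placeholders, and COCO/loadRes come from the standard
# pycocotools API that this class is designed to pair with:
#
#   from pycocotools.coco import COCO
#   cocoGt = COCO('annotations/instances_val2017.json')   # ground-truth annotations
#   cocoDt = cocoGt.loadRes('detections.json')            # detection results
#   E = COCOeval(cocoGt, cocoDt, iouType='bbox')
#   E.evaluate(); E.accumulate(); E.summarize()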
| 45.127103
| 131
| 0.513896
|
0b29649b61db77da0b93b9ee22c9960e8637d3d4
| 135
|
py
|
Python
|
eks_node_migrator.py
|
ahrizvi/eks-node-migrator
|
2972c3011f81cda9c78dabb13ec6449ce70506ab
|
[
"Apache-2.0"
] | 1
|
2021-11-23T22:34:23.000Z
|
2021-11-23T22:34:23.000Z
|
eks_node_migrator.py
|
ahrizvi/eks-node-migrator
|
2972c3011f81cda9c78dabb13ec6449ce70506ab
|
[
"Apache-2.0"
] | null | null | null |
eks_node_migrator.py
|
ahrizvi/eks-node-migrator
|
2972c3011f81cda9c78dabb13ec6449ce70506ab
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import sys
from eksmigrator.cli import main
if __name__ == "__main__":
main(sys.argv[1:])
# Test CI Build
| 15
| 32
| 0.696296
|
0d1b368a39f4ff83bd9d1e4ed657f07dce6389b1
| 3,123
|
py
|
Python
|
pytar/pytar.py
|
douglas-archives/pytar
|
c53f801487cbf5e3eda02ccaf53d119b37f744a5
|
[
"BSD-3-Clause"
] | null | null | null |
pytar/pytar.py
|
douglas-archives/pytar
|
c53f801487cbf5e3eda02ccaf53d119b37f744a5
|
[
"BSD-3-Clause"
] | 3
|
2018-01-13T21:04:13.000Z
|
2018-01-13T21:04:13.000Z
|
pytar/pytar.py
|
douglas-archives/pytar
|
c53f801487cbf5e3eda02ccaf53d119b37f744a5
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
import tarfile
from datetime import datetime
PY3 = sys.version > '3'
def list_contents(tar_file):
"""
Listing contents from a tar file. This implementation is based on
Python core code, (tarfile.list).
"""
members = tar_file.getmembers()
output = ''
for tarinfo in members:
line = ''
line += tarfile.filemode(tarinfo.mode) + ' '
owner = '{0}/{1}'.format(
tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid
)
line += owner
if tarinfo.ischr() or tarinfo.isblk():
dev = '{0},{1}'.format(tarinfo.devmajor, tarinfo.devminor)
line += dev.center(10) + ' '
else:
line += '{0:10}'.format(tarinfo.size) + ' '
line += str(datetime.fromtimestamp(tarinfo.mtime)) + ' '
line += '{0}{1}{2}'.format(
tarinfo.name if PY3 else tarinfo.name.decode('utf-8'),
('/' if tarinfo.isdir() else ''),
' '
)
if tarinfo.issym():
line += '-> {0}'.format(tarinfo.linkname)
if tarinfo.islnk():
line += 'link to {0}'.format(tarinfo.linkname)
output += '{0}\n'.format(line)
return output or 'Nothing to output.'
def pytar_extract(tar_file_name, extract_path=None, verbose=False):
is_a_valid_tarfile = False
messages = {
'path_does_not_exist': {
'status': 'fail',
'message': 'ERROR: This path does not exist.'
},
'is_dir': {
'status': 'fail',
'message': 'ERROR: This is a directory not a tar file.'
},
'not_a_tar_file': {
'status': 'fail',
'message': 'ERROR: This may not be a tar file or may be corrupted.'
},
'empty_tar_file': {
'status': 'fail',
'message': 'This is an empty tar file.'
},
'wrong_extract_path': {
'status': 'fail',
'message': 'ERROR: The extract path does not exist.'
},
'success': {
'status': 'success',
'message': 'Successfully extracted.',
'verbose': ''
},
}
if not os.path.exists(tar_file_name):
return messages['path_does_not_exist']
if os.path.isdir(tar_file_name):
return messages['is_dir']
if not extract_path:
path = os.path.dirname(tar_file_name)
else:
if os.path.isdir(extract_path):
path = extract_path
else:
return messages['wrong_extract_path']
is_a_valid_tarfile = tarfile.is_tarfile(tar_file_name)
if not is_a_valid_tarfile:
return messages['not_a_tar_file']
tar_file = tarfile.open(tar_file_name)
members = tar_file.getmembers()
if not members:
return messages['empty_tar_file']
tar_file.extractall(path, members)
if verbose:
messages['success']['verbose'] = list_contents(tar_file)
tar_file.close()
return messages['success']
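# A minimal usage sketch (the archive path and extract directory are placeholders);
# pytar_extract returns one of the message dicts defined above:
#
#   result = pytar_extract('backup.tar', extract_path='/tmp/output', verbose=True)
#   print(result['status'], result['message'])
#   if result['status'] == 'success' and result.get('verbose'):
#       print(result['verbose'])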
| 27.394737
| 79
| 0.557477
|
85b6ebf36cb629e3e337eacd878160a22f8a34fd
| 992
|
py
|
Python
|
decisions/csgo_decider.py
|
LavinaVRovine/hazard
|
e0408374dc0b76f8b9a0107f5f12cca2d4c033ef
|
[
"MIT"
] | 1
|
2020-10-05T14:19:35.000Z
|
2020-10-05T14:19:35.000Z
|
decisions/csgo_decider.py
|
LavinaVRovine/hazard
|
e0408374dc0b76f8b9a0107f5f12cca2d4c033ef
|
[
"MIT"
] | null | null | null |
decisions/csgo_decider.py
|
LavinaVRovine/hazard
|
e0408374dc0b76f8b9a0107f5f12cca2d4c033ef
|
[
"MIT"
] | null | null | null |
from decisions.decider import Decider
from data.get_csgo_data import get_csgo_data
from helpers.custom_exceptions import TeamNotFound, NoMatchData
from data.csgo_data_creator import CSGOData
class CSGODecider(Decider):
def __init__(self, match_row, db_location, data_handler: CSGOData):
super().__init__(match_row, db_location)
self.df = get_csgo_data()
self.data_handler = data_handler
def decide_match_action(self, predictor) -> dict:
try:
match_row = self.data_handler.create_match_stats_row(
self.team_1_name, self.team_2_name
)
except (TeamNotFound, NoMatchData) as e:
print(e)
return {"team1": self.team_1_name, "team2": self.team_2_name}
else:
# no error
match_row_df = match_row.to_frame().T.reindex(
columns=predictor.training_columns
)
return super().decide(predictor, match_row_df.fillna(0))
| 34.206897
| 73
| 0.660282
|
455374c0e6052ce457b53773e38d5167a64582bb
| 716
|
py
|
Python
|
seqauto/migrations/0026_auto_20210524_1225.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 5
|
2021-01-14T03:34:42.000Z
|
2022-03-07T15:34:18.000Z
|
seqauto/migrations/0026_auto_20210524_1225.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 551
|
2020-10-19T00:02:38.000Z
|
2022-03-30T02:18:22.000Z
|
seqauto/migrations/0026_auto_20210524_1225.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | null | null | null |
# Generated by Django 3.2.1 on 2021-05-24 02:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('seqauto', '0025_auto_20210429_1030'),
]
operations = [
migrations.AddField(
model_name='enrichmentkit',
name='sample_variants_type',
field=models.CharField(choices=[('U', 'Unknown'), ('G', 'Germline'), ('M', 'Mixed (Single Sample)'), ('S', 'Somatic only (Tumor minus normal)')], default='U', max_length=1),
),
migrations.AddField(
model_name='enrichmentkit',
name='variant_zygosity_count',
field=models.BooleanField(default=True),
),
]
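# To apply this migration in a project that includes the seqauto app (project layout
# and settings assumed), the standard Django command would be:
#
#   python manage.py migrate seqauto 0026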
| 29.833333
| 185
| 0.597765
|
0d08c8a63b54cb7c2104f06446934c62508ccdeb
| 386
|
py
|
Python
|
100DaysOfDays/Dia03/ex14.py
|
giselemanuel/programming-challenges
|
c7f37685989190b71ed7ec773f1dc407fd961cf1
|
[
"MIT"
] | null | null | null |
100DaysOfDays/Dia03/ex14.py
|
giselemanuel/programming-challenges
|
c7f37685989190b71ed7ec773f1dc407fd961cf1
|
[
"MIT"
] | null | null | null |
100DaysOfDays/Dia03/ex14.py
|
giselemanuel/programming-challenges
|
c7f37685989190b71ed7ec773f1dc407fd961cf1
|
[
"MIT"
] | null | null | null |
"""
Python Exercise 14: Write a program that reads a temperature in degrees Celsius
and converts it to degrees Fahrenheit.
"""
print("-" * 50)
print(f'{"Conversor de temperatura":^50}')
print("-" * 50)
temperatura_c = float(input("Insira a temperatura em ºC : "))
temperatura_f = (temperatura_c * (9/5)) + 32
print(f"Temperaturas:\n{temperatura_c}ºC\n{temperatura_f}ºF")
| 27.571429
| 79
| 0.722798
|
caf0fada7d3792bba3513627bf3b45d08f6c08ff
| 4,170
|
py
|
Python
|
loihi_network.py
|
russelljjarvis/brian2_loihi
|
bb94ab014aabefca0861613ba0343c13619668f3
|
[
"MIT"
] | 6
|
2021-05-03T06:20:07.000Z
|
2022-01-25T14:17:20.000Z
|
loihi_network.py
|
russelljjarvis/brian2_loihi
|
bb94ab014aabefca0861613ba0343c13619668f3
|
[
"MIT"
] | 17
|
2021-04-28T08:54:23.000Z
|
2021-12-08T19:30:46.000Z
|
loihi_network.py
|
russelljjarvis/brian2_loihi
|
bb94ab014aabefca0861613ba0343c13619668f3
|
[
"MIT"
] | 5
|
2021-07-11T23:15:03.000Z
|
2022-03-17T13:48:57.000Z
|
from brian2 import Network, ExplicitStateUpdater, StateUpdateMethod, defaultclock, ms
import warnings
class LoihiNetwork(Network):
"""
The LoihiNetwork extends the Network class from Brian2.
Note that it is important to use the LoihiNetwork class and not the magic network.
Methods
-------
__init__(*objs, **kwds)
Initializes the LoihiNetwork and the Network
run(duration, **kwargs)
Checks for problems and runs the Brian network simulation
"""
def __init__(self, *objs, **kwds):
""" Initializes the LoihiNetwork and the Network
        This method registers two ExplicitStateUpdater objects as StateUpdateMethods. These
        update methods are used by Loihi to integrate differential equations. Further, dt is
        set to 1 ms, again to match a Loihi simulation. Afterwards the __init__() method from
the Brian2 Network is called, initializing a default Brian2 network. Finally, the
default schedule from Brian2 is reordered to match Loihi. All arguments are passed
to the parent init method.
Parameters
----------
*objs :
All arguments defined by the parent class
**kwds : optional
All keyword arguments defined by the parent class
"""
# Define first order forward euler, if not already defined
if ('forward_euler' not in StateUpdateMethod.stateupdaters):
eq_forward_euler = '''
x_new = x + dt * f(x,t)
'''
forward_euler = ExplicitStateUpdater(eq_forward_euler, stochastic='none')
StateUpdateMethod.register('forward_euler', forward_euler)
# Define exact state updater for the pre/post traces for learning, if not already defined
if ('exact_synapse' not in StateUpdateMethod.stateupdaters):
eq_exact_synapse = '''
x_0 = dt*f(x,t)
x_new = int(x_0)
'''
exact_synapse = ExplicitStateUpdater(eq_exact_synapse, stochastic='none')
StateUpdateMethod.register('exact_synapse', exact_synapse)
# Set default clock dt
defaultclock.dt = 1*ms
# Call super init
super().__init__(*objs, **kwds)
# Reorder schedule to match Loihi
self.schedule = ['start', 'synapses', 'groups', 'thresholds', 'resets', 'end']
def run(self, duration, **kwargs):
""" Checks for problems and runs the Brian network simulation
The run method overwrites the run method from the Network class. Just before running
the simulation, it checks if the most important settings are still valid. If not, a
warning is shown. The user should be able to choose other settings, but should be warned
that results can then vary from Loihi. Afterwards the parent run() method is called.
The duration is modified, such that the LoihiNetwork run() method will only take an integer
without Brian's time information (e.g. ms). All keyword arguments are passed to the
parent run() method.
Parameters
----------
duration : int
Duration of the simulation as an integer value, no time (e.g. ms) should to be added
**kwargs : optional
All keyword arguments defined by the parent method
Raises
------
Warning
            If the defaultclock dt value has changed and is no longer set to 1 ms
        Warning
            If the schedule has changed and is no longer in the Loihi-like order
"""
# Check if the user has manually changed defaultclock and print warning
if (defaultclock.dt != 1*ms):
warnings.warn("The defaultclock.dt is not set to 1*ms, this may cause results which deviate from Loihi.")
# Check if the user has manually changed schedule and print warning
if (self.schedule != ['start', 'synapses', 'groups', 'thresholds', 'resets', 'end']):
warnings.warn("The schedule has changed, this may cause results which deviate from Loihi.")
# Call run method from Brian Network
super().run(duration*ms, **kwargs)
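# A minimal usage sketch (assumes a Brian2 NeuronGroup and monitor defined elsewhere;
# the names below are placeholders):
#
#   net = LoihiNetwork(neuron_group, spike_monitor)
#   net.run(100)   # 100 Loihi time steps; pass an integer, the ms unit is added internally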
| 42.55102
| 117
| 0.647482
|
6d8136f7c7a113ce056948b917c046e58618c707
| 1,691
|
py
|
Python
|
config/wsgi.py
|
naeunpark/nomadgram
|
3b3f245fd627ae160ff9492806799ddf22cbdbca
|
[
"MIT"
] | null | null | null |
config/wsgi.py
|
naeunpark/nomadgram
|
3b3f245fd627ae160ff9492806799ddf22cbdbca
|
[
"MIT"
] | 5
|
2020-06-05T19:18:08.000Z
|
2021-09-08T00:15:11.000Z
|
config/wsgi.py
|
naeunpark/nomadgram
|
3b3f245fd627ae160ff9492806799ddf22cbdbca
|
[
"MIT"
] | null | null | null |
"""
WSGI config for NOMADGRAM project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# nomadgram directory.
app_path = os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(os.path.join(app_path, 'nomadgram'))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
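# A hypothetical way to serve this application with a WSGI server such as gunicorn
# (package installation and settings configuration assumed):
#
#   gunicorn config.wsgi:application --bind 0.0.0.0:8000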
| 41.243902
| 79
| 0.797161
|
fb7f9b253725ebdd1121d2d9e9879925e74f7398
| 789
|
py
|
Python
|
setup.py
|
kkristof200/swagger_scraper
|
8d63b04b89383743da823a92b472fa5078264328
|
[
"MIT"
] | null | null | null |
setup.py
|
kkristof200/swagger_scraper
|
8d63b04b89383743da823a92b472fa5078264328
|
[
"MIT"
] | null | null | null |
setup.py
|
kkristof200/swagger_scraper
|
8d63b04b89383743da823a92b472fa5078264328
|
[
"MIT"
] | null | null | null |
import setuptools, os
readme_path = os.path.join(os.getcwd(), "README.md")
if os.path.exists(readme_path):
with open(readme_path, "r") as f:
long_description = f.read()
else:
long_description = 'swagger_scraper'
setuptools.setup(
name="swagger_scraper",
version="0.0.1",
author="Kristof",
description="scrape swagger htmls into python files",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/kkristof200/swagger_scraper",
packages=setuptools.find_packages(),
install_requires=[],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.5',
)
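# Hypothetical local install/build commands, run from the repository root:
#
#   pip install .
#   python -m build    # assumes the separate 'build' package is installed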
| 30.346154
| 57
| 0.675539
|
c98edb2dac482f9187081d0879f49a55a468c796
| 2,170
|
py
|
Python
|
docs/demos/long_callback_examples/long_callback_celery_progress_bar.py
|
neumann-nico/dash-labs
|
0bb9ef244919bf8882f97b183f0c7ab0f88762bb
|
[
"MIT"
] | 1
|
2021-09-01T12:53:15.000Z
|
2021-09-01T12:53:15.000Z
|
docs/demos/long_callback_examples/long_callback_celery_progress_bar.py
|
neumann-nico/dash-labs
|
0bb9ef244919bf8882f97b183f0c7ab0f88762bb
|
[
"MIT"
] | null | null | null |
docs/demos/long_callback_examples/long_callback_celery_progress_bar.py
|
neumann-nico/dash-labs
|
0bb9ef244919bf8882f97b183f0c7ab0f88762bb
|
[
"MIT"
] | null | null | null |
import time
import dash
import dash_html_components as html
import dash_labs as dl
from dash_labs.plugins import DiskcacheCachingCallbackManager, CeleryCallbackManager
# ## Celery on RabbitMQ
# from celery import Celery
# celery_app = Celery(__name__, backend='rpc://', broker='pyamqp://')
# long_callback_manager = CeleryCallbackManager(celery_app)
# ## Celery on Redis
# from celery import Celery
# celery_app = Celery(
# __name__, broker='redis://localhost:6379/0', backend='redis://localhost:6379/1'
# )
# long_callback_manager = CeleryCallbackManager(celery_app)
## Diskcache
import diskcache
cache = diskcache.Cache("./cache")
long_callback_manager = DiskcacheCachingCallbackManager(cache)
app = dash.Dash(
__name__,
plugins=[
dl.plugins.FlexibleCallbacks(),
dl.plugins.HiddenComponents(),
dl.plugins.LongCallback(long_callback_manager),
],
)
app.layout = html.Div(
[
html.Div(
[
html.P(id="paragraph_id", children=["Button not clicked"]),
html.Progress(id="progress_bar"),
]
),
html.Button(id="button_id", children="Run Job!"),
html.Button(id="cancel_button_id", children="Cancel Running Job!"),
]
)
@app.long_callback(
output=dl.Output("paragraph_id", "children"),
args=dl.Input("button_id", "n_clicks"),
running=[
(dl.Output("button_id", "disabled"), True, False),
(dl.Output("cancel_button_id", "disabled"), False, True),
(
dl.Output("paragraph_id", "style"),
{"visibility": "hidden"},
{"visibility": "visible"},
),
(
dl.Output("progress_bar", "style"),
{"visibility": "visible"},
{"visibility": "hidden"},
),
],
cancel=[dl.Input("cancel_button_id", "n_clicks")],
progress=dl.Output("progress_bar", ("value", "max")),
)
def callback(set_progress, n_clicks):
total = 10
for i in range(total):
time.sleep(0.5)
set_progress((str(i + 1), str(total)))
return [f"Clicked {n_clicks} times"]
if __name__ == "__main__":
app.run_server(debug=True)
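# The script above runs as-is with the diskcache manager. If the Celery manager is
# enabled instead (see the commented blocks near the top), a worker must be started
# separately; a hypothetical command, assuming this file is saved as app.py:
#
#   celery -A app:celery_app worker --loglevel=INFO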
| 27.820513
| 85
| 0.624885
|
cd30e423b3ab441d827805104d7bea43b51e167a
| 1,317
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/digitaltwins/v20200301preview/__init__.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/digitaltwins/v20200301preview/__init__.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/digitaltwins/v20200301preview/__init__.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .digital_twin import *
from .digital_twins_endpoint import *
from .get_digital_twin import *
from .get_digital_twins_endpoint import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-nextgen:digitaltwins/v20200301preview:DigitalTwin":
return DigitalTwin(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-nextgen:digitaltwins/v20200301preview:DigitalTwinsEndpoint":
return DigitalTwinsEndpoint(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-nextgen", "digitaltwins/v20200301preview", _module_instance)
_register_module()
| 34.657895
| 111
| 0.700835
|
597e4848ea3da5f71337ef3d6b9b31e263b7dc8d
| 6,431
|
py
|
Python
|
test/functional/rpc_bind.py
|
konjoinfinity/konjocoin
|
3f638857a17fc844f6ee208aac9c7aefa179f0d5
|
[
"MIT"
] | null | null | null |
test/functional/rpc_bind.py
|
konjoinfinity/konjocoin
|
3f638857a17fc844f6ee208aac9c7aefa179f0d5
|
[
"MIT"
] | null | null | null |
test/functional/rpc_bind.py
|
konjoinfinity/konjocoin
|
3f638857a17fc844f6ee208aac9c7aefa179f0d5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running konjocoind with the -rpcbind and -rpcallowip options."""
import sys
from test_framework.netutil import all_interfaces, addr_to_hex, get_bind_addrs, test_ipv6_local
from test_framework.test_framework import KonjocoinTestFramework, SkipTest
from test_framework.util import assert_equal, assert_raises_rpc_error, get_rpc_proxy, rpc_port, rpc_url
class RPCBindTest(KonjocoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.bind_to_localhost_only = False
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes, None)
def add_options(self, parser):
parser.add_argument("--ipv4", action='store_true', dest="run_ipv4", help="Run ipv4 tests only", default=False)
parser.add_argument("--ipv6", action='store_true', dest="run_ipv6", help="Run ipv6 tests only", default=False)
parser.add_argument("--nonloopback", action='store_true', dest="run_nonloopback", help="Run non-loopback tests only", default=False)
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
self.log.info("Bind test for %s" % str(addresses))
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes[0].rpchost = connect_to
self.start_node(0, base_args + binds)
pid = self.nodes[0].process.pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
'''
self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
node_args = \
['-disablewallet', '-nolisten'] + \
['-rpcallowip='+x for x in allow_ips] + \
['-rpcbind='+addr for addr in ['127.0.0.1', "%s:%d" % (rpchost, rpcport)]] # Bind to localhost as well so start_nodes doesn't hang
self.nodes[0].rpchost = None
self.start_nodes([node_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(self.nodes[0].datadir, 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()
def run_test(self):
# due to OS-specific network stats queries, this test works only on Linux
if sum([self.options.run_ipv4, self.options.run_ipv6, self.options.run_nonloopback]) > 1:
raise AssertionError("Only one of --ipv4, --ipv6 and --nonloopback can be set")
self.log.info("Check for linux")
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on linux.")
self.log.info("Check for ipv6")
have_ipv6 = test_ipv6_local()
if not have_ipv6 and not (self.options.run_ipv4 or self.options.run_nonloopback):
raise SkipTest("This test requires ipv6 support.")
self.log.info("Check for non-loopback interface")
self.non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
self.non_loopback_ip = ip
break
if self.non_loopback_ip is None and self.options.run_nonloopback:
raise SkipTest("This test requires a non-loopback ip address.")
self.defaultport = rpc_port(0)
if not self.options.run_nonloopback:
self._run_loopback_tests()
if not self.options.run_ipv4 and not self.options.run_ipv6:
self._run_nonloopback_tests()
def _run_loopback_tests(self):
if self.options.run_ipv4:
# check only IPv4 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', self.defaultport)])
# check only IPv4 localhost (explicit) with alternative port
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
else:
# check default without rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(None, '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check default with rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check only IPv6 localhost (explicit)
self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
[('::1', self.defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
def _run_nonloopback_tests(self):
self.log.info("Using interface %s for testing" % self.non_loopback_ip)
# check only non-loopback interface
self.run_bind_test([self.non_loopback_ip], self.non_loopback_ip, [self.non_loopback_ip],
[(self.non_loopback_ip, self.defaultport)])
# Check that with invalid rpcallowip, we are denied
self.run_allowip_test([self.non_loopback_ip], self.non_loopback_ip, self.defaultport)
assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], self.non_loopback_ip, self.defaultport)
if __name__ == '__main__':
RPCBindTest().main()
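# A hypothetical direct invocation of this functional test, run from the source tree
# with a built konjocoind available (as the test framework expects):
#
#   test/functional/rpc_bind.py --nonloopback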
| 49.469231
| 172
| 0.633494
|
3dbdd1b2f561e1c2c1747342ecd61b8bc53e8f0d
| 572
|
py
|
Python
|
_Listas/inter_vetor.py
|
M3nin0/supreme-broccoli
|
186c1ea3b839ba3139f9301660dec8fbd27a162e
|
[
"Apache-2.0"
] | null | null | null |
_Listas/inter_vetor.py
|
M3nin0/supreme-broccoli
|
186c1ea3b839ba3139f9301660dec8fbd27a162e
|
[
"Apache-2.0"
] | null | null | null |
_Listas/inter_vetor.py
|
M3nin0/supreme-broccoli
|
186c1ea3b839ba3139f9301660dec8fbd27a162e
|
[
"Apache-2.0"
] | null | null | null |
# Generating an intermediate list with elements from two other lists
vetorUM = []
vetorMED = []
vetorDOIS = []
ct = 1
while ct <= 10:
elm = int(input("Insira um valor do vetor 1: "))
vetorUM.append(elm)
vetorMED.append(elm)
elm = int(input("Insira um valor do vetor 2: "))
vetorDOIS.append(elm)
vetorMED.append(elm)
ct += 1
print("Os elementos inseridos no primeiro vetor são:\n", vetorUM)
print("Os elementos inseridos no segundo vetor são:\n", vetorDOIS)
print("E o vetor gerado com o elementos dos dois vetores tem os seguintes elementos:\n", vetorMED)
| 23.833333
| 98
| 0.713287
|
e329aae6856bb6329cece3dc2ca1f71bfb681f8b
| 11,208
|
py
|
Python
|
sktime/forecasting/tests/test_all_forecasters.py
|
ltoniazzi/sktime
|
0ea07803115c1ec7463dde99f049b131d639f4a7
|
[
"BSD-3-Clause"
] | 1
|
2021-11-02T18:56:12.000Z
|
2021-11-02T18:56:12.000Z
|
sktime/forecasting/tests/test_all_forecasters.py
|
ltoniazzi/sktime
|
0ea07803115c1ec7463dde99f049b131d639f4a7
|
[
"BSD-3-Clause"
] | null | null | null |
sktime/forecasting/tests/test_all_forecasters.py
|
ltoniazzi/sktime
|
0ea07803115c1ec7463dde99f049b131d639f4a7
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
# test API provided through BaseForecaster
__author__ = ["Markus Löning"]
__all__ = [
"test_raises_not_fitted_error",
"test_score",
"test_predict_time_index",
"test_update_predict_predicted_index",
"test_update_predict_predicted_index_update_params",
"test_y_multivariate_raises_error",
"test_get_fitted_params",
"test_predict_time_index_in_sample_full",
"test_predict_pred_interval",
"test_update_predict_single",
"test_y_invalid_type_raises_error",
"test_predict_time_index_with_X",
"test_X_invalid_type_raises_error",
]
import numpy as np
import pandas as pd
import pytest
from sktime.exceptions import NotFittedError
from sktime.forecasting.model_selection import SlidingWindowSplitter
from sktime.forecasting.model_selection import temporal_train_test_split
from sktime.forecasting.tests._config import TEST_ALPHAS
from sktime.forecasting.tests._config import TEST_FHS
from sktime.forecasting.tests._config import TEST_OOS_FHS
from sktime.forecasting.tests._config import TEST_STEP_LENGTHS
from sktime.forecasting.tests._config import TEST_WINDOW_LENGTHS
from sktime.forecasting.tests._config import VALID_INDEX_FH_COMBINATIONS
from sktime.performance_metrics.forecasting import (
mean_absolute_percentage_error,
)
from sktime.utils import all_estimators
from sktime.utils._testing.estimator_checks import _construct_instance
from sktime.utils._testing.forecasting import _assert_correct_pred_time_index
from sktime.utils._testing.forecasting import _get_expected_index_for_update_predict
from sktime.utils._testing.forecasting import _make_fh
from sktime.utils._testing.forecasting import make_forecasting_problem
from sktime.utils._testing.series import _make_series
from sktime.utils.validation.forecasting import check_fh
# get all forecasters
FORECASTERS = all_estimators(estimator_types="forecaster", return_names=False)
FH0 = 1
INVALID_INPUT_TYPES = [np.empty(20), list(), tuple()]
# testing data
y = make_forecasting_problem()
y_train, y_test = temporal_train_test_split(y, train_size=0.75)
@pytest.mark.parametrize("Forecaster", FORECASTERS)
def test_get_fitted_params(Forecaster):
f = _construct_instance(Forecaster)
f.fit(y_train, fh=FH0)
try:
params = f.get_fitted_params()
assert isinstance(params, dict)
except NotImplementedError:
pass
@pytest.mark.parametrize("Forecaster", FORECASTERS)
def test_raises_not_fitted_error(Forecaster):
# We here check extra method of the forecaster API: update and update_predict.
f = _construct_instance(Forecaster)
# predict is check in test suite for all estimators
with pytest.raises(NotFittedError):
f.update(y_test, update_params=False)
with pytest.raises(NotFittedError):
cv = SlidingWindowSplitter(fh=1, window_length=1, start_with_window=False)
f.update_predict(y_test, cv=cv)
try:
with pytest.raises(NotFittedError):
f.get_fitted_params()
except NotImplementedError:
pass
@pytest.mark.parametrize("Forecaster", FORECASTERS)
def test_y_multivariate_raises_error(Forecaster):
# Check that multivariate y raises an appropriate error message.
y = _make_series(n_columns=2)
f = _construct_instance(Forecaster)
with pytest.raises(ValueError, match=r"univariate"):
f.fit(y, fh=FH0)
@pytest.mark.parametrize("Forecaster", FORECASTERS)
@pytest.mark.parametrize("y", INVALID_INPUT_TYPES)
def test_y_invalid_type_raises_error(Forecaster, y):
f = _construct_instance(Forecaster)
with pytest.raises(TypeError, match=r"type"):
f.fit(y, fh=FH0)
@pytest.mark.parametrize("Forecaster", FORECASTERS)
@pytest.mark.parametrize("X", INVALID_INPUT_TYPES)
def test_X_invalid_type_raises_error(Forecaster, X):
f = _construct_instance(Forecaster)
try:
with pytest.raises(TypeError, match=r"type"):
f.fit(y_train, X, fh=FH0)
except NotImplementedError as e:
msg = str(e).lower()
assert "exogenous" in msg
@pytest.mark.parametrize("Forecaster", FORECASTERS)
@pytest.mark.parametrize(
"index_type, fh_type, is_relative", VALID_INDEX_FH_COMBINATIONS
)
@pytest.mark.parametrize("steps", TEST_FHS) # fh steps
def test_predict_time_index(Forecaster, index_type, fh_type, is_relative, steps):
# Check that predicted time index matches forecasting horizon.
y_train = make_forecasting_problem(index_type=index_type)
cutoff = y_train.index[-1]
fh = _make_fh(cutoff, steps, fh_type, is_relative)
f = _construct_instance(Forecaster)
# Some estimators may not support all time index types and fh types, hence we
# need to catch NotImplementedErrors.
try:
f.fit(y_train, fh=fh)
y_pred = f.predict()
_assert_correct_pred_time_index(y_pred.index, y_train.index[-1], fh)
except NotImplementedError:
pass
@pytest.mark.parametrize("Forecaster", FORECASTERS)
@pytest.mark.parametrize(
"index_type, fh_type, is_relative", VALID_INDEX_FH_COMBINATIONS
)
@pytest.mark.parametrize("steps", TEST_OOS_FHS) # fh steps
def test_predict_time_index_with_X(Forecaster, index_type, fh_type, is_relative, steps):
# Check that predicted time index matches forecasting horizon.
y, X = make_forecasting_problem(index_type=index_type, make_X=True)
cutoff = y.index[len(y) // 2]
fh = _make_fh(cutoff, steps, fh_type, is_relative)
y_train, y_test, X_train, X_test = temporal_train_test_split(y, X, fh=fh)
f = _construct_instance(Forecaster)
# Some estimators may not support all time index types and fh types, hence we
# need to catch NotImplementedErrors.
try:
f.fit(y_train, X_train, fh=fh)
y_pred = f.predict(X=X_test)
_assert_correct_pred_time_index(y_pred.index, y_train.index[-1], fh)
except NotImplementedError:
pass
@pytest.mark.parametrize("Forecaster", FORECASTERS)
@pytest.mark.parametrize(
"index_type, fh_type, is_relative", VALID_INDEX_FH_COMBINATIONS
)
def test_predict_time_index_in_sample_full(
Forecaster, index_type, fh_type, is_relative
):
# Check that predicted time index matched forecasting horizon for full in-sample
# predictions.
y_train = make_forecasting_problem(index_type=index_type)
cutoff = y_train.index[-1]
steps = -np.arange(len(y_train)) # full in-sample fh
fh = _make_fh(cutoff, steps, fh_type, is_relative)
f = _construct_instance(Forecaster)
# Some estimators may not support all time index types and fh types, hence we
# need to catch NotImplementedErrors.
try:
f.fit(y_train, fh=fh)
y_pred = f.predict()
_assert_correct_pred_time_index(y_pred.index, y_train.index[-1], fh)
except NotImplementedError:
pass
def _check_pred_ints(pred_ints: list, y_train: pd.Series, y_pred: pd.Series, fh):
# make iterable
if isinstance(pred_ints, pd.DataFrame):
pred_ints = [pred_ints]
for pred_int in pred_ints:
# check column naming convention
assert list(pred_int.columns) == ["lower", "upper"]
# check time index
_assert_correct_pred_time_index(pred_int.index, y_train.index[-1], fh)
# check values
assert np.all(pred_int["upper"] > y_pred)
assert np.all(pred_int["lower"] < y_pred)
# check if errors are weakly monotonically increasing
# pred_errors = y_pred - pred_int["lower"]
# # assert pred_errors.is_mononotic_increasing
# assert np.all(
# pred_errors.values[1:].round(4) >= pred_errors.values[:-1].round(4)
# )
@pytest.mark.parametrize("Forecaster", FORECASTERS)
@pytest.mark.parametrize("fh", TEST_OOS_FHS)
@pytest.mark.parametrize("alpha", TEST_ALPHAS)
def test_predict_pred_interval(Forecaster, fh, alpha):
# Check prediction intervals.
f = _construct_instance(Forecaster)
f.fit(y_train, fh=fh)
try:
y_pred, pred_ints = f.predict(return_pred_int=True, alpha=alpha)
_check_pred_ints(pred_ints, y_train, y_pred, fh)
except NotImplementedError:
pass
@pytest.mark.parametrize("Forecaster", FORECASTERS)
@pytest.mark.parametrize("fh", TEST_OOS_FHS)
def test_score(Forecaster, fh):
# Check score method
f = _construct_instance(Forecaster)
f.fit(y_train, fh=fh)
y_pred = f.predict()
fh_idx = check_fh(fh).to_indexer() # get zero based index
expected = mean_absolute_percentage_error(
y_pred, y_test.iloc[fh_idx], symmetric=True
)
# compare with actual score
f = _construct_instance(Forecaster)
f.fit(y_train, fh=fh)
actual = f.score(y_test.iloc[fh_idx], fh=fh)
assert actual == expected
@pytest.mark.parametrize("Forecaster", FORECASTERS)
@pytest.mark.parametrize("fh", TEST_OOS_FHS)
@pytest.mark.parametrize("update_params", [True, False])
def test_update_predict_single(Forecaster, fh, update_params):
# Check correct time index of update-predict
f = _construct_instance(Forecaster)
f.fit(y_train, fh=fh)
y_pred = f.update_predict_single(y_test, update_params=update_params)
_assert_correct_pred_time_index(y_pred.index, y_test.index[-1], fh)
def _check_update_predict_predicted_index(
Forecaster, fh, window_length, step_length, update_params
):
y = make_forecasting_problem(all_positive=True, index_type="datetime")
y_train, y_test = temporal_train_test_split(y)
cv = SlidingWindowSplitter(
fh,
window_length=window_length,
step_length=step_length,
start_with_window=False,
)
f = _construct_instance(Forecaster)
f.fit(y_train, fh=fh)
y_pred = f.update_predict(y_test, cv=cv, update_params=update_params)
assert isinstance(y_pred, (pd.Series, pd.DataFrame))
if isinstance(y_pred, pd.DataFrame):
assert y_pred.shape[1] > 1
expected = _get_expected_index_for_update_predict(y_test, fh, step_length)
actual = y_pred.index
np.testing.assert_array_equal(actual, expected)
# test with update_params=False and different values for steps_length
@pytest.mark.parametrize("Forecaster", FORECASTERS)
@pytest.mark.parametrize("fh", TEST_OOS_FHS)
@pytest.mark.parametrize("window_length", TEST_WINDOW_LENGTHS)
@pytest.mark.parametrize("step_length", TEST_STEP_LENGTHS)
@pytest.mark.parametrize("update_params", [False])
def test_update_predict_predicted_index(
Forecaster, fh, window_length, step_length, update_params
):
_check_update_predict_predicted_index(
Forecaster, fh, window_length, step_length, update_params
)
# test with update_params=True and step_length=1
@pytest.mark.parametrize("Forecaster", FORECASTERS)
@pytest.mark.parametrize("fh", TEST_OOS_FHS)
@pytest.mark.parametrize("window_length", TEST_WINDOW_LENGTHS)
@pytest.mark.parametrize("step_length", [1])
@pytest.mark.parametrize("update_params", [True])
def test_update_predict_predicted_index_update_params(
Forecaster, fh, window_length, step_length, update_params
):
_check_update_predict_predicted_index(
Forecaster, fh, window_length, step_length, update_params
)
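# A hypothetical local invocation of this test module with pytest (path as in the
# repository layout above):
#
#   pytest sktime/forecasting/tests/test_all_forecasters.py -k "predict_time_index"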
| 36.38961
| 88
| 0.746074
|
13200bc2c7a8e7f91d588fd9142a94445ea1f9d9
| 3,353
|
py
|
Python
|
ticTacToeGUI.py
|
yashovardhan99/Tic-Tac-Toe
|
62797eb622827fac5b00a990401c8d5ddeb6a637
|
[
"Apache-2.0"
] | 1
|
2018-04-27T09:30:51.000Z
|
2018-04-27T09:30:51.000Z
|
ticTacToeGUI.py
|
yashovardhan99/Tic-Tac-Toe
|
62797eb622827fac5b00a990401c8d5ddeb6a637
|
[
"Apache-2.0"
] | null | null | null |
ticTacToeGUI.py
|
yashovardhan99/Tic-Tac-Toe
|
62797eb622827fac5b00a990401c8d5ddeb6a637
|
[
"Apache-2.0"
] | null | null | null |
from tkinter import *
import tkinter.messagebox
from ticTacToe import getAIMove,isWinner,isSpaceFree,makeMove
b = ['']*10
var = ['']*10
gameBoard = ['']*10
playerLetter='X' #Change later
AILetter = 'O'
playerColor='red'#Will be able to change later
AIColor='blue'
playerMove=False
startGameCheck = False
moves = 0
master=Tk()
def makeGUIMove(pos,board,letter):
#To make the relevant move and also update the GUI accordingly
makeMove(letter,board,pos)
    if letter == playerLetter:
b[pos].config(text=letter,disabledforeground=playerColor)
else:
b[pos].config(text=letter,disabledforeground=AIColor)
b[pos].config(state=DISABLED)
#Check if winner as well!
pass
def checkDraw():
global moves
if moves>=9:
tkinter.messagebox.showinfo(title='Tic Tac Toe',message="It's a draw!")
def makeAIMove():
global moves,playerMove
move = getAIMove(gameBoard,AILetter)
makeGUIMove(move,gameBoard,AILetter)
playerMove=True
moves = moves+1
if isWinner(gameBoard,AILetter):
tkinter.messagebox.showinfo(title='Tic Tac Toe',message="Oops! The AI wins!")
else:
checkDraw()
def onClick(id):
global moves
if not startGameCheck:
startGame()
return
global playerMove
if playerMove and isSpaceFree(id,gameBoard):
playerMove=False
makeGUIMove(id,gameBoard,playerLetter)
moves = moves+1
if isWinner(gameBoard,playerLetter):
tkinter.messagebox.showinfo(title='Tic Tac Toe',message="You Win!")
else:
checkDraw()
makeAIMove()
#check for winner
else:
#Do Something maybe
pass
def restartGame():
global gameBoard,moves,b,var,playerMove,startGameCheck
for i in range(1,10):
gameBoard[i]=str(i)
var[i]=Variable(value=0)
b[i].config(text=str(i),state=NORMAL)
playerMove=False
startGameCheck=False
moves=0
startGame()
def __init__():
global gameBoard,master
#Initial setup of game board
for i in range(1,10):
gameBoard[i]=str(i)
var[i]=Variable(value=0)
        b[i] = Button(master,text=str(i),font=("arial",10,"bold"),padx=2,pady=2,overrelief=RIDGE,command= lambda id=i:onClick(id))
#b[i].pack(fill=BOTH,expand=1)
if i in range(1,4):
b[i].grid(row=2,column=i-1,sticky=NSEW)
elif i in range(4,7):
b[i].grid(row=1,column=i-4,sticky=NSEW)
else:
b[i].grid(row=0,column=i-7,sticky=NSEW)
for i in range(3):
Grid.columnconfigure(master,i,weight=1,minsize=80)
Grid.rowconfigure(master,i,weight=1,minsize=80)
menubar = Menu(master)
menubar.add_command(label='Restart Game',command=restartGame)
master.config(menu=menubar)
master.title("Tic Tac Toe")
startGame()
#Starting here
def startGame():
global moves
global playerMove
global startGameCheck
startGameCheck=True
#starts the logical part of the game
#We assume right now that the player starts first and is X (RED)
moves=0
current = 0 #0 for player, 1 for AI
if current==0:
        playerMove=True
else:
makeAIMove()
playerMove=False
#Calls mainloop for GUI setup. Should only be called once
__init__() #Initial setup
master.mainloop()
| 27.710744
| 130
| 0.650164
|
a699ae1a93e1ae038d689ac23ff2015712a89146
| 9,984
|
py
|
Python
|
src/Final-NeuralFBProphet/optim/bayesian.py
|
ds-wook/Final-NeuralFBProphet
|
0cb44bfa0ca1ea49077e29bce4d34af2c6a4d618
|
[
"Apache-2.0"
] | 16
|
2021-04-29T13:27:37.000Z
|
2021-12-30T00:19:00.000Z
|
src/Final-NeuralFBProphet/optim/bayesian.py
|
ds-wook/Final-NeuralFBProphet
|
0cb44bfa0ca1ea49077e29bce4d34af2c6a4d618
|
[
"Apache-2.0"
] | null | null | null |
src/Final-NeuralFBProphet/optim/bayesian.py
|
ds-wook/Final-NeuralFBProphet
|
0cb44bfa0ca1ea49077e29bce4d34af2c6a4d618
|
[
"Apache-2.0"
] | 3
|
2021-05-20T04:05:15.000Z
|
2021-05-20T04:12:40.000Z
|
from typing import Callable
import joblib
import optuna
import pandas as pd
from fbprophet import Prophet
from optuna import Trial
from optuna.samplers import TPESampler
from sklearn.metrics import mean_squared_error
class BayesianOptimizer:
def __init__(self, objective_function: object):
self.objective_function = objective_function
def build_study(self, trials: int, verbose: bool = False):
sampler = TPESampler(seed=42)
study = optuna.create_study(
study_name="TPE hyperparameter",
direction="minimize",
sampler=sampler,
)
study.optimize(self.objective_function, n_trials=trials)
if verbose:
self.display_study_statistics(study)
return study
    @staticmethod
    def display_study_statistics(study: optuna.create_study):
print("Best Score:", study.best_value)
print("Best trial:", study.best_trial.params)
@staticmethod
def save_params(study: optuna.create_study, params_name: str):
params = study.best_trial.params
joblib.dump(params, "../../parameters/" + params_name)
@staticmethod
def save_two_params(study: optuna.create_study, params_name: str):
prophet_params = study.best_params
prophet_params["growth"] = "logistic"
prophet_params["seasonality_mode"] = "additive"
prophet_params["weekly_seasonality"] = True
prophet_params["daily_seasonality"] = True
prophet_params["yearly_seasonality"] = False
joblib.dump(prophet_params, "../../parameters/" + params_name)
@staticmethod
def plot_optimization_history(study: optuna.create_study) -> optuna.visualization:
return optuna.visualization.plot_optimization_history(study)
@staticmethod
def plot_param_importances(study: optuna.create_study) -> optuna.visualization:
return optuna.visualization.plot_param_importances(study)
@staticmethod
def plot_edf(study: optuna.create_study) -> optuna.visualization:
return optuna.visualization.plot_edf(study)
def ontune_prophet_objective(
train: pd.DataFrame, valid: pd.Series, cap: float, floor: float
) -> Callable[[Trial], float]:
def objective(trial: Trial) -> float:
params = {
"changepoint_range": trial.suggest_discrete_uniform(
"changepoint_range", 0.8, 0.95, 0.001
),
"n_changepoints": trial.suggest_int("n_changepoints", 20, 35),
"changepoint_prior_scale": trial.suggest_discrete_uniform(
"changepoint_prior_scale", 0.001, 0.5, 0.001
),
"seasonality_prior_scale": trial.suggest_discrete_uniform(
"seasonality_prior_scale", 1, 25, 0.5
),
"yearly_fourier": trial.suggest_int("yearly_fourier", 5, 15),
"monthly_fourier": trial.suggest_int("monthly_fourier", 3, 12),
"weekly_fourier": trial.suggest_int("weekly_fourier", 3, 7),
"quaterly_fourier": trial.suggest_int("quaterly_fourier", 3, 10),
"yearly_prior": trial.suggest_discrete_uniform("yearly_prior", 1, 25, 0.5),
"monthly_prior": trial.suggest_discrete_uniform(
"monthly_prior", 1, 25, 0.5
),
"weekly_prior": trial.suggest_discrete_uniform("weekly_prior", 1, 25, 0.5),
"quaterly_prior": trial.suggest_discrete_uniform(
"quaterly_prior", 1, 25, 0.5
),
"growth": "logistic",
"seasonality_mode": "additive",
"weekly_seasonality": True,
"daily_seasonality": True,
}
# fit_model
model = Prophet(
            changepoint_range=params["changepoint_range"],
n_changepoints=params["n_changepoints"],
changepoint_prior_scale=params["changepoint_prior_scale"],
seasonality_prior_scale=params["seasonality_prior_scale"],
yearly_seasonality=False,
weekly_seasonality=True,
daily_seasonality=True,
growth="logistic",
seasonality_mode="additive",
)
model.add_seasonality(
name="yearly",
period=365.25,
fourier_order=params["yearly_fourier"],
prior_scale=params["yearly_prior"],
)
model.add_seasonality(
name="monthly",
period=30.5,
fourier_order=params["monthly_fourier"],
prior_scale=params["monthly_prior"],
)
model.add_seasonality(
name="weekly",
period=7,
fourier_order=params["weekly_fourier"],
prior_scale=params["weekly_prior"],
)
model.add_seasonality(
name="quaterly",
period=365.25 / 4,
fourier_order=params["quaterly_fourier"],
prior_scale=params["quaterly_prior"],
)
train["cap"] = cap
train["floor"] = floor
model.fit(train)
future = model.make_future_dataframe(periods=144, freq="d")
future["cap"] = cap
future["floor"] = floor
forecast = model.predict(future)
valid_forecast = forecast.tail(7)
rmse = mean_squared_error(valid.y, valid_forecast.yhat, squared=False)
return rmse
return objective
def vcenter_prophet_objective(
train: pd.DataFrame, valid: pd.Series, cap: float, floor: float
) -> Callable[[Trial], float]:
def objective(trial: Trial) -> float:
params = {
"changepoint_range": trial.suggest_discrete_uniform(
"changepoint_range", 0.8, 0.95, 0.001
),
"n_changepoints": trial.suggest_int("n_changepoints", 20, 35),
"changepoint_prior_scale": trial.suggest_discrete_uniform(
"changepoint_prior_scale", 0.001, 0.5, 0.001
),
"seasonality_prior_scale": trial.suggest_discrete_uniform(
"seasonality_prior_scale", 1, 25, 0.5
),
"yearly_fourier": trial.suggest_int("yearly_fourier", 5, 15),
"monthly_fourier": trial.suggest_int("monthly_fourier", 3, 12),
"weekly_fourier": trial.suggest_int("weekly_fourier", 3, 7),
"quaterly_fourier": trial.suggest_int("quaterly_fourier", 3, 10),
"yearly_prior": trial.suggest_discrete_uniform("yearly_prior", 1, 25, 0.5),
"monthly_prior": trial.suggest_discrete_uniform(
"monthly_prior", 1, 25, 0.5
),
"weekly_prior": trial.suggest_discrete_uniform("weekly_prior", 1, 25, 0.5),
"quaterly_prior": trial.suggest_discrete_uniform(
"quaterly_prior", 1, 25, 0.5
),
"growth": "logistic",
"seasonality_mode": "additive",
"weekly_seasonality": True,
"daily_seasonality": True,
}
# fit_model
model = Prophet(
            changepoint_range=params["changepoint_range"],
n_changepoints=params["n_changepoints"],
changepoint_prior_scale=params["changepoint_prior_scale"],
seasonality_prior_scale=params["seasonality_prior_scale"],
yearly_seasonality=False,
weekly_seasonality=True,
daily_seasonality=True,
growth="logistic",
seasonality_mode="additive",
)
model.add_seasonality(
name="yearly",
period=365.25,
fourier_order=params["yearly_fourier"],
prior_scale=params["yearly_prior"],
)
model.add_seasonality(
name="monthly",
period=30.5,
fourier_order=params["monthly_fourier"],
prior_scale=params["monthly_prior"],
)
model.add_seasonality(
name="weekly",
period=7,
fourier_order=params["weekly_fourier"],
prior_scale=params["weekly_prior"],
)
model.add_seasonality(
name="quaterly",
period=365.25 / 4,
fourier_order=params["quaterly_fourier"],
prior_scale=params["quaterly_prior"],
)
train["cap"] = cap
train["floor"] = floor
model.fit(train)
future = model.make_future_dataframe(periods=144, freq="d")
future["cap"] = cap
future["floor"] = floor
forecast = model.predict(future)
valid_forecast = forecast.tail(7)
rmse = mean_squared_error(valid.y, valid_forecast.yhat, squared=False)
return rmse
return objective
def two_second_prophet_objective(
train: pd.DataFrame, valid: pd.Series, cap: float, floor: float
) -> Callable[[Trial], float]:
def objective(trial: Trial) -> float:
params = {
"changepoint_range": trial.suggest_discrete_uniform(
"changepoint_range", 0.8, 0.95, 0.001
),
"n_changepoints": trial.suggest_int("n_changepoints", 20, 35),
"changepoint_prior_scale": trial.suggest_discrete_uniform(
"changepoint_prior_scale", 0.001, 0.5, 0.001
),
"seasonality_prior_scale": trial.suggest_discrete_uniform(
"seasonality_prior_scale", 1, 25, 0.5
),
"growth": "logistic",
"seasonality_mode": "additive",
"yearly_seasonality": False,
"weekly_seasonality": True,
"daily_seasonality": True,
}
# fit_model
m = Prophet(**params)
train["cap"] = cap
train["floor"] = floor
m.fit(train)
future = m.make_future_dataframe(periods=163, freq="H")
future["cap"] = cap
future["floor"] = floor
forecast = m.predict(future)
valid_forecast = forecast.tail(163)
val_rmse = mean_squared_error(valid.y, valid_forecast.yhat, squared=False)
return val_rmse
return objective
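# Hypothetical usage sketch (not part of the original module): wiring one of the
# objective factories above into BayesianOptimizer. The train/valid frames, the cap
# and floor values, and the output filename are placeholders chosen for illustration.
#
#   objective = two_second_prophet_objective(train_df, valid_df, cap=1.0, floor=0.0)
#   optimizer = BayesianOptimizer(objective)
#   study = optimizer.build_study(trials=50, verbose=True)
#   BayesianOptimizer.save_params(study, "prophet_two_second.pkl")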
| 36.977778
| 87
| 0.605869
|
9a1e3c63e76154b488ead642b72aca448ac44a5e
| 8,624
|
py
|
Python
|
qmcpack_reader.py
|
Paul-St-Young/solid_hydrogen
|
dd218cd431a283dc1a371a0af5696074d63b8c6c
|
[
"MIT"
] | 2
|
2020-08-13T23:32:03.000Z
|
2021-03-28T01:14:06.000Z
|
qmcpack_reader.py
|
Paul-St-Young/solid_hydrogen
|
dd218cd431a283dc1a371a0af5696074d63b8c6c
|
[
"MIT"
] | null | null | null |
qmcpack_reader.py
|
Paul-St-Young/solid_hydrogen
|
dd218cd431a283dc1a371a0af5696074d63b8c6c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import numpy as np
import pandas as pd
def get_supertwists(qmc_out):
""" read supercell twists from QMCPACK output
Args:
qmc_out (str): QMCPACK output, must contain "Super twist #"
Return:
np.array: an array of twist vectors (ntwist, ndim)
"""
from qharv.reel import ascii_out
mm = ascii_out.read(qmc_out)
idxl = ascii_out.all_lines_with_tag(mm, 'Super twist #')
  lines = ascii_out.all_lines_at_idx(mm, idxl)
data = []
for line in lines:
text = ascii_out.lr_mark(line, '[', ']')
vec = np.array(text.split(), dtype=float)
data.append(vec)
mat = np.array(data)
return mat
def epl_val_err(epl_out):
""" convert epl_out to a pandas DataFrame.
epl_out is expected to be an output of energy.pl from QMCPACK
It simply has to have the format {name:22c}={val:17.3f} +/- {err:26.4f}.
rows with forces will be recognized with 'force_prefix'
Args:
epl_out (str): energy.pl output filename
Returns:
pd.DataFrame: df contains columns ['name','val','err']
"""
tab = pd.read_csv(epl_out, delimiter='=', names=['name', 'text'])
tab = tab.dropna()
def text2val(text):
tokens = text.split('+/-')
if len(tokens) != 2:
raise NotImplementedError('unrecognized value '+text)
val,err = map(float, tokens)
return pd.Series({'val':val, 'err':err})
df = pd.concat([
tab.drop('text', axis=1),
tab['text'].apply(text2val)],
axis=1)
return df
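# Illustrative sketch (hypothetical input line, not taken from the original file):
# an energy.pl row such as
#   LocalEnergy           =        -15.123 +/-    0.004
# is split on '=' and then on '+/-', producing one DataFrame row with
# name='LocalEnergy', val=-15.123, err=0.004.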
def epldf_to_entry(df):
names = [name.strip() for name in df.name.values]
ymean = ['%s_mean' % name for name in names]
yerror = ['%s_error' % name for name in names]
names1 = np.concatenate([ymean, yerror])
means = df.val.values
errs = df.err.values
entry = pd.Series(np.concatenate([means, errs]), names1)
return entry
def get_forces(df, natom, prefix='force', ndim=3):
yml = []
yel = []
for iatom in range(natom):
for idim in range(ndim):
col = '%s_%d_%d' % (prefix, iatom, idim)
sel = df.name.apply(lambda x: col in x)
y1m = df.loc[sel].val.squeeze()
y1e = df.loc[sel].err.squeeze()
yml.append(y1m)
yel.append(y1e)
return np.array(yml), np.array(yel)
def sk_from_fs_out(fs_out):
""" extract fluctuating S(k) from qmcfinitesize output
returns: kmag,sk,vk,spk,spsk
kmag: magnitude of kvectors, sk: raw fluc. S(k), vk: long-range potential after break-up
spk: kmags for splined S(k), spsk: splined S(k) """
import reader
bint = reader.BlockInterpreter()
sfile = reader.SearchableFile(fs_out)
# read raw data
block_text = sfile.block_text('#SK_RAW_START#','#SK_RAW_STOP#')
kmag,sk,vk = bint.matrix(block_text[block_text.find('\n')+1:]).T
# read splined S(k)
block_text = sfile.block_text('#SK_SPLINE_START#','#SK_SPLINE_STOP#')
spk,spsk = bint.matrix(block_text[block_text.find('\n')+1:]).T
return kmag,sk,vk,spk,spsk
# end def
# =============== complicated functions ===============
from copy import deepcopy
def read_jastrows(jas_node):
""" 'jas_node' should be an xml node containing bspline jastrows
put coefficients and attributes into a list of dictionaries """
if (jas_node.attrib["type"] != "Two-Body"): # works for one-body! miracle!
    pass  # raise TypeError("input is not a two-body Jastrow xml node")
elif (jas_node.attrib["function"].lower() != "bspline"):
raise NotImplementedError("can only handle bspline Jastrows for now")
# end if
data = []
for corr in jas_node.xpath('./correlation'):
coeff = corr.xpath('./coefficients')[0]
entry = deepcopy( corr.attrib )
entry.update(coeff.attrib)
entry['coeff'] = np.array(coeff.text.split(),dtype=float)
entry['type'] = jas_node.attrib['type']
data.append(entry)
# end for corr
return data
# end def read_jastrows
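# Hypothetical usage sketch: read_jastrows expects an lxml element, e.g.
#   doc = etree.parse('prefix.s000.opt.xml')
#   entries = read_jastrows(doc.xpath('//jastrow')[0])
# which mirrors how extract_jastrows() below drives it.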
from lxml import etree
def extract_jastrows(qmcpack_input,json_name='jas.json',warn=True,force_refresh=False):
""" given a QMCPACK input that contains linear optimization, extract all printed Jastrows and store in a local database
  1. parse 'qmcpack_input' for the qmc[@method="linear"] section
2. for each *.opt.xml, parse if it exists
3. parse each opt.xml and make local database """
failed = False
subdir = os.path.dirname(qmcpack_input)
target_json = os.path.join(subdir,json_name)
if os.path.isfile(target_json) and (not force_refresh):
if warn:
print("skipping %s" % subdir)
# end if
    return 0  # skip this file
# end if
parser = etree.XMLParser(remove_blank_text=True)
# get prefix
xml = etree.parse(qmcpack_input,parser)
proj = xml.xpath("//project")[0]
prefix = proj.attrib['id']
# determine number of optimization loops
all_qmc_sections = xml.xpath('.//qmc[@method="linear"]')
all_iopt = 0 # track multiple 'linear' sections
data = []
for qmc_section in all_qmc_sections:
# for each linear optimization:
# find the number of loops
nopt = 1
loop = qmc_section.getparent()
if loop.tag == 'loop':
nopt = int(loop.attrib['max'])
# end if
# collect all jastrow coefficients
for iopt in range(nopt):
# get optimization file
opt_file = prefix + ".s%s.opt.xml" % str(all_iopt).zfill(3)
opt_xml = os.path.join(subdir,opt_file)
if not os.path.isfile(opt_xml):
if warn:
print("skipping %d in %s" % (all_iopt,subdir))
# end if
continue
# end if
# parse optimization file
opt = etree.parse(opt_xml,parser)
jnodes = opt.xpath('//jastrow')
for jas_node in jnodes:
entries = read_jastrows(jas_node)
for entry in entries:
entry['iopt'] = all_iopt
# end for entry
data.append(entry)
# end for
all_iopt += 1
# end for iopt
# end for qmc_section
if len(data) == 0:
failed = True
else:
df = pd.DataFrame( data )
df.to_json(target_json)
# end if
return failed
# end def extract_jastrows
def extract_best_jastrow_set(opt_input,opt_json='opt_scalar.json',nequil='auto',force_refresh=False):
import nexus_addon as na
subdir = os.path.dirname(opt_input)
# locally create jas.json
extract_jastrows(opt_input,force_refresh=force_refresh)
# locally create opt_scalar.json
scalar_json = os.path.join(subdir,opt_json)
if (not os.path.isfile(scalar_json)) or force_refresh:
# initialize analyzer
from qmca import QBase
options = {"equilibration":nequil}
QBase.options.transfer_from(options)
entry = na.scalars_from_input(opt_input)
pd.DataFrame(entry).to_json(scalar_json)
# end if
# get best jastrow set
best_jas = collect_best_jastrow_set(subdir)
return best_jas
# end def extract_best_jastrow_set
def collect_best_jastrow_set(subdir,jas_json='jas.json',opt_json='opt_scalar.json'
,rval_weight=0.75,rerr_weight=0.25):
""" find best set of jastrows in 'subdir', assume files:
1. jas.json: a database of QMCPACK bspline jastrows with 'iopt' column
2. opt_scalar.json: a database of QMCPACK scalars including 'LocalEnergy_mean', 'LocalEnergy_error', 'Variance_mean', and 'Variance_error' """
from dmc_database_analyzer import div_columns
jfile = os.path.join(subdir,jas_json)
if not os.path.isfile(jfile):
raise RuntimeError('%s not found in %s' % (jfile,subdir))
# end if
ofile = os.path.join(subdir,opt_json)
if not os.path.isfile(ofile):
raise RuntimeError('%s not found in %s' % (ofile,subdir))
# end if
jdf = pd.read_json(jfile) # jastrows
sdf = pd.read_json(ofile) # scalars
# same units for stddev and LocalEnergy
sdf['stddev_mean'] = sdf['Variance_mean'].apply(np.sqrt)
sdf['stddev_error'] = sdf['Variance_error'].apply(np.sqrt)
# make ratios
ratio_mean, ratio_error = div_columns(['stddev','LocalEnergy'],sdf)
# take both value and error into account
rv_cost = ratio_mean/ratio_mean.mean()
re_cost = ratio_error/ratio_error.mean()
# make a cost function
cost = rv_cost*rval_weight + re_cost*rerr_weight
# minimize cost function
idx = np.argmin(cost)
# grab winner jastrows
best_jas = jdf[jdf['iopt']==idx].copy()
return best_jas
# end def collect_best_jastrow_set
| 32.666667
| 147
| 0.64019
|
3a79321c3f9f4274a159e46b2adb15ca78439857
| 5,166
|
py
|
Python
|
maskrcnn_benchmark/modeling/matcher.py
|
Sreehari-S/mask-rcnn-benchmark
|
b4434c39fccda80575276308da86b6e944540445
|
[
"MIT"
] | null | null | null |
maskrcnn_benchmark/modeling/matcher.py
|
Sreehari-S/mask-rcnn-benchmark
|
b4434c39fccda80575276308da86b6e944540445
|
[
"MIT"
] | 1
|
2020-02-18T12:25:48.000Z
|
2020-02-18T12:25:48.000Z
|
maskrcnn_benchmark/modeling/matcher.py
|
Sreehari-S/mask-rcnn-benchmark
|
b4434c39fccda80575276308da86b6e944540445
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
class Matcher(object):
"""
This class assigns to each predicted "element" (e.g., a box) a ground-truth
element. Each predicted element will have exactly zero or one matches; each
ground-truth element may be assigned to zero or more predicted elements.
Matching is based on the MxN match_quality_matrix, that characterizes how well
each (ground-truth, predicted)-pair match. For example, if the elements are
boxes, the matrix may contain box IoU overlap values.
The matcher returns a tensor of size N containing the index of the ground-truth
element m that matches to prediction n. If there is no match, a negative value
is returned.
"""
BELOW_LOW_THRESHOLD = -1
BETWEEN_THRESHOLDS = -2
def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False):
"""
Args:
high_threshold (float): quality values greater than or equal to
this value are candidate matches.
low_threshold (float): a lower quality threshold used to stratify
matches into three levels:
1) matches >= high_threshold
2) BETWEEN_THRESHOLDS matches in [low_threshold, high_threshold)
3) BELOW_LOW_THRESHOLD matches in [0, low_threshold)
allow_low_quality_matches (bool): if True, produce additional matches
for predictions that have only low-quality match candidates. See
set_low_quality_matches_ for more details.
"""
assert low_threshold <= high_threshold
self.high_threshold = high_threshold
self.low_threshold = low_threshold
self.allow_low_quality_matches = allow_low_quality_matches
def __call__(self, match_quality_matrix):
"""
Args:
match_quality_matrix (Tensor[float]): an MxN tensor, containing the
pairwise quality between M ground-truth elements and N predicted elements.
Returns:
matches (Tensor[int64]): an N tensor where N[i] is a matched gt in
[0, M - 1] or a negative value indicating that prediction i could not
be matched.
"""
if match_quality_matrix.numel() == 0:
# empty targets or proposals not supported during training
if match_quality_matrix.shape[0] == 0:
raise ValueError(
"No ground-truth boxes available for one of the images "
"during training")
else:
raise ValueError(
"No proposal boxes available for one of the images "
"during training")
#print(match_quality_matrix)
# match_quality_matrix is M (gt) x N (predicted)
# Max over gt elements (dim 0) to find best gt candidate for each prediction
matched_vals, matches = match_quality_matrix.max(dim=0)
if self.allow_low_quality_matches:
all_matches = matches.clone()
# Assign candidate matches with low quality to negative (unassigned) values
below_low_threshold = matched_vals < self.low_threshold
between_thresholds = (matched_vals >= self.low_threshold) & (
matched_vals < self.high_threshold
)
matches[below_low_threshold] = Matcher.BELOW_LOW_THRESHOLD
matches[between_thresholds] = Matcher.BETWEEN_THRESHOLDS
if self.allow_low_quality_matches:
self.set_low_quality_matches_(matches, all_matches, match_quality_matrix)
return matches
def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
"""
Produce additional matches for predictions that have only low-quality matches.
Specifically, for each ground-truth find the set of predictions that have
maximum overlap with it (including ties); for each prediction in that set, if
it is unmatched, then match it to the ground-truth with which it has the highest
quality value.
"""
# For each gt, find the prediction with which it has highest quality
highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
# Find highest quality match available, even if it is low, including ties
gt_pred_pairs_of_highest_quality = torch.nonzero(
match_quality_matrix == highest_quality_foreach_gt[:, None]
)
# Example gt_pred_pairs_of_highest_quality:
# tensor([[ 0, 39796],
# [ 1, 32055],
# [ 1, 32070],
# [ 2, 39190],
# [ 2, 40255],
# [ 3, 40390],
# [ 3, 41455],
# [ 4, 45470],
# [ 5, 45325],
# [ 5, 46390]])
# Each row is a (gt index, prediction index)
# Note how gt items 1, 2, 3, and 5 each have two ties
pred_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1]
matches[pred_inds_to_update] = all_matches[pred_inds_to_update]
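# A minimal, self-contained usage sketch (not part of the original file); it assumes
# only torch and the Matcher class defined above.
if __name__ == "__main__":
    # Rows are ground-truth boxes (M=2), columns are predictions (N=3); values are IoUs.
    iou = torch.tensor([[0.9, 0.3, 0.05],
                        [0.2, 0.6, 0.05]])
    matcher = Matcher(high_threshold=0.7, low_threshold=0.3, allow_low_quality_matches=True)
    matches = matcher(iou)
    # Expected result: prediction 0 -> gt 0 (IoU 0.9 >= 0.7); prediction 1 -> gt 1 via the
    # low-quality path (0.6 is gt 1's best match); prediction 2 -> BELOW_LOW_THRESHOLD (-1).
    print(matches)  # tensor([ 0,  1, -1])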
| 45.315789
| 88
| 0.634533
|
e1544f1fa22df9f7413b0ddbbdd89fb780379761
| 3,696
|
py
|
Python
|
venv/lib/python3.8/site-packages/django/contrib/sites/models.py
|
Joshua-Barawa/My-Photos
|
adcaea48149c6b31e9559b045709d538d0b749bc
|
[
"PostgreSQL",
"Unlicense"
] | 16
|
2019-08-10T12:24:06.000Z
|
2020-05-21T09:11:14.000Z
|
venv/lib/python3.8/site-packages/django/contrib/sites/models.py
|
Joshua-Barawa/My-Photos
|
adcaea48149c6b31e9559b045709d538d0b749bc
|
[
"PostgreSQL",
"Unlicense"
] | 12
|
2019-08-10T11:55:29.000Z
|
2020-05-21T04:46:30.000Z
|
venv/lib/python3.8/site-packages/django/contrib/sites/models.py
|
Joshua-Barawa/My-Photos
|
adcaea48149c6b31e9559b045709d538d0b749bc
|
[
"PostgreSQL",
"Unlicense"
] | 4
|
2022-03-12T10:17:00.000Z
|
2022-03-26T08:40:43.000Z
|
import string
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.db import models
from django.db.models.signals import pre_delete, pre_save
from django.http.request import split_domain_port
from django.utils.translation import gettext_lazy as _
SITE_CACHE = {}
def _simple_domain_name_validator(value):
"""
Validate that the given value contains no whitespaces to prevent common
typos.
"""
checks = ((s in value) for s in string.whitespace)
if any(checks):
raise ValidationError(
_("The domain name cannot contain any spaces or tabs."),
code="invalid",
)
class SiteManager(models.Manager):
use_in_migrations = True
def _get_site_by_id(self, site_id):
if site_id not in SITE_CACHE:
site = self.get(pk=site_id)
SITE_CACHE[site_id] = site
return SITE_CACHE[site_id]
def _get_site_by_request(self, request):
host = request.get_host()
try:
# First attempt to look up the site by host with or without port.
if host not in SITE_CACHE:
SITE_CACHE[host] = self.get(domain__iexact=host)
return SITE_CACHE[host]
except Site.DoesNotExist:
# Fallback to looking up site after stripping port from the host.
domain, port = split_domain_port(host)
if domain not in SITE_CACHE:
SITE_CACHE[domain] = self.get(domain__iexact=domain)
return SITE_CACHE[domain]
def get_current(self, request=None):
"""
Return the current Site based on the SITE_ID in the project's settings.
If SITE_ID isn't defined, return the site with domain matching
request.get_host(). The ``Site`` object is cached the first time it's
retrieved from the database.
"""
from django.conf import settings
if getattr(settings, "SITE_ID", ""):
site_id = settings.SITE_ID
return self._get_site_by_id(site_id)
elif request:
return self._get_site_by_request(request)
raise ImproperlyConfigured(
'You\'re using the Django "sites framework" without having '
"set the SITE_ID setting. Create a site in your database and "
"set the SITE_ID setting or pass a request to "
"Site.objects.get_current() to fix this error."
)
def clear_cache(self):
"""Clear the ``Site`` object cache."""
global SITE_CACHE
SITE_CACHE = {}
def get_by_natural_key(self, domain):
return self.get(domain=domain)
class Site(models.Model):
domain = models.CharField(
_("domain name"),
max_length=100,
validators=[_simple_domain_name_validator],
unique=True,
)
name = models.CharField(_("display name"), max_length=50)
objects = SiteManager()
class Meta:
db_table = "django_site"
verbose_name = _("site")
verbose_name_plural = _("sites")
ordering = ["domain"]
def __str__(self):
return self.domain
def natural_key(self):
return (self.domain,)
def clear_site_cache(sender, **kwargs):
"""
Clear the cache (if primed) each time a site is saved or deleted.
"""
instance = kwargs["instance"]
using = kwargs["using"]
try:
del SITE_CACHE[instance.pk]
except KeyError:
pass
try:
del SITE_CACHE[Site.objects.using(using).get(pk=instance.pk).domain]
except (KeyError, Site.DoesNotExist):
pass
pre_save.connect(clear_site_cache, sender=Site)
pre_delete.connect(clear_site_cache, sender=Site)
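# Typical usage (illustrative only, not part of the module above): inside a view one
# would call Site.objects.get_current(request) to resolve the active Site, and
# Site.objects.clear_cache() after editing Site rows or changing SITE_ID in tests.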
| 30.295082
| 79
| 0.63934
|
5bfb71a41a82dce4ecfd0fe6c106a7c141b25e7d
| 927
|
py
|
Python
|
nomadgram/notifications/migrations/0002_auto_20190314_1205.py
|
SimJunSik/nomadgram
|
958f16824268a3e5cbeb449f8142209d367d731c
|
[
"MIT"
] | null | null | null |
nomadgram/notifications/migrations/0002_auto_20190314_1205.py
|
SimJunSik/nomadgram
|
958f16824268a3e5cbeb449f8142209d367d731c
|
[
"MIT"
] | 5
|
2020-06-05T20:17:40.000Z
|
2021-09-08T01:10:06.000Z
|
nomadgram/notifications/migrations/0002_auto_20190314_1205.py
|
SimJunSik/nomadgram
|
958f16824268a3e5cbeb449f8142209d367d731c
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.13 on 2019-03-14 03:05
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('notifications', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='notification',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='notification',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='notification',
name='image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='images.Image'),
),
]
| 28.96875
| 123
| 0.621359
|
e0cbb66fff93925310894ca72883b9ce8076f2a8
| 3,455
|
py
|
Python
|
mayan/apps/sources/widgets.py
|
Dave360-crypto/mayan-edms
|
9cd37537461347f79ff0429e4b8b16fd2446798d
|
[
"Apache-2.0"
] | 3
|
2020-02-03T11:58:51.000Z
|
2020-10-20T03:52:21.000Z
|
mayan/apps/sources/widgets.py
|
Dave360-crypto/mayan-edms
|
9cd37537461347f79ff0429e4b8b16fd2446798d
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/sources/widgets.py
|
Dave360-crypto/mayan-edms
|
9cd37537461347f79ff0429e4b8b16fd2446798d
|
[
"Apache-2.0"
] | 2
|
2020-10-24T11:10:06.000Z
|
2021-03-03T20:05:38.000Z
|
from django import forms
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.encoding import force_unicode
from django.utils.html import strip_tags
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from converter.literals import DEFAULT_PAGE_NUMBER, DEFAULT_ROTATION, DEFAULT_ZOOM_LEVEL
from documents.conf.settings import THUMBNAIL_SIZE, PREVIEW_SIZE
class FamFamRadioFieldRenderer(forms.widgets.RadioFieldRenderer):
def render(self):
results = []
results.append(u'<ul>\n')
for w in self:
if w.choice_value:
famfam_template = u'<span class="famfam active famfam-%s" style="vertical-align: bottom;"></span>' % w.choice_value
else:
famfam_template = u'<span class="famfam active famfam-cross" style="vertical-align: bottom;"></span>'
results.append(u'<li class="undecorated_list">%s%s</li>' % (famfam_template, force_unicode(w)))
results.append(u'\n</ul>')
return mark_safe(u'\n'.join(results))
class FamFamRadioSelect(forms.widgets.RadioSelect):
renderer = FamFamRadioFieldRenderer
def staging_file_thumbnail(staging_file, **kwargs):
return staging_file_html_widget(staging_file, click_view='stagingfolderfile-image-view', **kwargs)
def staging_file_html_widget(staging_file, click_view=None, page=DEFAULT_PAGE_NUMBER, zoom=DEFAULT_ZOOM_LEVEL, rotation=DEFAULT_ROTATION, gallery_name=None, fancybox_class='fancybox-staging', image_class='lazy-load', title=None, size=THUMBNAIL_SIZE, nolazyload=False):
result = []
alt_text = _(u'staging file page image')
query_dict = {
'page': page,
'zoom': zoom,
'rotation': rotation,
'size': size,
}
if gallery_name:
gallery_template = u'rel="%s"' % gallery_name
else:
gallery_template = u''
query_string = urlencode(query_dict)
preview_view = u'%s?%s' % (reverse('stagingfolderfile-image-view', args=[staging_file.staging_folder.pk, staging_file.encoded_filename]), query_string)
plain_template = []
plain_template.append(u'<img src="%s" alt="%s" />' % (preview_view, alt_text))
result.append(u'<div class="tc" id="staging_file-%s-%d">' % (staging_file.filename, page if page else DEFAULT_PAGE_NUMBER))
if title:
title_template = u'title="%s"' % strip_tags(title)
else:
title_template = u''
if click_view:
# TODO: fix this hack
query_dict['size'] = PREVIEW_SIZE
query_string = urlencode(query_dict)
result.append(u'<a %s class="%s" href="%s" %s>' % (gallery_template, fancybox_class, u'%s?%s' % (reverse(click_view, args=[staging_file.staging_folder.pk, staging_file.encoded_filename]), query_string), title_template))
if nolazyload:
result.append(u'<img style="border: 1px solid black;" src="%s" alt="%s" />' % (preview_view, alt_text))
else:
result.append(u'<img class="thin_border %s" data-original="%s" src="%simages/ajax-loader.gif" alt="%s" />' % (image_class, preview_view, settings.STATIC_URL, alt_text))
result.append(u'<noscript><img style="border: 1px solid black;" src="%s" alt="%s" /></noscript>' % (preview_view, alt_text))
if click_view:
result.append(u'</a>')
result.append(u'</div>')
return mark_safe(u''.join(result))
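# Illustrative call (hypothetical arguments, not from the original module):
#   staging_file_thumbnail(staging_file, gallery_name='staging_gallery', title=staging_file.filename)
# returns the safe HTML fragment built by staging_file_html_widget() above, ready to be
# embedded in a template.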
| 40.647059
| 268
| 0.690883
|
4cd88cf135a6403e80af9e7ddad837dcac5fb245
| 9,248
|
py
|
Python
|
sdk/python/pulumi_azure_native/web/v20200901/web_app_backup_configuration.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/web/v20200901/web_app_backup_configuration.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/web/v20200901/web_app_backup_configuration.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['WebAppBackupConfiguration']
class WebAppBackupConfiguration(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backup_name: Optional[pulumi.Input[str]] = None,
backup_schedule: Optional[pulumi.Input[pulumi.InputType['BackupScheduleArgs']]] = None,
databases: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DatabaseBackupSettingArgs']]]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_account_url: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Description of a backup which will be performed.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] backup_name: Name of the backup.
:param pulumi.Input[pulumi.InputType['BackupScheduleArgs']] backup_schedule: Schedule for the backup if it is executed periodically.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DatabaseBackupSettingArgs']]]] databases: Databases included in the backup.
:param pulumi.Input[bool] enabled: True if the backup schedule is enabled (must be included in that case), false if the backup schedule should be disabled.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of the app.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] storage_account_url: SAS URL to the container.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['backup_name'] = backup_name
__props__['backup_schedule'] = backup_schedule
__props__['databases'] = databases
__props__['enabled'] = enabled
__props__['kind'] = kind
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__['name'] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if storage_account_url is None and not opts.urn:
raise TypeError("Missing required property 'storage_account_url'")
__props__['storage_account_url'] = storage_account_url
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-native:web:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-nextgen:web:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-native:web/latest:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-nextgen:web/latest:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-native:web/v20150801:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20150801:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-native:web/v20160801:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-native:web/v20180201:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-native:web/v20181101:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-native:web/v20190801:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-native:web/v20200601:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-native:web/v20201001:WebAppBackupConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20201001:WebAppBackupConfiguration")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppBackupConfiguration, __self__).__init__(
'azure-native:web/v20200901:WebAppBackupConfiguration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppBackupConfiguration':
"""
Get an existing WebAppBackupConfiguration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["backup_name"] = None
__props__["backup_schedule"] = None
__props__["databases"] = None
__props__["enabled"] = None
__props__["kind"] = None
__props__["name"] = None
__props__["storage_account_url"] = None
__props__["system_data"] = None
__props__["type"] = None
return WebAppBackupConfiguration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="backupName")
def backup_name(self) -> pulumi.Output[Optional[str]]:
"""
Name of the backup.
"""
return pulumi.get(self, "backup_name")
@property
@pulumi.getter(name="backupSchedule")
def backup_schedule(self) -> pulumi.Output[Optional['outputs.BackupScheduleResponse']]:
"""
Schedule for the backup if it is executed periodically.
"""
return pulumi.get(self, "backup_schedule")
@property
@pulumi.getter
def databases(self) -> pulumi.Output[Optional[Sequence['outputs.DatabaseBackupSettingResponse']]]:
"""
Databases included in the backup.
"""
return pulumi.get(self, "databases")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
True if the backup schedule is enabled (must be included in that case), false if the backup schedule should be disabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="storageAccountUrl")
def storage_account_url(self) -> pulumi.Output[str]:
"""
SAS URL to the container.
"""
return pulumi.get(self, "storage_account_url")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system metadata relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
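# Illustrative sketch (hypothetical resource name, group and URL; not part of the generated SDK):
#   backup = WebAppBackupConfiguration("siteBackup",
#       name="my-web-app",
#       resource_group_name="my-resource-group",
#       storage_account_url="https://<account>.blob.core.windows.net/<container>?<sas>",
#       enabled=True)
# name, resource_group_name and storage_account_url are required, as enforced in __init__ above.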
| 48.166667
| 1,481
| 0.67074
|
ebf056812b0025810154ffeef59c87b2f8a28ead
| 5,217
|
py
|
Python
|
datahub/public_private_fund_members.py
|
Xingquan-Li/stock-1
|
0d8ff7ffd27e32cfcdd01d5ca217345b97ce950d
|
[
"BSD-3-Clause"
] | 1
|
2022-03-15T07:41:22.000Z
|
2022-03-15T07:41:22.000Z
|
datahub/public_private_fund_members.py
|
Xingquan-Li/stock-1
|
0d8ff7ffd27e32cfcdd01d5ca217345b97ce950d
|
[
"BSD-3-Clause"
] | null | null | null |
datahub/public_private_fund_members.py
|
Xingquan-Li/stock-1
|
0d8ff7ffd27e32cfcdd01d5ca217345b97ce950d
|
[
"BSD-3-Clause"
] | null | null | null |
# Public and private fund member data acquisition
import math
import sys
sys.path.append('..')
from configure.settings import DBSelector
from common.BaseService import BaseService
import requests
import warnings
import datetime
warnings.filterwarnings("ignore")
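# NOTE: FundBaseInfoModel and ShareModel are referenced in process_model() below but are
# not imported here; they are assumed to come from the project's ORM model definitions.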
class FundMembers(BaseService):
def __init__(self, kind, date, first_use=False):
super(FundMembers, self).__init__(first_use)
self.lof_url = 'http://query.sse.com.cn/commonQuery.do?=&jsonCallBack=jsonpCallback1681&sqlId=COMMON_SSE_FUND_LOF_SCALE_CX_S&pageHelp.pageSize=10000&FILEDATE={}&_=161146986468'
self.etf_url = 'http://query.sse.com.cn/commonQuery.do?jsonCallBack=jsonpCallback28550&isPagination=true&pageHelp.pageSize=25&pageHelp.pageNo={}&pageHelp.cacheSize=1&sqlId=COMMON_SSE_ZQPZ_ETFZL_XXPL_ETFGM_SEARCH_L&STAT_DATE={}&pageHelp.beginPage={}&pageHelp.endPage=30&_=1611473902414'
self.db = DBSelector()
# self.today ='2021-01-22' # ETF
self.today_='' # TODO failed
self.ETF_COUNT_PER_PAGE = 25
self.url_option_dict = {
'ETF': {'url': self.etf_url, 'date': self.today},
'LOF': {'url': self.lof_url, 'date': self.today_}
}
self.kind = kind.lower()
self.session = requests.Session()
self.logger.info('start...sh fund')
self.LAST_TEXT = ''
if first_use:
self.create_table()
self.db_session = self.get_session()
self.sess = self.db_session()
def crawl_lof(self):
options = self.url_option_dict['LOF']
date = options.get('date')
url = options.get('url')
content = self.get(url.format(date), js=False)
js_data = self.jsonp2json(content)
self.process_lof(js_data)
@property
def headers(self):
return {
"Host": "query.sse.com.cn",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:84.0) Gecko/20100101 Firefox/84.0",
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
"Referer": "http://www.sse.com.cn/market/funddata/volumn/lofvolumn/",
}
def process_lof(self, js_data):
result = js_data.get('result')
for item in result:
code = item['FUND_CODE']
name = item['FUND_ABBR']
date = item['TRADE_DATE']
try:
share = float(item['INTERNAL_VOL'].replace(',', ''))
except Exception as e:
print(e)
share = None
self.process_model(code, name, date, share, 'LOF')
def post(self, url, post_data, _josn=False, binary=False, retry=5):
pass
def crawl_etf(self):
options = self.url_option_dict['ETF']
date = options.get('date')
url = options.get('url')
current_page = 1
while True:
content = self.get(url.format(current_page, date, current_page), _json=False)
js_data = self.jsonp2json(content)
total_count = js_data.get('pageHelp').get('total')
print(f'page : {current_page}')
self.process_etf(js_data)
            max_page = math.ceil(total_count / self.ETF_COUNT_PER_PAGE)  # ETF_COUNT_PER_PAGE entries per page
if current_page > max_page:
break
current_page += 1
def process_etf(self, js_data):
result = js_data.get('result')
for item in result:
code = item['SEC_CODE']
name = item['SEC_NAME']
date = item['STAT_DATE']
share = item['TOT_VOL']
try:
share = float(share)
except Exception as e:
print(e)
self.process_model(code, name, date, share, 'ETF')
def run(self):
        'LOF and ETF'
# for type_, options in self.url_option_dict.items():
if self.kind == 'etf':
self.logger.info('crawling etf .....')
self.crawl_etf()
if self.kind == 'lof':
self.logger.info('crawling lof .....')
self.crawl_lof()
def process_model(self, code, name, date, share, type_):
obj = self.sess.query(FundBaseInfoModel).filter_by(code=code).first()
if not obj:
obj = FundBaseInfoModel(
code=code,
name=name,
category=type_,
invest_type=None,
manager_name=None,
issue_date=None,
)
try:
self.sess.add(obj)
except Exception as e:
print(e)
else:
self.sess.commit()
                print(f'inserted record {code},{date}')
if not self.sess.query(ShareModel).filter_by(code=code, date=date).first():
share_info = ShareModel(
code=code,
date=date,
share=share,
crawltime=datetime.datetime.now(),
)
try:
self.sess.add(share_info)
except Exception as e:
print(e)
else:
                print(f'inserted record {code},{date}')
self.sess.commit()
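# Hypothetical entry point (not in the original file); the date string is a placeholder
# matching the commented-out example near the top of __init__:
#   if __name__ == '__main__':
#       FundMembers(kind='etf', date='2021-01-22', first_use=False).run()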
| 32.811321
| 293
| 0.554533
|
a8140e97d075051e2c1f3cf4fe6cf3e427341d1c
| 72
|
py
|
Python
|
openml_tensorflow/config.py
|
prabhant/openml-tf
|
a3bc69c2aa404cb54bea1c492edb8a6941c1175b
|
[
"BSD-3-Clause"
] | null | null | null |
openml_tensorflow/config.py
|
prabhant/openml-tf
|
a3bc69c2aa404cb54bea1c492edb8a6941c1175b
|
[
"BSD-3-Clause"
] | 12
|
2020-01-28T23:09:20.000Z
|
2022-02-10T00:36:46.000Z
|
openml_tensorflow/config.py
|
prabhant/openml-tensorflow
|
a3bc69c2aa404cb54bea1c492edb8a6941c1175b
|
[
"BSD-3-Clause"
] | 3
|
2020-10-08T13:05:59.000Z
|
2021-05-10T05:34:11.000Z
|
# Config file to define all hyperparameters
epoch = 10
batch_size = 32
| 14.4
| 43
| 0.763889
|
4ac29a581d8deb5f5596fddaebf794a7edf89e12
| 4,489
|
py
|
Python
|
opsplugins/notification_subscription.py
|
OpenSwitchNOS/openswitch-ops-restd
|
baf2f631d53966b7084c7c99e88639d857a041c5
|
[
"Apache-2.0"
] | null | null | null |
opsplugins/notification_subscription.py
|
OpenSwitchNOS/openswitch-ops-restd
|
baf2f631d53966b7084c7c99e88639d857a041c5
|
[
"Apache-2.0"
] | null | null | null |
opsplugins/notification_subscription.py
|
OpenSwitchNOS/openswitch-ops-restd
|
baf2f631d53966b7084c7c99e88639d857a041c5
|
[
"Apache-2.0"
] | 1
|
2021-09-10T08:17:24.000Z
|
2021-09-10T08:17:24.000Z
|
# Copyright (C) 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tornado.log import app_log
from opsvalidator.base import BaseValidator
from opsvalidator import error
from opsvalidator.error import ValidationError
from opsrest.utils.utils import get_column_data_from_row
from opsrest.notifications.constants import (
SUBSCRIBER_NAME,
SUBSCRIPTION_TABLE_LOWER,
SUBSCRIPTION_URI
)
from opsrest.constants import REQUEST_TYPE_READ
from opsrest.parse import parse_url_path
class NotificationSubscriptionValidator(BaseValidator):
resource = SUBSCRIPTION_TABLE_LOWER
def validate_modification(self, validation_args):
if validation_args.is_new:
schema = validation_args.schema
idl = validation_args.idl
subscriber_row = validation_args.p_resource_row
subscription_row = validation_args.resource_row
subscription_schema = validation_args.resource_schema
subscriber_name = get_column_data_from_row(subscriber_row,
SUBSCRIBER_NAME)
resource_uri = get_column_data_from_row(subscription_row,
SUBSCRIPTION_URI)
app_log.debug("Verifying if subscription can be added for "
"subscriber %s" % subscriber_name)
self._verify_duplicate_subscription(subscriber_name,
subscriber_row,
subscription_row,
subscription_schema,
resource_uri)
self._verify_valid_resource_uri(subscriber_name, subscription_row,
resource_uri, schema, idl)
def _verify_valid_resource_uri(self, subscriber_name, subscription_row,
resource_uri, schema, idl):
app_log.debug("Verifying a valid resource URI")
resource_path = parse_url_path(resource_uri, schema, idl,
REQUEST_TYPE_READ)
if resource_path is None:
app_log.debug("Invalid resource URI detected")
details = "Subscriber: %s. " % subscriber_name
details += "Invalid URI %s" % resource_uri
raise ValidationError(error.VERIFICATION_FAILED, details)
def _verify_duplicate_subscription(self, subscriber_name, subscriber_row,
subscription_row, subscription_schema,
resource_uri):
app_log.debug("Verifying if the subscription is a duplicate")
subscriber_subscriptions = \
get_column_data_from_row(subscriber_row,
subscription_schema.plural_name)
# Length == 1 indicates this is the only subscription
if not subscriber_subscriptions or \
len(subscriber_subscriptions) == 1:
app_log.debug("No duplicate resource subscriptions detected.")
return
# Compare the resource URI of the new subscription to parent's
# subscription resource URIs
for sub_name, sub_row in subscriber_subscriptions.iteritems():
# Skip if the subscription row is the current one that is
# being validated
if sub_row == subscription_row:
continue
curr_resource_uri = get_column_data_from_row(sub_row,
SUBSCRIPTION_URI)
if curr_resource_uri == resource_uri:
app_log.debug("Duplicate resource URI detected")
details = "Subscriber: %s. " % subscriber_name
details += "URI %s already exists" % resource_uri
raise ValidationError(error.DUPLICATE_RESOURCE, details)
| 45.343434
| 78
| 0.625306
|
bea118fc1b4eff811557b7db735f8f76d25c1912
| 1,325
|
py
|
Python
|
calvin/actorstore/systemactors/time/Timestamp.py
|
gabrielcercel/calvin-base
|
c0315f100643230d65aed1745e1c22df3e7a7c2c
|
[
"Apache-2.0"
] | 334
|
2015-06-04T15:14:28.000Z
|
2022-02-09T11:14:17.000Z
|
calvin/actorstore/systemactors/time/Timestamp.py
|
gabrielcercel/calvin-base
|
c0315f100643230d65aed1745e1c22df3e7a7c2c
|
[
"Apache-2.0"
] | 89
|
2015-06-13T19:15:35.000Z
|
2019-12-03T19:23:20.000Z
|
calvin/actorstore/systemactors/time/Timestamp.py
|
gabrielcercel/calvin-base
|
c0315f100643230d65aed1745e1c22df3e7a7c2c
|
[
"Apache-2.0"
] | 112
|
2015-06-06T19:16:54.000Z
|
2020-10-19T01:27:55.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, manage, condition, calvinlib
class Timestamp(Actor):
"""
Return the (UTC) time in seconds since Jan 1st 1970
Detailed information
Input:
trigger : any token
Output:
timestamp : floating point number
"""
@manage([])
def init(self):
self.setup()
def did_migrate(self):
self.setup()
def setup(self):
self.time = calvinlib.use('time')
@condition(['trigger'], ['timestamp'])
def action(self, consume_trigger):
return (self.time.timestamp(),)
action_priority = (action,)
requires = ['time']
test_set = [
{
'inports': {'trigger': [True]}
}
]
| 24.090909
| 74
| 0.649811
|
7fd8ce19682ebd5a9e6868984b019f164e25a65f
| 1,440
|
py
|
Python
|
test/__init__.py
|
EmbeddedML-EDAGroup/PIT
|
02897f6977b481d3072e9aa915aec0fe43faeb02
|
[
"Apache-2.0"
] | 2
|
2021-12-18T21:04:29.000Z
|
2022-01-04T14:14:27.000Z
|
test/__init__.py
|
EmbeddedML-EDAGroup/PIT
|
02897f6977b481d3072e9aa915aec0fe43faeb02
|
[
"Apache-2.0"
] | null | null | null |
test/__init__.py
|
EmbeddedML-EDAGroup/PIT
|
02897f6977b481d3072e9aa915aec0fe43faeb02
|
[
"Apache-2.0"
] | null | null | null |
#*----------------------------------------------------------------------------*
#* Copyright (C) 2021 Politecnico di Torino, Italy *
#* SPDX-License-Identifier: Apache-2.0 *
#* *
#* Licensed under the Apache License, Version 2.0 (the "License"); *
#* you may not use this file except in compliance with the License. *
#* You may obtain a copy of the License at *
#* *
#* http://www.apache.org/licenses/LICENSE-2.0 *
#* *
#* Unless required by applicable law or agreed to in writing, software *
#* distributed under the License is distributed on an "AS IS" BASIS, *
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
#* See the License for the specific language governing permissions and *
#* limitations under the License. *
#* *
#* Author: Matteo Risso <matteo.risso@polito.it> *
#*----------------------------------------------------------------------------*
| 75.789474
| 79
| 0.360417
|
cac243b4540637be3fe1c1839a3202227fe026c0
| 3,133
|
py
|
Python
|
books_spider/books_spider/settings.py
|
behappycc/price-tracker
|
3ef8c451dc45f2775d6dbde19bb179d78d5fea97
|
[
"MIT"
] | null | null | null |
books_spider/books_spider/settings.py
|
behappycc/price-tracker
|
3ef8c451dc45f2775d6dbde19bb179d78d5fea97
|
[
"MIT"
] | null | null | null |
books_spider/books_spider/settings.py
|
behappycc/price-tracker
|
3ef8c451dc45f2775d6dbde19bb179d78d5fea97
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Scrapy settings for books_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'books_spider'
SPIDER_MODULES = ['books_spider.spiders']
NEWSPIDER_MODULE = 'books_spider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'books_spider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 0.5
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'books_spider.middlewares.BooksSpiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'books_spider.middlewares.BooksSpiderDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'books_spider.pipelines.BooksSpiderPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 34.428571
| 102
| 0.778168
|
e9bc14f1e5ca9ff72e7653bb55aa3fc7c094bcfd
| 16,060
|
py
|
Python
|
rest-service/manager_rest/test/endpoints/test_blueprints.py
|
yeshess/cloudify-manager
|
04dd199ce7df54355b87e9594f9db9fb1582924b
|
[
"Apache-2.0"
] | null | null | null |
rest-service/manager_rest/test/endpoints/test_blueprints.py
|
yeshess/cloudify-manager
|
04dd199ce7df54355b87e9594f9db9fb1582924b
|
[
"Apache-2.0"
] | null | null | null |
rest-service/manager_rest/test/endpoints/test_blueprints.py
|
yeshess/cloudify-manager
|
04dd199ce7df54355b87e9594f9db9fb1582924b
|
[
"Apache-2.0"
] | null | null | null |
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
import tempfile
import shutil
from nose.plugins.attrib import attr
from manager_rest import archiving
from manager_rest.storage import FileServer
from manager_rest.test import base_test
from cloudify_rest_client.exceptions import CloudifyClientError
from .test_utils import generate_progress_func
@attr(client_min_version=1, client_max_version=base_test.LATEST_API_VERSION)
class BlueprintsTestCase(base_test.BaseServerTestCase):
def test_get_empty(self):
result = self.client.blueprints.list()
self.assertEquals(0, len(result))
def test_get_nonexistent_blueprint(self):
try:
self.client.blueprints.get('15')
except CloudifyClientError, e:
self.assertEqual(404, e.status_code)
def test_server_traceback_on_error(self):
try:
self.client.blueprints.get('15')
except CloudifyClientError, e:
self.assertIsNotNone(e.server_traceback)
def test_post_and_then_search(self):
post_blueprints_response = self.put_file(
*self.put_blueprint_args(blueprint_id='hello_world')).json
self.assertEquals('hello_world', post_blueprints_response['id'])
get_blueprints_response = self.client.blueprints.list()
self.assertEquals(1, len(get_blueprints_response))
self.assertEquals(post_blueprints_response, get_blueprints_response[0])
def test_post_blueprint_already_exists(self):
self.put_file(*self.put_blueprint_args())
post_blueprints_response = self.put_file(*self.put_blueprint_args())
self.assertTrue('already exists' in
post_blueprints_response.json['message'])
self.assertEqual(409, post_blueprints_response.status_code)
def test_put_blueprint_archive(self):
self._test_put_blueprint_archive(archiving.make_targzfile, 'tar.gz')
def test_post_without_application_file_form_data(self):
post_blueprints_response = self.put_file(
*self.put_blueprint_args('blueprint_with_workflows.yaml',
blueprint_id='hello_world')).json
self.assertEquals('hello_world',
post_blueprints_response['id'])
@attr(client_min_version=2,
client_max_version=base_test.LATEST_API_VERSION)
def test_blueprint_description(self):
post_blueprints_response = self.put_file(
*self.put_blueprint_args('blueprint.yaml',
blueprint_id='blueprint')).json
self.assertEquals('blueprint',
post_blueprints_response['id'])
self.assertEquals("this is my blueprint's description",
post_blueprints_response['description'])
def test_get_blueprint_by_id(self):
post_blueprints_response = self.put_file(
*self.put_blueprint_args()).json
get_blueprint_by_id_response = self.get(
'/blueprints/{0}'.format(post_blueprints_response['id'])).json
# setting 'source' field to be None as expected
self.assertEquals(post_blueprints_response,
get_blueprint_by_id_response)
def test_delete_blueprint(self):
post_blueprints_response = self.put_file(
*self.put_blueprint_args()).json
# testing if resources are on fileserver
self.assertTrue(
self.check_if_resource_on_fileserver(
post_blueprints_response['id'], 'blueprint.yaml'))
# deleting the blueprint that was just uploaded
delete_blueprint_response = self.delete(
'/blueprints/{0}'.format(post_blueprints_response['id'])).json
self.assertEquals(post_blueprints_response['id'],
delete_blueprint_response['id'])
# verifying deletion of blueprint
resp = self.get('/blueprints/{0}'.format(post_blueprints_response[
'id']))
self.assertEquals(404, resp.status_code)
# verifying deletion of fileserver resources
self.assertFalse(
self.check_if_resource_on_fileserver(
post_blueprints_response['id'], 'blueprint.yaml'))
# trying to delete a nonexistent blueprint
resp = self.delete('/blueprints/nonexistent-blueprint')
self.assertEquals(404, resp.status_code)
def test_zipped_plugin(self):
self.put_file(*self.put_blueprint_args())
self.check_if_resource_on_fileserver('hello_world',
'plugins/stub-installer.zip')
def test_put_blueprint_archive_from_url(self):
port = 53230
blueprint_id = 'new_blueprint_id'
archive_path = self.archive_mock_blueprint(
archive_func=archiving.make_tarbz2file)
archive_filename = os.path.basename(archive_path)
archive_dir = os.path.dirname(archive_path)
archive_url = 'http://localhost:{0}/{1}'.format(
port, archive_filename)
fs = FileServer(archive_dir, False, port)
fs.start()
try:
self.wait_for_url(archive_url)
blueprint_id = self.client.blueprints.publish_archive(
archive_url,
blueprint_id).id
# verifying blueprint exists
result = self.client.blueprints.get(blueprint_id)
self.assertEqual(blueprint_id, result.id)
finally:
fs.stop()
def test_put_blueprint_archive_from_unavailable_url(self):
blueprint_id = 'new_blueprint_id'
resource_path = '/blueprints/{0}'.format(blueprint_id)
response = self.put(
resource_path,
None,
{'blueprint_archive_url': 'http://www.fake.url/does/not/exist'})
self.assertTrue("not found - can't download blueprint archive" in
response.json['message'])
self.assertEqual(400, response.status_code)
def test_put_blueprint_archive_from_malformed_url(self):
blueprint_id = 'new_blueprint_id'
resource_path = '/blueprints/{0}'.format(blueprint_id)
response = self.put(
resource_path,
None,
{'blueprint_archive_url': 'malformed/url_is.bad'})
self.assertIn("is malformed - can't download blueprint archive",
response.json['message'])
self.assertEqual(400, response.status_code)
def test_put_blueprint_archive_from_url_and_data(self):
blueprint_id = 'new_blueprint_id'
resource_path = '/blueprints/{0}'.format(blueprint_id)
response = self.put(
resource_path,
'data pretending to be the actual blueprint archive data',
{'blueprint_archive_url': 'malformed/url_is.bad'})
self.assertIn("Can't pass both", response.json['message'])
self.assertEqual(400, response.status_code)
def test_put_zip_archive(self):
self._test_put_blueprint_archive(archiving.make_zipfile, 'zip')
def test_put_tar_archive(self):
self._test_put_blueprint_archive(archiving.make_tarfile, 'tar')
def test_put_bz2_archive(self):
self._test_put_blueprint_archive(archiving.make_tarbz2file, 'tar.bz2')
def test_put_unsupported_archive_blueprint(self):
archive_path = tempfile.mkstemp()[1]
with open(archive_path, 'w') as f:
f.write('this is not a valid archive obviously')
response = self.put_file(
'/blueprints/unsupported_archive_bp',
archive_path)
self.assertIn("Blueprint archive is of an unrecognized format.",
response.json['message'])
self.assertEqual(400, response.status_code)
def test_put_blueprint_non_existing_filename(self):
blueprint_id = 'new_blueprint_id'
put_blueprints_response = self.put_file(
*self.put_blueprint_args(blueprint_id=blueprint_id,
blueprint_file_name='non-existing'))
self.assertEqual(
put_blueprints_response.json['message'],
'non-existing does not exist in the application directory')
self.assertEqual(put_blueprints_response.status_code, 400)
def test_put_blueprint_no_default_yaml(self):
blueprint_id = 'new_blueprint_id'
put_blueprints_response = self.put_file(
*self.put_blueprint_args(
blueprint_id=blueprint_id,
blueprint_dir='mock_blueprint_no_default'))
self.assertEqual(
put_blueprints_response.json['message'],
'application directory is missing blueprint.yaml and '
'application_file_name query parameter was not passed'
)
self.assertEqual(put_blueprints_response.status_code, 400)
@attr(client_min_version=2,
client_max_version=base_test.LATEST_API_VERSION)
def test_blueprint_main_file_name(self):
blueprint_id = 'blueprint_main_file_name'
blueprint_file = 'blueprint_with_inputs.yaml'
blueprint_path = os.path.join(
self.get_blueprint_path('mock_blueprint'),
blueprint_file)
response = self.client.blueprints.upload(blueprint_path, blueprint_id)
self.assertEqual(blueprint_file, response.main_file_name)
blueprint = self.client.blueprints.get(blueprint_id)
self.assertEqual(blueprint_file, blueprint.main_file_name)
blueprint = self.client.blueprints.list()[0]
self.assertEqual(blueprint_file, blueprint.main_file_name)
@attr(client_min_version=3,
client_max_version=base_test.LATEST_API_VERSION)
def test_sort_list(self):
blueprint_file = 'blueprint_with_inputs.yaml'
blueprint_path = os.path.join(
self.get_blueprint_path('mock_blueprint'),
blueprint_file)
self.client.blueprints.upload(blueprint_path, '0')
self.client.blueprints.upload(blueprint_path, '1')
blueprints = self.client.blueprints.list(sort='created_at')
self.assertEqual(2, len(blueprints))
self.assertEqual('0', blueprints[0].id)
self.assertEqual('1', blueprints[1].id)
blueprints = self.client.blueprints.list(
sort='created_at', is_descending=True)
self.assertEqual(2, len(blueprints))
self.assertEqual('1', blueprints[0].id)
self.assertEqual('0', blueprints[1].id)
@attr(client_min_version=3,
client_max_version=base_test.LATEST_API_VERSION)
def test_blueprint_upload_progress(self):
tmp_dir = '/tmp/tmp_upload_blueprint'
blueprint_path = self._create_big_blueprint('empty_blueprint.yaml',
tmp_dir)
size = self.client.blueprints.calc_size(blueprint_path)
progress_func = generate_progress_func(
total_size=size,
assert_equal=self.assertEqual,
assert_almost_equal=self.assertAlmostEqual)
try:
self.client.blueprints.upload(blueprint_path, '0',
progress_callback=progress_func)
finally:
self.quiet_delete_directory(tmp_dir)
@attr(client_min_version=3,
client_max_version=base_test.LATEST_API_VERSION)
def test_blueprint_download_progress(self):
tmp_dir = '/tmp/tmp_upload_blueprint'
tmp_local_path = '/tmp/blueprint.bl'
blueprint_path = self._create_big_blueprint('empty_blueprint.yaml',
tmp_dir)
size = self.client.blueprints.calc_size(blueprint_path)
try:
self.client.blueprints.upload(blueprint_path, '0')
progress_func = generate_progress_func(
total_size=size,
assert_equal=self.assertEqual,
assert_almost_equal=self.assertAlmostEqual)
self.client.blueprints.download('0', tmp_local_path, progress_func)
finally:
self.quiet_delete_directory(tmp_dir)
self.quiet_delete(tmp_local_path)
def _create_big_blueprint(self, blueprint, tmp_dir):
"""
Create a large file, and put it in a folder with some blueprint.
This is used in order to create a sizable blueprint archive, for
checking upload/download
:param blueprint: The name of the mock_blueprint file
:param tmp_dir: The folder that will be used to store the blueprint
and the new file
:return: The local path of the mock blueprint
"""
self.quiet_delete_directory(tmp_dir)
os.mkdir(tmp_dir)
blueprint_file = blueprint
blueprint_path = os.path.join(
self.get_blueprint_path('mock_blueprint'),
blueprint_file)
shutil.copy(blueprint_path, tmp_dir)
blueprint_path = os.path.join(tmp_dir, blueprint_file)
tmpfile_path = os.path.join(tmp_dir, 'tmp_file')
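        # Seek ~32 MB into the new file and write a single byte, producing a large
        # (possibly sparse) file without explicitly writing 32 MB of data.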
with open(tmpfile_path, 'wb') as big_file:
big_file.seek(32 * 1024 * 1024 - 1)
big_file.write('\0')
return blueprint_path
@attr(client_min_version=2,
client_max_version=base_test.LATEST_API_VERSION)
def test_blueprint_default_main_file_name(self):
blueprint_id = 'blueprint_default_main_file_name'
blueprint_file = 'blueprint.yaml'
response = self.put_file(
*self.put_blueprint_args(blueprint_id=blueprint_id)).json
self.assertEquals(blueprint_file, response['main_file_name'])
@attr(client_min_version=2,
client_max_version=base_test.LATEST_API_VERSION)
def test_publish_archive_blueprint_main_file_name(self):
port = 53230
blueprint_id = 'publish_archive_blueprint_main_file_name'
main_file_name = 'blueprint_with_workflows.yaml'
archive_path = self.archive_mock_blueprint()
archive_filename = os.path.basename(archive_path)
archive_dir = os.path.dirname(archive_path)
fs = FileServer(archive_dir, False, port)
fs.start()
try:
archive_url = 'http://localhost:{0}/{1}'.format(
port, archive_filename)
self.wait_for_url(archive_url)
response = self.client.blueprints.publish_archive(archive_url,
blueprint_id,
main_file_name)
finally:
fs.stop()
self.assertEqual(blueprint_id, response.id)
self.assertEqual(main_file_name, response.main_file_name)
def _test_put_blueprint_archive(self, archive_func, archive_type):
blueprint_id = 'new_blueprint_id'
put_blueprints_response = self.put_file(
*self.put_blueprint_args(blueprint_id=blueprint_id,
archive_func=archive_func)).json
self.assertEqual(blueprint_id, put_blueprints_response['id'])
url = self._version_url(
'/blueprints/{0}/archive'.format(blueprint_id))
response = self.app.get(url)
archive_filename = '{0}.{1}'.format(blueprint_id, archive_type)
self.assertTrue(archive_filename in
response.headers['Content-Disposition'])
self.assertTrue(archive_filename in
response.headers['X-Accel-Redirect'])
| 41.932115
| 79
| 0.657472
|
f880b7fcfa1b367483b71686308d1f4aebb9ff40
| 1,154
|
py
|
Python
|
quickstartproject/production.py
|
vmagelo/msdocs-python-django-webapp-quickstart
|
10ea918c191ec040000e1558d30a89e0a7ad9b28
|
[
"MIT"
] | 1
|
2022-03-21T13:34:34.000Z
|
2022-03-21T13:34:34.000Z
|
quickstartproject/production.py
|
vmagelo/msdocs-python-django-webapp-quickstart
|
10ea918c191ec040000e1558d30a89e0a7ad9b28
|
[
"MIT"
] | 3
|
2022-03-07T23:40:42.000Z
|
2022-03-08T22:38:37.000Z
|
quickstartproject/production.py
|
vmagelo/msdocs-python-django-webapp-quickstart
|
10ea918c191ec040000e1558d30a89e0a7ad9b28
|
[
"MIT"
] | 24
|
2022-01-18T22:15:19.000Z
|
2022-03-29T21:20:52.000Z
|
from .settings import *
import os
# Configure the domain name using the environment variable
# that Azure automatically creates for us.
ALLOWED_HOSTS = [os.environ['WEBSITE_HOSTNAME']] if 'WEBSITE_HOSTNAME' in os.environ else []
# WhiteNoise configuration
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
# Add whitenoise middleware after the security middleware
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| 50.173913
| 92
| 0.62825
|
2cdd66db1d2add1a41dbe9e77aa2807eeacd2af8
| 101
|
py
|
Python
|
package_tool/demo/hello.py
|
gottaegbert/penter
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
[
"MIT"
] | 13
|
2020-01-04T07:37:38.000Z
|
2021-08-31T05:19:58.000Z
|
package_tool/demo/hello.py
|
gottaegbert/penter
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
[
"MIT"
] | 3
|
2020-06-05T22:42:53.000Z
|
2020-08-24T07:18:54.000Z
|
package_tool/demo/hello.py
|
gottaegbert/penter
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
[
"MIT"
] | 9
|
2020-10-19T04:53:06.000Z
|
2021-08-31T05:20:01.000Z
|
def hello():
f = open('testdd.txt', 'r')
content = f.read()
f.close()
print(content)
| 16.833333
| 31
| 0.524752
|
96bd973cfd337315c13c9609bb9f013d032de298
| 4,070
|
py
|
Python
|
batracker/tests/localisation/test_spiesberger_wahlberg.py
|
thejasvibr/batracker
|
def2ae9a0f18df0b9b95d67a203d2afd8be0f2ce
|
[
"MIT"
] | null | null | null |
batracker/tests/localisation/test_spiesberger_wahlberg.py
|
thejasvibr/batracker
|
def2ae9a0f18df0b9b95d67a203d2afd8be0f2ce
|
[
"MIT"
] | null | null | null |
batracker/tests/localisation/test_spiesberger_wahlberg.py
|
thejasvibr/batracker
|
def2ae9a0f18df0b9b95d67a203d2afd8be0f2ce
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Tests for Spiesberger & Wahlberg 2002
"""
import unittest
import numpy as np
np.random.seed(82319)
import scipy.spatial as spatial
from batracker.localisation import spiesberger_wahlberg_2002 as sw02
class SimpleTest(unittest.TestCase):
'''With tristar and one position
'''
def setUp(self):
# a tristar config
self.source_position = np.array([10,8,-2])
def make_tristar_geom(self):
R = 1.2
theta = np.pi/3
random_noise = np.random.normal(0,10**-6,4)
array_geom = np.array([[0,0.0,0],
[-R*np.sin(theta),0.0,-R*np.cos(theta)],
[ R*np.sin(theta),0.0,-R*np.cos(theta)],
[0,0.0,R]])
array_geom[:,1] = random_noise
return array_geom
def calculate_d_matrix(self, array_geom, source_position):
# create source positions
D_matrix = np.apply_along_axis(spatial.distance.euclidean, 1,array_geom,
source_position)
# %% Range difference , ref. Range of mic 4 (obtained from TDOAs)
d_matrix = D_matrix - D_matrix[0]
return d_matrix[1:].reshape(-1,1)
def test_simple(self):
#self.array_geom = self.array_geom - self.array_geom[0,:]
self.array_geom = self.make_tristar_geom()
self.d = self.calculate_d_matrix(self.array_geom, self.source_position)
output_positions = sw02.spiesberger_wahlberg_solution(self.array_geom, self.d)
num_matches = []
for each in output_positions:
num_matches.append(np.allclose(self.source_position, each, atol=10**-2))
self.assertEqual(sum(num_matches), 1)
def test_multiple_points(self):
self.array_geom = self.make_tristar_geom()
x_rand = np.random.choice(np.linspace(-10,10,50), 30)
y_rand = np.random.choice(np.linspace(0.2,20,50), 30)
z_rand = np.random.choice(np.linspace(-2,10,50), 30)
multiple_points = np.column_stack((x_rand, y_rand, z_rand))
all_calc_positions = []
for i, position in enumerate(multiple_points):
d = self.calculate_d_matrix(self.array_geom, position)
output_positions = sw02.spiesberger_wahlberg_solution(self.array_geom, d)
valid_position = list(filter(lambda X: X[1]>0, output_positions))[0]
all_calc_positions.append(valid_position)
all_true = np.allclose(np.array(all_calc_positions).reshape(-1,3),
multiple_points, atol=1e-2)
self.assertTrue(all_true)
def test_withgreaterthan4mics(self):
'''
A 15 microphone array placed everywhere
'''
n_mics = 15
num_sources = 20
self.array_geom = np.random.normal(0,1,n_mics*3).reshape(n_mics,-1)
self.array_geom *= 5
x_rand = np.linspace(-10,13,num_sources)
y_rand = np.linspace(0.2,20,num_sources)
z_rand = np.linspace(-2,10,num_sources)
for each in [x_rand, y_rand, z_rand]:
np.random.shuffle(each)
multiple_points = np.column_stack((x_rand, y_rand, z_rand))
all_calc_positions = []
for i in range(multiple_points.shape[0]):
position = multiple_points[i,:]
d = self.calculate_d_matrix(self.array_geom, position)
output_positions = sw02.spiesberger_wahlberg_solution(self.array_geom, d)
all_calc_positions.append(output_positions)
match = []
for i,(candidate1, candidate2) in enumerate(all_calc_positions):
match_result = np.logical_or(np.allclose(candidate1, multiple_points[i,:], atol=1e-2),
np.allclose(candidate2, multiple_points[i,:], atol=1e-2))
match.append(match_result)
self.assertTrue(np.all(match))
if __name__ == '__main__':
unittest.main()
| 37.33945
| 98
| 0.59828
|
7aa5cc5bf947184c4d1da0e6e7c30f8e4ae94f6a
| 482
|
py
|
Python
|
trick_source/er7_utils/sims/SIM_ball_L1/RUN_test/long_er7_velocity_verlet.py
|
gilbertguoze/trick
|
f0537efb0fa3cb5c0c84e36b60f055c1d1c60d21
|
[
"NASA-1.3"
] | null | null | null |
trick_source/er7_utils/sims/SIM_ball_L1/RUN_test/long_er7_velocity_verlet.py
|
gilbertguoze/trick
|
f0537efb0fa3cb5c0c84e36b60f055c1d1c60d21
|
[
"NASA-1.3"
] | null | null | null |
trick_source/er7_utils/sims/SIM_ball_L1/RUN_test/long_er7_velocity_verlet.py
|
gilbertguoze/trick
|
f0537efb0fa3cb5c0c84e36b60f055c1d1c60d21
|
[
"NASA-1.3"
] | null | null | null |
# This is one of several ways to include files from the input file
# execfile("Modified_data/auto_test.dr")
# execfile("Modified_data/data_record.dr")
ball.state.input.print_off = 1
# Set the freeze frame rate
# trick.exec_set_freeze_frame(0.10)
# Configure integration
ball.trick_technique = trick.User_Defined
ball.er7_technique = trick.Integration.VelocityVerlet
ball.use_trick_integ = False
end_time = 600000.0 / 2
# Set the stop time
trick.exec_set_terminate_time(end_time)
| 25.368421
| 63
| 0.794606
|
37544c9ea99ab0b54570180f27d6e2583fa511ff
| 372
|
py
|
Python
|
Day-4/day-4-2-exercise/banker-roulette2.py
|
MihirMore/100daysofcode-Python
|
947d91842639c04ee7d23cc82bf04053d3982a85
|
[
"MIT"
] | 4
|
2021-04-09T20:01:22.000Z
|
2022-03-18T20:49:58.000Z
|
Day-4/day-4-2-exercise/banker-roulette2.py
|
MihirMore/100daysofcode-Python
|
947d91842639c04ee7d23cc82bf04053d3982a85
|
[
"MIT"
] | null | null | null |
Day-4/day-4-2-exercise/banker-roulette2.py
|
MihirMore/100daysofcode-Python
|
947d91842639c04ee7d23cc82bf04053d3982a85
|
[
"MIT"
] | null | null | null |
# Split string method
names_string = input("Give me everybody's names, separated by a comma. ")
names = names_string.split(", ")
# 🚨 Don't change the code above 👆
#Write your code below this line 👇
import random
random_choice = random.randint(0, len(names)-1)
person_who_will_pay = names[random_choice]
print(f"{person_who_will_pay} is going to buy the meal today!")
| 24.8
| 73
| 0.741935
|
011895383120f527c9cb92ad4e4f2c565141b215
| 18,098
|
py
|
Python
|
tests/test_transforms.py
|
IRDonch/datumaro
|
d029e67549b7359c887bd15039997bd8bbae7c0c
|
[
"MIT"
] | null | null | null |
tests/test_transforms.py
|
IRDonch/datumaro
|
d029e67549b7359c887bd15039997bd8bbae7c0c
|
[
"MIT"
] | null | null | null |
tests/test_transforms.py
|
IRDonch/datumaro
|
d029e67549b7359c887bd15039997bd8bbae7c0c
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
import logging as log
import numpy as np
from datumaro.components.annotation import (
AnnotationType, Bbox, Label, LabelCategories, Mask, MaskCategories, Points,
PointsCategories, Polygon, PolyLine,
)
from datumaro.components.extractor import DatasetItem
from datumaro.components.project import Dataset
from datumaro.util.test_utils import compare_datasets
import datumaro.plugins.transforms as transforms
import datumaro.util.mask_tools as mask_tools
from .requirements import Requirements, mark_requirement
class TransformsTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_reindex(self):
source = Dataset.from_iterable([
DatasetItem(id=10),
DatasetItem(id=10, subset='train'),
DatasetItem(id='a', subset='val'),
])
expected = Dataset.from_iterable([
DatasetItem(id=5),
DatasetItem(id=6, subset='train'),
DatasetItem(id=7, subset='val'),
])
actual = transforms.Reindex(source, start=5)
compare_datasets(self, expected, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_mask_to_polygons(self):
source = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 10, 3)), annotations=[
Mask(np.array([
[0, 1, 1, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]),
),
]),
])
expected = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 10, 3)), annotations=[
Polygon([1, 0, 3, 2, 3, 0, 1, 0]),
Polygon([5, 0, 5, 3, 8, 0, 5, 0]),
]),
])
actual = transforms.MasksToPolygons(source)
compare_datasets(self, expected, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_mask_to_polygons_small_polygons_message(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 10, 3)), annotations=[
Mask(np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0],
]),
),
]),
])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 10, 3))), ])
with self.assertLogs(level=log.DEBUG) as logs:
actual = transforms.MasksToPolygons(source_dataset)
compare_datasets(self, target_dataset, actual)
self.assertRegex('\n'.join(logs.output), 'too small polygons')
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_polygons_to_masks(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 10, 3)), annotations=[
Polygon([0, 0, 4, 0, 4, 4]),
Polygon([5, 0, 9, 0, 5, 5]),
]),
])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 10, 3)), annotations=[
Mask(np.array([
[0, 0, 0, 0, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]),
),
Mask(np.array([
[0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]),
),
]),
])
actual = transforms.PolygonsToMasks(source_dataset)
compare_datasets(self, target_dataset, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_crop_covered_segments(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)), annotations=[
# The mask is partially covered by the polygon
Mask(np.array([
[0, 0, 1, 1, 1],
[0, 0, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0]],
), z_order=0),
Polygon([1, 1, 4, 1, 4, 4, 1, 4], z_order=1),
]),
])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)), annotations=[
Mask(np.array([
[0, 0, 1, 1, 1],
[0, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 0],
[1, 1, 1, 0, 0]],
), z_order=0),
Polygon([1, 1, 4, 1, 4, 4, 1, 4], z_order=1),
]),
])
actual = transforms.CropCoveredSegments(source_dataset)
compare_datasets(self, target_dataset, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_merge_instance_segments(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
Mask(np.array([
[0, 0, 1, 1, 1],
[0, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 0],
[1, 1, 1, 0, 0]],
),
z_order=0, group=1),
Polygon([1, 1, 4, 1, 4, 4, 1, 4],
z_order=1, group=1),
Polygon([0, 0, 0, 2, 2, 2, 2, 0],
z_order=1),
]
),
])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
Mask(np.array([
[0, 0, 1, 1, 1],
[0, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 0],
[1, 1, 1, 0, 0]],
),
z_order=0, group=1),
Mask(np.array([
[1, 1, 0, 0, 0],
[1, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
),
z_order=1),
]
),
])
actual = transforms.MergeInstanceSegments(source_dataset,
include_polygons=True)
compare_datasets(self, target_dataset, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_map_subsets(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='a'),
DatasetItem(id=2, subset='b'),
DatasetItem(id=3, subset='c'),
])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, subset=''),
DatasetItem(id=2, subset='a'),
DatasetItem(id=3, subset='c'),
])
actual = transforms.MapSubsets(source_dataset,
{ 'a': '', 'b': 'a' })
compare_datasets(self, target_dataset, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_shapes_to_boxes(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
Mask(np.array([
[0, 0, 1, 1, 1],
[0, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 0],
[1, 1, 1, 0, 0]],
), id=1),
Polygon([1, 1, 4, 1, 4, 4, 1, 4], id=2),
PolyLine([1, 1, 2, 1, 2, 2, 1, 2], id=3),
Points([2, 2, 4, 2, 4, 4, 2, 4], id=4),
]
),
])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
Bbox(0, 0, 4, 4, id=1),
Bbox(1, 1, 3, 3, id=2),
Bbox(1, 1, 1, 1, id=3),
Bbox(2, 2, 2, 2, id=4),
]
),
])
actual = transforms.ShapesToBoxes(source_dataset)
compare_datasets(self, target_dataset, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_id_from_image(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image='path.jpg'),
DatasetItem(id=2),
])
target_dataset = Dataset.from_iterable([
DatasetItem(id='path', image='path.jpg'),
DatasetItem(id=2),
])
actual = transforms.IdFromImageName(source_dataset)
compare_datasets(self, target_dataset, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_boxes_to_masks(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
Bbox(0, 0, 3, 3, z_order=1),
Bbox(0, 0, 3, 1, z_order=2),
Bbox(0, 2, 3, 1, z_order=3),
]
),
])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
Mask(np.array([
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
),
z_order=1),
Mask(np.array([
[1, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
),
z_order=2),
Mask(np.array([
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
),
z_order=3),
]
),
])
actual = transforms.BoxesToMasks(source_dataset)
compare_datasets(self, target_dataset, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_random_split(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, subset="a"),
DatasetItem(id=2, subset="a"),
DatasetItem(id=3, subset="b"),
DatasetItem(id=4, subset="b"),
DatasetItem(id=5, subset="b"),
DatasetItem(id=6, subset=""),
DatasetItem(id=7, subset=""),
])
actual = transforms.RandomSplit(source_dataset, splits=[
('train', 4.0 / 7.0),
('test', 3.0 / 7.0),
])
self.assertEqual(4, len(actual.get_subset('train')))
self.assertEqual(3, len(actual.get_subset('test')))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_random_split_gives_error_on_wrong_ratios(self):
source_dataset = Dataset.from_iterable([DatasetItem(id=1)])
with self.assertRaises(Exception):
transforms.RandomSplit(source_dataset, splits=[
('train', 0.5),
('test', 0.7),
])
with self.assertRaises(Exception):
transforms.RandomSplit(source_dataset, splits=[])
with self.assertRaises(Exception):
transforms.RandomSplit(source_dataset, splits=[
('train', -0.5),
('test', 1.5),
])
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_remap_labels(self):
src_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[
# Should be remapped
Label(1),
Bbox(1, 2, 3, 4, label=2),
Mask(image=np.array([1]), label=3),
# Should be deleted
Polygon([1, 1, 2, 2, 3, 4], label=4),
# Should be kept
PolyLine([1, 3, 4, 2, 5, 6]),
Bbox(4, 3, 2, 1, label=5),
])
], categories={
AnnotationType.label: LabelCategories.from_iterable(
'label%s' % i for i in range(6)),
AnnotationType.mask: MaskCategories(
colormap=mask_tools.generate_colormap(6)),
})
dst_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[
Label(1),
Bbox(1, 2, 3, 4, label=0),
Mask(image=np.array([1]), label=1),
PolyLine([1, 3, 4, 2, 5, 6], label=None),
Bbox(4, 3, 2, 1, label=2),
]),
], categories={
AnnotationType.label: LabelCategories.from_iterable(
['label0', 'label9', 'label5']),
AnnotationType.mask: MaskCategories(colormap={
k: v for k, v in mask_tools.generate_colormap(6).items()
if k in { 0, 1, 3, 5 }
})
})
actual = transforms.RemapLabels(src_dataset, mapping={
'label1': 'label9', # rename & join with new label9 (from label3)
'label2': 'label0', # rename & join with existing label0
            'label3': 'label9', # rename & join with new label9 (from label1)
'label4': '', # delete the label and associated annotations
# 'label5' - unchanged
}, default='keep')
compare_datasets(self, dst_dataset, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_remap_labels_delete_unspecified(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[
Label(0, id=0), # will be removed
Label(1, id=1),
Bbox(1, 2, 3, 4, label=None),
])
], categories=['label0', 'label1'])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[
Label(0, id=1),
]),
], categories=['label1'])
actual = transforms.RemapLabels(source_dataset,
mapping={ 'label1': 'label1' }, default='delete')
compare_datasets(self, target_dataset, actual)
@mark_requirement(Requirements.DATUM_BUG_314)
def test_remap_labels_ignore_missing_labels_in_secondary_categories(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[
Label(0),
])
], categories={
AnnotationType.label: LabelCategories.from_iterable(['a', 'b', 'c']),
AnnotationType.points: PointsCategories.from_iterable([]), # all missing
AnnotationType.mask: MaskCategories.generate(2) # no c color
})
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[
Label(0),
]),
], categories={
AnnotationType.label: LabelCategories.from_iterable(['d', 'e', 'f']),
AnnotationType.points: PointsCategories.from_iterable([]),
AnnotationType.mask: MaskCategories.generate(2)
})
actual = transforms.RemapLabels(source_dataset,
mapping={ 'a': 'd', 'b': 'e', 'c': 'f' }, default='delete')
compare_datasets(self, target_dataset, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_transform_labels(self):
src_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[
Label(1),
Bbox(1, 2, 3, 4, label=2),
Bbox(1, 3, 3, 3),
Mask(image=np.array([1]), label=3),
Polygon([1, 1, 2, 2, 3, 4], label=4),
PolyLine([1, 3, 4, 2, 5, 6], label=5)
])
], categories=['label%s' % i for i in range(6)])
dst_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[
Label(1),
Label(2),
Label(3),
Label(4),
Label(5)
]),
], categories=['label%s' % i for i in range(6)])
actual = transforms.AnnsToLabels(src_dataset)
compare_datasets(self, dst_dataset, actual)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_bboxes_values_decrement_transform(self):
src_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[
Label(1),
Bbox(2, 3, 3, 4, label=2),
Bbox(1.3, 3.5, 3.33, 3.12)
])
], categories=['label%s' % i for i in range(6)])
dst_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[
Label(1),
Bbox(1, 2, 3, 4, label=2),
Bbox(0.3, 2.5, 3.33, 3.12)
]),
], categories=['label%s' % i for i in range(6)])
actual = transforms.BboxValuesDecrement(src_dataset)
compare_datasets(self, dst_dataset, actual)
| 36.635628
| 84
| 0.460382
|
115c383542c6f798ff71a9010e2c654939e70707
| 17,028
|
py
|
Python
|
docs/autogen.py
|
braingineer/keras
|
86dfaacb7296a4e3bb46f88ea08c24552c824ea1
|
[
"MIT"
] | 2
|
2017-09-13T21:17:54.000Z
|
2017-09-26T02:38:00.000Z
|
docs/autogen.py
|
braingineer/keras
|
86dfaacb7296a4e3bb46f88ea08c24552c824ea1
|
[
"MIT"
] | null | null | null |
docs/autogen.py
|
braingineer/keras
|
86dfaacb7296a4e3bb46f88ea08c24552c824ea1
|
[
"MIT"
] | 1
|
2019-11-11T10:51:10.000Z
|
2019-11-11T10:51:10.000Z
|
# -*- coding: utf-8 -*-
'''
General documentation architecture:
Home
Index
- Getting started
Getting started with the sequential model
Getting started with the functional api
Examples
FAQ
Installation guide
- Models
About Keras models
explain when one should use Sequential or functional API
explain compilation step
explain weight saving, weight loading
explain serialization, deserialization
Sequential
Model (functional API)
- Layers
About Keras layers
explain common layer functions: get_weights, set_weights, get_config
explain input_shape
explain usage on non-Keras tensors
Core layers
Convolutional
Recurrent
Embeddings
Normalization
Advanced activations
Noise
- Preprocessing
Image preprocessing
Text preprocessing
Sequence preprocessing
Objectives
Optimizers
Activations
Callbacks
Datasets
Backend
Initializations
Regularizers
Constraints
Visualization
Scikit-learn API
'''
from __future__ import print_function
import re
import inspect
import os
import shutil
from keras.layers import convolutional
from keras.layers import recurrent
from keras.layers import core
from keras.layers import noise
from keras.layers import normalization
from keras.layers import advanced_activations
from keras.layers import embeddings
from keras.layers import wrappers
from keras import optimizers
from keras import callbacks
from keras import models
from keras.engine import topology
from keras import objectives
from keras import backend
from keras import constraints
from keras import activations
from keras import regularizers
EXCLUDE = {
'Optimizer',
'Wrapper',
'get_session',
'set_session',
}
PAGES = [
{
'page': 'models/sequential.md',
'functions': [
models.Sequential.compile,
models.Sequential.fit,
models.Sequential.evaluate,
models.Sequential.predict,
models.Sequential.predict_classes,
models.Sequential.predict_proba,
models.Sequential.train_on_batch,
models.Sequential.test_on_batch,
models.Sequential.predict_on_batch,
models.Sequential.fit_generator,
models.Sequential.evaluate_generator,
],
},
{
'page': 'models/model.md',
'functions': [
models.Model.compile,
models.Model.fit,
models.Model.evaluate,
models.Model.predict,
models.Model.train_on_batch,
models.Model.test_on_batch,
models.Model.predict_on_batch,
models.Model.fit_generator,
models.Model.evaluate_generator,
models.Model.get_layer,
]
},
{
'page': 'layers/core.md',
'classes': [
core.Dense,
core.Activation,
core.Dropout,
core.Flatten,
core.Reshape,
core.Permute,
core.RepeatVector,
topology.Merge,
core.Lambda,
core.ActivityRegularization,
core.Masking,
core.Highway,
core.MaxoutDense,
core.TimeDistributedDense,
],
},
{
'page': 'layers/convolutional.md',
'classes': [
convolutional.Convolution1D,
convolutional.Convolution2D,
convolutional.Convolution3D,
convolutional.MaxPooling1D,
convolutional.MaxPooling2D,
convolutional.MaxPooling3D,
convolutional.AveragePooling1D,
convolutional.AveragePooling2D,
convolutional.AveragePooling3D,
convolutional.UpSampling1D,
convolutional.UpSampling2D,
convolutional.UpSampling3D,
convolutional.ZeroPadding1D,
convolutional.ZeroPadding2D,
convolutional.ZeroPadding3D,
],
},
{
'page': 'layers/recurrent.md',
'classes': [
recurrent.Recurrent,
recurrent.SimpleRNN,
recurrent.GRU,
recurrent.LSTM,
],
},
{
'page': 'layers/embeddings.md',
'classes': [
embeddings.Embedding,
],
},
{
'page': 'layers/normalization.md',
'classes': [
normalization.BatchNormalization,
],
},
{
'page': 'layers/advanced-activations.md',
'all_module_classes': [advanced_activations],
},
{
'page': 'layers/noise.md',
'all_module_classes': [noise],
},
{
'page': 'layers/wrappers.md',
'all_module_classes': [wrappers],
},
{
'page': 'optimizers.md',
'all_module_classes': [optimizers],
},
{
'page': 'callbacks.md',
'all_module_classes': [callbacks],
},
{
'page': 'backend.md',
'all_module_functions': [backend],
},
]
ROOT = 'http://keras.io/'
def get_earliest_class_that_defined_member(member, cls):
ancestors = get_classes_ancestors([cls])
result = None
for ancestor in ancestors:
if member in dir(ancestor):
result = ancestor
if not result:
return cls
return result
def get_classes_ancestors(classes):
ancestors = []
for cls in classes:
ancestors += cls.__bases__
filtered_ancestors = []
for ancestor in ancestors:
if ancestor.__name__ in ['object']:
continue
filtered_ancestors.append(ancestor)
if filtered_ancestors:
return filtered_ancestors + get_classes_ancestors(filtered_ancestors)
else:
return filtered_ancestors
def get_function_signature(function, method=True):
signature = inspect.getargspec(function)
defaults = signature.defaults
if method:
args = signature.args[1:]
else:
args = signature.args
if defaults:
kwargs = zip(args[-len(defaults):], defaults)
args = args[:-len(defaults)]
else:
kwargs = []
st = '%s.%s(' % (function.__module__, function.__name__)
for a in args:
st += str(a) + ', '
for a, v in kwargs:
if type(v) == str:
v = '\'' + v + '\''
elif type(v) == unicode:
v = 'u\'' + v + '\''
st += str(a) + '=' + str(v) + ', '
if kwargs or args:
return st[:-2] + ')'
else:
return st + ')'
def get_class_signature(cls):
try:
class_signature = get_function_signature(cls.__init__)
class_signature = class_signature.replace('__init__', cls.__name__)
except:
# in case the class inherits from object and does not
# define __init__
class_signature = cls.__module__ + '.' + cls.__name__ + '()'
return class_signature
def class_to_docs_link(cls):
module_name = cls.__module__
assert module_name[:6] == 'keras.'
module_name = module_name[6:]
link = ROOT + module_name.replace('.', '/') + '#' + cls.__name__.lower()
return link
def class_to_source_link(cls):
module_name = cls.__module__
assert module_name[:6] == 'keras.'
path = module_name.replace('.', '/')
path += '.py'
line = inspect.getsourcelines(cls)[-1]
link = 'https://github.com/fchollet/keras/blob/master/' + path + '#L' + str(line)
return '[[source]](' + link + ')'
def code_snippet(snippet):
result = '```python\n'
result += snippet + '\n'
result += '```\n'
return result
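# The two process_*_docstring helpers below rewrite Keras docstring conventions
# ('# Section' headings and 'name: description' argument lines) into markdown
# emphasis/list syntax and normalise the indentation.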
def process_class_docstring(docstring):
docstring = re.sub(r'\n # (.*)\n',
r'\n __\1__\n\n',
docstring)
docstring = re.sub(r' ([^\s\\]+):(.*)\n',
r' - __\1__:\2\n',
docstring)
docstring = docstring.replace(' ' * 5, '\t\t')
docstring = docstring.replace(' ' * 3, '\t')
docstring = docstring.replace(' ', '')
return docstring
def process_function_docstring(docstring):
docstring = re.sub(r'\n # (.*)\n',
r'\n __\1__\n\n',
docstring)
docstring = re.sub(r'\n # (.*)\n',
r'\n __\1__\n\n',
docstring)
docstring = re.sub(r' ([^\s\\]+):(.*)\n',
r' - __\1__:\2\n',
docstring)
docstring = docstring.replace(' ' * 6, '\t\t')
docstring = docstring.replace(' ' * 4, '\t')
docstring = docstring.replace(' ', '')
return docstring
print('Cleaning up existing sources directory.')
if os.path.exists('sources'):
shutil.rmtree('sources')
print('Populating sources directory with templates.')
for subdir, dirs, fnames in os.walk('templates'):
for fname in fnames:
new_subdir = subdir.replace('templates', 'sources')
if not os.path.exists(new_subdir):
os.makedirs(new_subdir)
if fname[-3:] == '.md':
fpath = os.path.join(subdir, fname)
new_fpath = fpath.replace('templates', 'sources')
shutil.copy(fpath, new_fpath)
print('Starting autogeneration.')
for page_data in PAGES:
blocks = []
classes = page_data.get('classes', [])
for module in page_data.get('all_module_classes', []):
module_classes = []
for name in dir(module):
if name[0] == '_' or name in EXCLUDE:
continue
module_member = getattr(module, name)
if inspect.isclass(module_member):
cls = module_member
if cls.__module__ == module.__name__:
if cls not in module_classes:
module_classes.append(cls)
module_classes.sort(key=lambda x: id(x))
classes += module_classes
for cls in classes:
subblocks = []
signature = get_class_signature(cls)
subblocks.append('<span style="float:right;">' + class_to_source_link(cls) + '</span>')
subblocks.append('### ' + cls.__name__ + '\n')
subblocks.append(code_snippet(signature))
docstring = cls.__doc__
if docstring:
subblocks.append(process_class_docstring(docstring))
blocks.append('\n'.join(subblocks))
functions = page_data.get('functions', [])
for module in page_data.get('all_module_functions', []):
module_functions = []
for name in dir(module):
if name[0] == '_' or name in EXCLUDE:
continue
module_member = getattr(module, name)
if inspect.isfunction(module_member):
function = module_member
if module.__name__ in function.__module__:
if function not in module_functions:
module_functions.append(function)
module_functions.sort(key=lambda x: id(x))
functions += module_functions
for function in functions:
subblocks = []
signature = get_function_signature(function, method=False)
signature = signature.replace(function.__module__ + '.', '')
subblocks.append('### ' + function.__name__ + '\n')
subblocks.append(code_snippet(signature))
docstring = function.__doc__
if docstring:
subblocks.append(process_function_docstring(docstring))
blocks.append('\n\n'.join(subblocks))
mkdown = '\n----\n\n'.join(blocks)
# save module page.
# Either insert content into existing page,
# or create page otherwise
page_name = page_data['page']
path = os.path.join('sources', page_name)
if os.path.exists(path):
template = open(path).read()
assert '{{autogenerated}}' in template, ('Template found for ' + path +
' but missing {{autogenerated}} tag.')
mkdown = template.replace('{{autogenerated}}', mkdown)
print('...inserting autogenerated content into template:', path)
else:
print('...creating new page with autogenerated content:', path)
subdir = os.path.dirname(path)
if not os.path.exists(subdir):
os.makedirs(subdir)
open(path, 'w').write(mkdown)
# covered_so_far = set()
# for module, module_name in MODULES:
# class_pages = []
# for name in dir(module):
# if name in SKIP:
# continue
# if name[0] == '_':
# continue
# module_member = getattr(module, name)
# if module_member in covered_so_far:
# continue
# if inspect.isclass(module_member):
# cls = module_member
# if cls.__module__ == module_name:
# try:
# class_signature = get_function_signature(cls.__init__)
# class_signature = class_signature.replace('__init__', cls.__name__)
# except:
# # in case the class inherits from object and does not
# # define __init__
# class_signature = module_name + '.' + cls.__name__ + '()'
# functions = []
# functions_not_defined_here = []
# for name in dir(cls):
# if name in SKIP:
# continue
# if name[0] == '_':
# continue
# cls_member = getattr(cls, name)
# if inspect.isfunction(cls_member):
# function = cls_member
# signature = inspect.getargspec(function)
# defaults = signature.defaults
# args = signature.args[1:]
# if defaults:
# kwargs = zip(args[-len(defaults):], defaults)
# args = args[:-len(defaults)]
# else:
# kwargs = []
# defined_by = get_earliest_class_that_defined_member(function.__name__, cls)
# if cls == defined_by:
# functions.append(function)
# else:
# functions_not_defined_here.append((function, defined_by))
# blocks = []
# blocks.append('<span style="float:right;">' + class_to_source_link(cls) + '</span>')
# blocks.append('# ' + cls.__name__ + '\n')
# blocks.append(code_snippet(class_signature))
# docstring = cls.__doc__
# if docstring:
# blocks.append(process_class_docstring(docstring))
# if cls.__name__ in INCLUDE_functionS_FOR:
# if functions or functions_not_defined_here:
# blocks.append('### functions\n')
# for function in functions:
# signature = get_function_signature(function)
# signature = signature.replace(module_name + '.', '')
# blocks.append(code_snippet(signature))
# docstring = function.__doc__
# if docstring:
# blocks.append(process_function_docstring(docstring))
# for function, defined_by in functions_not_defined_here:
# signature = get_function_signature(function)
# function_module_name = function.__module__
# signature = signature.replace(function_module_name + '.', '')
# link = '[' + defined_by.__name__ + '](' + class_to_docs_link(defined_by) + ')'
# blocks.append(code_snippet(signature))
# blocks.append('Defined by ' + link + '.\n')
# mkdown = '\n'.join(blocks)
# class_pages.append((id(cls), mkdown))
# covered_so_far.add(module_member)
# class_pages.sort(key=lambda x: x[0])
# class_pages = [x[1] for x in class_pages]
# module_page = '\n----\n\n'.join(class_pages)
# # save module page.
# # Either insert content into existing page,
# # or create page otherwise
# path = 'sources/' + module_name.replace('.', '/')[6:] + '.md'
# if os.path.exists(path):
# template = open(path).read()
# assert '{{autogenerated}}' in template, ('Template found for ' + path +
# ' but missing {{autogenerated}} tag.')
# module_page = template.replace('{{autogenerated}}', module_page)
# print('...inserting autogenerated content into template:', path)
# else:
# print('...creating new page with autogenerated content:', path)
# subdir = os.path.dirname(path)
# if not os.path.exists(subdir):
# os.makedirs(subdir)
# open(path, 'w').write(module_page)
| 32.93617
| 108
| 0.558551
|
fe1ca719f2ce402b30e53ab99808b6115e75b405
| 4,891
|
py
|
Python
|
RNNG/scripts/evaluation.py
|
Psarpei/Recognition-of-logical-document-structures
|
5ab3cc44f43409191b2b2903491f8e1ac4a34026
|
[
"MIT"
] | 4
|
2020-12-13T20:19:51.000Z
|
2021-09-05T01:44:26.000Z
|
RNNG/scripts/evaluation.py
|
Psarpei/Recognition-of-logical-document-structures
|
5ab3cc44f43409191b2b2903491f8e1ac4a34026
|
[
"MIT"
] | 1
|
2021-02-12T23:52:47.000Z
|
2021-02-12T23:52:47.000Z
|
RNNG/scripts/evaluation.py
|
Psarpei/Recognition-of-logical-document-structures
|
5ab3cc44f43409191b2b2903491f8e1ac4a34026
|
[
"MIT"
] | null | null | null |
from copy import deepcopy
file_ground_truth = open("C:/Users/pasca/Dropbox/PaktikumTextimaging/rnng-master/Thomas_Mann_Der_kleine_Herr_Friedemann_Ground_Truth.txt", "r") #Franz_Kafka_Auf_der_Galerie_Quelle_DigBib_Ground_Truth.txt","r") #Franz_Kafka_Das_Urteil_Ground_Truth.txt","r")
file_pred = open("C:/Users/pasca/Dropbox/PaktikumTextimaging/rnng-master/Thomas_Mann_Der_kleine_Herr_Friedemann_predict.txt", "r") #Franz_Kafka_Auf_der_Galerie_Quelle_DigBib_predict.txt","r") #Franz_Kafka_Das_Urteil_predict.txt", "r")
chars = ["(t", "(s", "(seg", "))", "T1","T2","T3","T4","T5","T6"]
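# eval_char: for a single token type (an opening tag like "(t"/"(s"/"(seg", a run of
# closing brackets, or one of T1..T6, which match closing runs of a specific length),
# count its occurrences in the ground truth and in the prediction, count the predicted
# occurrences that coincide with a ground-truth occurrence at the same token position,
# and print the ratios reported as precision and recall in the table below.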
def eval_char(list_gt, list_pred, char):
chars_gt = 0
chars_pred = 0
matches = 0
char_list = []
t_list = ["))",")))","))))",")))))","))))))",")))))))"]
for elem in list_gt:
if(char[0] == "T"):
if((t_list[int(char[1])-1] in elem[0]) and (t_list[int(char[1])] not in elem[0])):
chars_gt += 1
elif(char == elem[0] or (char in elem[0] and char == "))")):
chars_gt += elem[2]
for elem in list_pred:
if(char[0] == "T"):
if((t_list[int(char[1])-1] in elem[0]) and (t_list[int(char[1])] not in elem[0])):
chars_pred += 1
char_list.append(elem)
elif(char == elem[0] or (char in elem[0] and char == "))")):
chars_pred += elem[2]
char_list.append(elem)
gt_copy = deepcopy(list_gt)
for elem in char_list:
for i in range(len(gt_copy)):
if(elem[0] == gt_copy[i][0] and elem[1] == gt_copy[i][1]):
if(char[0] == "T"):
matches += 1
else:
matches += elem[2] if (elem[2] <= gt_copy[i][2]) else gt_copy[i][2]
gt_copy.pop(i)
break
if(char == "))"):
char = ")"
prec = 0 if chars_gt == 0 else (matches/chars_gt)*100
rec = 0 if chars_pred == 0 else (matches/chars_pred)*100
print("{:6s}|{:16d}|{:15d}|{:13.3f}|{:9.3f}".format(char, chars_gt, chars_pred, prec, rec))
gt = []
lines = file_ground_truth.read().splitlines()
for line in lines:
gt += line.split(" ")
help1= [] #only test
help2= [] # -_____-
"""
for i in range(len(gt)):
if("(w" in gt[i]):
help1.append(gt[i] + gt[i+1])
elif("(c" in gt[i]):
if("»" in gt[i+1] or "«" in gt[i+1] or "€" in gt[i+1]):
help1.append(gt[i] + gt[i+1])
else:
for char in gt[i+1]:
if(char != ")"):
help1.append(gt[i] + char)
"""
list_gt = []
counter = 0
for i in range(len(gt)):
if("(c" in gt[i]):
if("»" in gt[i+1] or "«" in gt[i+1] or "€" in gt[i+1]):
counter += 1
else:
for char in gt[i+1]:
counter += 1 if(char != ")") else 0 #for position from token
elif("(w" in gt[i]):
counter += 1
elif("(t" in gt[i] or "(s" in gt[i] or "(seg" in gt[i]):
list_gt.append((gt[i],counter,1))
elif("))" in gt[i]):
        number = -1  # counts the ")" characters; start at -1 because one ")" closes the preceding w or c token
for char in gt[i]:
number += 1 if(char == ")") else 0
list_gt.append((gt[i],counter,number))
else:
pass
print("#Ground Truth: ", len(list_gt))
pred = []
lines = file_pred.read().splitlines()
for line in lines:
split = line.split(" ")
help_ = 0 #for finding "(t ... "
for elem in split:
if("(t" == elem):
break
help_ += 1
pred += split[help_:]
"""
for i in range(len(pred)-1):
if("(XX" in pred[i]):
help2.append(pred[i] + pred[i+1])
for i in range(len(help1)):
print(help1[i],"_____",help2[i])########################################################
"""
list_pred = []
counter = 0
for elem in pred:
if("(XX" in elem):
counter += 1 #for position from token
elif("(t" in elem or "(s" in elem or "(seg" in elem):
list_pred.append((elem,counter,1))
elif("))" in elem):
        number = -1  # counts the ")" characters; start at -1 because one ")" closes the preceding w or c token
for char in elem:
if(char == ")"):
number += 1
list_pred.append((elem,counter,number))
else:
pass
"""
print("#Prediction: ", len(list_pred))
min_ = len(list_pred) if(len(list_pred) <= len(list_gt)) else len(list_gt)
for i in range(min_):
if("(t" in list_gt[i][0] or "(t" in list_pred[i]):
print(list_gt[i],"_____",list_pred[i])
"""
print("#Wörter und Zeichen Ground Truth:", list_gt[-1][1])
print("#Wörter und Zeichen Preciction:", list_pred[-1][1])
print("File: Franz_Kafka_Auf_der_Galerie_Quelle_DigBib")
print("Token | # Ground Truth | # predictions | % precision | % recall")
print("---------------------------------------------------------------")
for char in chars:
eval_char(list_gt, list_pred, char)
| 33.5
| 256
| 0.521775
|
450de7417df3efd12c9ee751ce86f94607ba5932
| 3,058
|
py
|
Python
|
LeetCode/279-perfect-squares.py
|
leaving-voider/LeetCode.cn-Record
|
2922cbdab85556bc0625adc9e6ce44849232e4f4
|
[
"MIT"
] | null | null | null |
LeetCode/279-perfect-squares.py
|
leaving-voider/LeetCode.cn-Record
|
2922cbdab85556bc0625adc9e6ce44849232e4f4
|
[
"MIT"
] | null | null | null |
LeetCode/279-perfect-squares.py
|
leaving-voider/LeetCode.cn-Record
|
2922cbdab85556bc0625adc9e6ce44849232e4f4
|
[
"MIT"
] | null | null | null |
###############################################################################################
# Solved with the same dynamic-programming idea; surprisingly it timed out
###########
# Time complexity: O(n*sqrt(n)*total), where total is the target sum n
# Space complexity: O(n*sqrt(n)) for the DP table
###############################################################################################
from math import sqrt  # assumed import: sqrt() is used below but was not imported in this snippet
class Solution:
def numSquares(self, n: int) -> int:
sqrt_ = int(sqrt(n))
        # dp[i][j]: the minimum number of perfect squares of naturals <= i needed to sum to j
dp = [[0]*(n+1) for _ in range(sqrt_+1)]
for i in range(1, sqrt_+1):
dp[i][1] = 1
for i in range(1, n+1):
dp[1][i] = i
for i in range(2, sqrt_+1):
square = i**2
for j in range(2, n+1):
                if square > j: # this square cannot be used here
dp[i][j] = dp[i-1][j]
else:
dp[i][j] = min([dp[i-1][j-per*square]+per for per in range(j//square+1)])
return dp[sqrt_][n]
## Using a rolling array, slightly simplified as below, but it still times out
class Solution:
def numSquares(self, n: int) -> int:
sqrt_ = int(sqrt(n))
        # dp[i][j]: the minimum number of perfect squares of naturals <= i needed to sum to j
        dp = [0]*(n+1) # start directly from layer 1
for i in range(1, n+1):
dp[i] = i
for i in range(2, sqrt_+1):
square = i**2
for j in range(n, square-1, -1):
dp[j] = min([dp[j-per*square]+per for per in range(j//square+1)])
return dp[n]
###############################################################################################
# Simplify further by dropping the third loop: while traversing each dp[j] from small to large, just take the min with the earlier dp[j-square]
# With that, this problem was solved without looking at the official solution; it seems I'm finally getting the hang of dynamic programming
###########
# Time complexity: O(n*sqrt(n))
# Space complexity: O(n)
###############################################################################################
class Solution:
def numSquares(self, n: int) -> int:
        # dp[j]: the minimum number of perfect squares needed to sum to j (the i dimension is rolled away)
        dp = list(range(n+1)) # start directly from layer 1
for i in range(2, int(sqrt(n))+1):
square = i**2
for j in range(square, n+1):
dp[j] = min(dp[j], dp[j-square]+1)
return dp[n]
###############################################################################################
# The official solution even provides a purely mathematical shortcut (the four-square theorem) that replaces the DP approach here; only four cases need to be checked
###########
# Time complexity: O(sqrt(n)); the while loop that checks divisibility by 4 is O(log n) and is ignored
# Space complexity: O(1)
###############################################################################################
class Solution:
    # check whether x is a perfect square
def isPerfectSquare(self, x):
y = int(sqrt(x))
return y * y == x
    # check whether x can be written as 4^k*(8m+7)
def checkAnswer4(self, x):
while (x % 4 == 0):
x /= 4
return x % 8 == 7
def numSquares(self, n: int) -> int:
        if self.isPerfectSquare(n): # n is itself a perfect square
return 1
        if self.checkAnswer4(n): # n is of the form 4^k*(8m+7)
return 4
for i in range(1, int(sqrt(n))+1):
if (self.isPerfectSquare(n - i**2)):
return 2
return 3
| 35.976471
| 96
| 0.397973
|
816f021232314eb03f88bec3efda00487bb916c2
| 1,635
|
py
|
Python
|
alipay/aop/api/response/AlipayFinanceFundFundnetvaluesBatchqueryResponse.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/response/AlipayFinanceFundFundnetvaluesBatchqueryResponse.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/response/AlipayFinanceFundFundnetvaluesBatchqueryResponse.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.NetValueVO import NetValueVO
class AlipayFinanceFundFundnetvaluesBatchqueryResponse(AlipayResponse):
def __init__(self):
super(AlipayFinanceFundFundnetvaluesBatchqueryResponse, self).__init__()
self._fund_code = None
self._fund_type = None
self._net_values = None
@property
def fund_code(self):
return self._fund_code
@fund_code.setter
def fund_code(self, value):
self._fund_code = value
@property
def fund_type(self):
return self._fund_type
@fund_type.setter
def fund_type(self, value):
self._fund_type = value
@property
def net_values(self):
return self._net_values
@net_values.setter
def net_values(self, value):
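        # Accept either NetValueVO instances or plain dicts; dicts are converted
        # via NetValueVO.from_alipay_dict before being stored.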
if isinstance(value, list):
self._net_values = list()
for i in value:
if isinstance(i, NetValueVO):
self._net_values.append(i)
else:
self._net_values.append(NetValueVO.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipayFinanceFundFundnetvaluesBatchqueryResponse, self).parse_response_content(response_content)
if 'fund_code' in response:
self.fund_code = response['fund_code']
if 'fund_type' in response:
self.fund_type = response['fund_type']
if 'net_values' in response:
self.net_values = response['net_values']
| 30.849057
| 121
| 0.658716
|
225a9440ef4e863d0895ea7e69c825e3dda25ca1
| 33,512
|
py
|
Python
|
client/verta/verta/repository/_commit.py
|
TMFRook/modeldb
|
7096b8fd7e98dc97c3c87cf0ce0e3862ff042f6c
|
[
"Apache-2.0"
] | null | null | null |
client/verta/verta/repository/_commit.py
|
TMFRook/modeldb
|
7096b8fd7e98dc97c3c87cf0ce0e3862ff042f6c
|
[
"Apache-2.0"
] | null | null | null |
client/verta/verta/repository/_commit.py
|
TMFRook/modeldb
|
7096b8fd7e98dc97c3c87cf0ce0e3862ff042f6c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
import collections
from datetime import datetime
import heapq
import time
import requests
from .._protos.public.modeldb.versioning import VersioningService_pb2 as _VersioningService
from ..external import six
from .._internal_utils import (
_artifact_utils,
_utils,
)
from .. import code
from .. import configuration
from .. import dataset
from .. import environment
from . import _blob
from . import _diff
class Commit(object):
"""
Commit within a ModelDB Repository.
There should not be a need to instantiate this class directly; please use
:meth:`Repository.get_commit() <verta.repository.Repository.get_commit>`.
Attributes
----------
id : str or None
ID of the Commit, or ``None`` if the Commit has not yet been saved.
"""
def __init__(self, conn, repo, commit_msg, branch_name=None):
self._conn = conn
self._commit_json = _utils.proto_to_json(commit_msg) # dict representation of Commit protobuf
self._repo = repo
self._parent_ids = list(collections.OrderedDict.fromkeys(commit_msg.parent_shas or [])) # remove duplicates while maintaining order
self.branch_name = branch_name # TODO: find a way to clear if branch is moved
self._blobs = dict() # will be loaded when needed
self._loaded_from_remote = False
@property
def id(self):
return self._commit_json['commit_sha'] or None
@property
def parent(self):
return self._repo.get_commit(id=self._parent_ids[0]) if self._parent_ids else None
def _lazy_load_blobs(self):
if self._loaded_from_remote:
return
# until Commits can be created from blob diffs, load in blobs
if self.id is not None:
self._update_blobs_from_commit(self.id)
else:
for parent_id in self._parent_ids:
# parents will be read in first-to-last, possibly overwriting previous blobs
self._update_blobs_from_commit(parent_id)
self._loaded_from_remote = True
def describe(self):
self._lazy_load_blobs()
contents = '\n'.join((
"{} ({}.{})".format(path, blob.__class__.__module__.split('.')[1], blob.__class__.__name__)
for path, blob
in sorted(six.viewitems(self._blobs))
))
if not contents:
contents = "<no contents>"
components = [self.__repr__(), 'Contents:', contents]
return '\n'.join(components)
def __repr__(self):
branch_and_tag = ' '.join((
"Branch: {}".format(self.branch_name) if self.branch_name is not None else '',
# TODO: put tag here
))
if self.id is None:
header = "unsaved Commit"
if branch_and_tag:
header = header + " (was {})".format(branch_and_tag)
else:
header = "Commit {}".format(self.id)
if branch_and_tag:
header = header + " ({})".format(branch_and_tag)
# TODO: add author
# TODO: make data more similar to git
date_created = int(self._commit_json['date_created']) # protobuf uint64 is str, so cast to int
date = 'Date: ' + datetime.fromtimestamp(date_created/1000.).strftime('%Y-%m-%d %H:%M:%S')
message = '\n'.join(' ' + c for c in self._commit_json['message'].split('\n'))
components = [header, date, '', message, '']
return '\n'.join(components)
@classmethod
def _from_id(cls, conn, repo, id_, **kwargs):
endpoint = "{}://{}/api/v1/modeldb/versioning/repositories/{}/commits/{}".format(
conn.scheme,
conn.socket,
repo.id,
id_,
)
response = _utils.make_request("GET", endpoint, conn)
_utils.raise_for_http_error(response)
response_msg = _utils.json_to_proto(_utils.body_to_json(response),
_VersioningService.GetCommitRequest.Response)
commit_msg = response_msg.commit
return cls(conn, repo, commit_msg, **kwargs)
@staticmethod
def _raise_lookup_error(path):
e = LookupError("Commit does not contain path \"{}\"".format(path))
six.raise_from(e, None)
# TODO: consolidate this with similar method in `_ModelDBEntity`
def _get_url_for_artifact(self, blob_path, dataset_component_path, method, part_num=0):
"""
Obtains a URL to use for accessing stored artifacts.
Parameters
----------
blob_path : str
Path to blob within repo.
dataset_component_path : str
Filepath in dataset component blob.
method : {'GET', 'PUT'}
HTTP method to request for the generated URL.
part_num : int, optional
If using Multipart Upload, number of part to be uploaded.
Returns
-------
response_msg : `_VersioningService.GetUrlForBlobVersioned.Response`
Backend response.
"""
if method.upper() not in ("GET", "PUT"):
raise ValueError("`method` must be one of {'GET', 'PUT'}")
Message = _VersioningService.GetUrlForBlobVersioned
msg = Message(
location=path_to_location(blob_path),
path_dataset_component_blob_path=dataset_component_path,
method=method,
part_number=part_num,
)
data = _utils.proto_to_json(msg)
endpoint = "{}://{}/api/v1/modeldb/versioning/repositories/{}/commits/{}/getUrlForBlobVersioned".format(
self._conn.scheme,
self._conn.socket,
self._repo.id,
self.id,
)
response = _utils.make_request("POST", endpoint, self._conn, json=data)
_utils.raise_for_http_error(response)
response_msg = _utils.json_to_proto(response.json(), Message.Response)
url = response_msg.url
# accommodate port-forwarded NFS store
if 'https://localhost' in url[:20]:
url = 'http' + url[5:]
if 'localhost%3a' in url[:20]:
url = url.replace('localhost%3a', 'localhost:')
if 'localhost%3A' in url[:20]:
url = url.replace('localhost%3A', 'localhost:')
response_msg.url = url
return response_msg
# TODO: consolidate this with similar method in `ExperimentRun`
def _upload_artifact(self, blob_path, dataset_component_path, file_handle, part_size=_artifact_utils._64MB):
"""
Uploads `file_handle` to ModelDB artifact store.
Parameters
----------
blob_path : str
Path to blob within repo.
dataset_component_path : str
Filepath in dataset component blob.
file_handle : file-like
Artifact to be uploaded.
part_size : int, default 64 MB
If using multipart upload, number of bytes to upload per part.
"""
file_handle.seek(0)
# check if multipart upload ok
url_for_artifact = self._get_url_for_artifact(blob_path, dataset_component_path, "PUT", part_num=1)
print("uploading {} to ModelDB".format(dataset_component_path))
if url_for_artifact.multipart_upload_ok:
# TODO: parallelize this
file_parts = iter(lambda: file_handle.read(part_size), b'')
for part_num, file_part in enumerate(file_parts, start=1):
print("uploading part {}".format(part_num), end='\r')
# get presigned URL
url = self._get_url_for_artifact(blob_path, dataset_component_path, "PUT", part_num=part_num).url
# wrap file part into bytestream to avoid OverflowError
# Passing a bytestring >2 GB (num bytes > max val of int32) directly to
# ``requests`` will overwhelm CPython's SSL lib when it tries to sign the
# payload. But passing a buffered bytestream instead of the raw bytestring
# indicates to ``requests`` that it should perform a streaming upload via
# HTTP/1.1 chunked transfer encoding and avoid this issue.
# https://github.com/psf/requests/issues/2717
part_stream = six.BytesIO(file_part)
# upload part
response = _utils.make_request("PUT", url, self._conn, data=part_stream)
_utils.raise_for_http_error(response)
# commit part
url = "{}://{}/api/v1/modeldb/versioning/commitVersionedBlobArtifactPart".format(
self._conn.scheme,
self._conn.socket,
)
msg = _VersioningService.CommitVersionedBlobArtifactPart(
commit_sha=self.id,
location=path_to_location(blob_path),
path_dataset_component_blob_path=dataset_component_path,
)
msg.repository_id.repo_id = self._repo.id
msg.artifact_part.part_number = part_num
msg.artifact_part.etag = response.headers['ETag']
data = _utils.proto_to_json(msg)
response = _utils.make_request("POST", url, self._conn, json=data)
_utils.raise_for_http_error(response)
print()
# complete upload
url = "{}://{}/api/v1/modeldb/versioning/commitMultipartVersionedBlobArtifact".format(
self._conn.scheme,
self._conn.socket,
)
msg = _VersioningService.CommitMultipartVersionedBlobArtifact(
commit_sha=self.id,
location=path_to_location(blob_path),
path_dataset_component_blob_path=dataset_component_path,
)
msg.repository_id.repo_id = self._repo.id
data = _utils.proto_to_json(msg)
response = _utils.make_request("POST", url, self._conn, json=data)
_utils.raise_for_http_error(response)
else:
# upload full artifact
if url_for_artifact.fields:
# if fields were returned by backend, make a POST request and supply them as form fields
response = _utils.make_request(
"POST", url_for_artifact.url, self._conn,
# requests uses the `files` parameter for sending multipart/form-data POSTs.
# https://stackoverflow.com/a/12385661/8651995
# the file contents must be the final form field
# https://docs.aws.amazon.com/AmazonS3/latest/dev/HTTPPOSTForms.html#HTTPPOSTFormFields
files=list(url_for_artifact.fields.items()) + [('file', file_handle)],
)
else:
response = _utils.make_request("PUT", url_for_artifact.url, self._conn, data=file_handle)
_utils.raise_for_http_error(response)
print("upload complete")
def _update_blobs_from_commit(self, id_):
"""Fetches commit `id_`'s blobs and stores them as objects in `self._blobs`."""
endpoint = "{}://{}/api/v1/modeldb/versioning/repositories/{}/commits/{}/blobs".format(
self._conn.scheme,
self._conn.socket,
self._repo.id,
id_,
)
response = _utils.make_request("GET", endpoint, self._conn)
_utils.raise_for_http_error(response)
response_msg = _utils.json_to_proto(_utils.body_to_json(response),
_VersioningService.ListCommitBlobsRequest.Response)
self._blobs.update({
'/'.join(blob_msg.location): blob_msg_to_object(blob_msg.blob)
for blob_msg
in response_msg.blobs
})
def _become_child(self):
"""
        Called when `self` has already been saved and is then modified, so that this
        object becomes an unsaved child of the previously saved commit.
"""
self._lazy_load_blobs()
self._parent_ids = [self.id]
self._commit_json['commit_sha'] = ""
def _become_saved_child(self, child_id):
"""
This method is for when a child commit is created in the back end from `self`, and `self`
and its branch need to be updated to become that newly-created commit.
"""
if self.branch_name is not None:
# update branch to child commit
set_branch(self._conn, self._repo.id, child_id, self.branch_name)
new_commit = self._repo.get_commit(branch=self.branch_name)
else:
new_commit = self._repo.get_commit(id=child_id)
self.__dict__ = new_commit.__dict__
def _to_create_msg(self, commit_message):
self._lazy_load_blobs()
msg = _VersioningService.CreateCommitRequest()
msg.repository_id.repo_id = self._repo.id # pylint: disable=no-member
msg.commit.parent_shas.extend(self._parent_ids) # pylint: disable=no-member
msg.commit.message = commit_message
for path, blob in six.viewitems(self._blobs):
blob_msg = _VersioningService.BlobExpanded()
blob_msg.location.extend(path_to_location(path)) # pylint: disable=no-member
blob_msg.blob.CopyFrom(blob._as_proto())
msg.blobs.append(blob_msg) # pylint: disable=no-member
return msg
def walk(self):
"""
Generates folder names and blob names in this commit by walking through its folder tree.
Similar to the Python standard library's ``os.walk()``, the yielded `folder_names` can be
modified in-place to remove subfolders from upcoming iterations or alter the order in which
they are to be visited.
Note that, also similar to ``os.walk()``, `folder_names` and `blob_names` are simply the
*names* of those entities, and *not* their full paths.
Yields
------
folder_path : str
Path to current folder.
folder_names : list of str
Names of subfolders in `folder_path`.
blob_names : list of str
Names of blobs in `folder_path`.
"""
if self.id is None:
raise RuntimeError("Commit must be saved before it can be walked")
endpoint = "{}://{}/api/v1/modeldb/versioning/repositories/{}/commits/{}/path".format(
self._conn.scheme,
self._conn.socket,
self._repo.id,
self.id,
)
locations = [()]
while locations:
location = locations.pop()
msg = _VersioningService.GetCommitComponentRequest()
msg.location.extend(location) # pylint: disable=no-member
data = _utils.proto_to_json(msg)
response = _utils.make_request("GET", endpoint, self._conn, params=data)
_utils.raise_for_http_error(response)
response_msg = _utils.json_to_proto(_utils.body_to_json(response), msg.Response)
folder_msg = response_msg.folder
folder_path = '/'.join(location)
folder_names = list(sorted(element.element_name for element in folder_msg.sub_folders))
blob_names = list(sorted(element.element_name for element in folder_msg.blobs))
yield (folder_path, folder_names, blob_names)
locations.extend(
location + (folder_name,)
for folder_name
in reversed(folder_names) # maintains order, because locations are popped from end
)
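    # Illustrative usage sketch (comment only; `commit` is a hypothetical saved
    # Commit, mirroring the os.walk()-style iteration documented above):
    #
    #     for folder_path, folder_names, blob_names in commit.walk():
    #         print(folder_path or "(repo root)", blob_names)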
def update(self, path, blob):
"""
Adds `blob` to this Commit at `path`.
If `path` is already in this Commit, it will be updated to the new `blob`.
Parameters
----------
path : str
Location to add `blob` to.
blob : :mod:`~verta._repository.blob`
ModelDB versioning blob.
"""
if not isinstance(blob, _blob.Blob):
raise TypeError("unsupported type {}".format(type(blob)))
self._lazy_load_blobs()
if self.id is not None:
self._become_child()
self._blobs[path] = blob
def get(self, path):
"""
Retrieves the blob at `path` from this Commit.
Parameters
----------
path : str
Location of a blob.
Returns
-------
blob : :mod:`~verta._repository.blob`
ModelDB versioning blob.
Raises
------
LookupError
If `path` is not in this Commit.
"""
self._lazy_load_blobs()
try:
blob = self._blobs[path]
except KeyError:
self._raise_lookup_error(path)
if isinstance(blob, dataset._Dataset):
# for _Dataset.download()
blob._set_commit_and_blob_path(self, path)
return blob
def remove(self, path):
"""
Deletes the blob at `path` from this Commit.
Parameters
----------
path : str
Location of a blob.
Raises
------
LookupError
If `path` is not in this Commit.
"""
self._lazy_load_blobs()
if self.id is not None:
self._become_child()
try:
del self._blobs[path]
except KeyError:
self._raise_lookup_error(path)
def save(self, message):
"""
Saves this commit to ModelDB.
.. note::
If this commit contains new S3 datasets to be versioned by ModelDB, a very large
temporary download may occur before uploading them to ModelDB.
Parameters
----------
message : str
Description of this Commit.
"""
# prepare ModelDB-versioned blobs, and track for upload after commit save
mdb_versioned_blobs = dict()
for blob_path, blob in self._blobs.items():
if isinstance(blob, dataset._Dataset) and blob._mdb_versioned:
blob._prepare_components_to_upload()
mdb_versioned_blobs[blob_path] = blob
msg = self._to_create_msg(commit_message=message)
self._save(msg)
# upload ModelDB-versioned blobs
for blob_path, blob in mdb_versioned_blobs.items():
for component in blob._components_map.values():
if component._internal_versioned_path:
with open(component._local_path, 'rb') as f:
self._upload_artifact(blob_path, component.path, f)
blob._clean_up_uploaded_components()
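    # Illustrative end-to-end sketch (comment only; `repo` and `my_blob` are
    # hypothetical names -- any _blob.Blob subclass instance works for `my_blob`):
    #
    #     commit = repo.get_commit(branch="master")
    #     commit.update("data/train", my_blob)
    #     commit.save("add training data blob")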
def _save(self, proto_message):
data = _utils.proto_to_json(proto_message)
endpoint = "{}://{}/api/v1/modeldb/versioning/repositories/{}/commits".format(
self._conn.scheme,
self._conn.socket,
self._repo.id,
)
response = _utils.make_request("POST", endpoint, self._conn, json=data)
_utils.raise_for_http_error(response)
response_msg = _utils.json_to_proto(_utils.body_to_json(response), proto_message.Response)
self._become_saved_child(response_msg.commit.commit_sha)
# TODO: Add ways to retrieve and delete tag
def tag(self, tag):
"""
Assigns a tag to this Commit.
Parameters
----------
tag : str
Tag.
Raises
------
RuntimeError
If this Commit has not yet been saved.
"""
if self.id is None:
raise RuntimeError("Commit must be saved before it can be tagged")
data = self.id
endpoint = "{}://{}/api/v1/modeldb/versioning/repositories/{}/tags/{}".format(
self._conn.scheme,
self._conn.socket,
self._repo.id,
tag,
)
response = _utils.make_request("PUT", endpoint, self._conn, json=data)
_utils.raise_for_http_error(response)
def log(self):
"""
Yields ancestors, starting from this Commit until the root of the Repository.
Analogous to ``git log``.
Yields
------
commit : :class:`Commit`
Ancestor commit.
"""
if self.id is None: # unsaved commit
# use parent
commit_id = self._parent_ids[0]
else:
commit_id = self.id
endpoint = "{}://{}/api/v1/modeldb/versioning/repositories/{}/commits/{}/log".format(
self._conn.scheme,
self._conn.socket,
self._repo.id,
commit_id,
)
response = _utils.make_request("GET", endpoint, self._conn)
_utils.raise_for_http_error(response)
response_msg = _utils.json_to_proto(_utils.body_to_json(response),
_VersioningService.ListCommitsLogRequest.Response)
commits = response_msg.commits
for c in commits:
yield Commit(self._conn, self._repo, c, self.branch_name if c.commit_sha == commit_id else None)
def new_branch(self, branch):
"""
Creates a branch at this Commit and returns the checked-out branch.
If `branch` already exists, it will be moved to this Commit.
Parameters
----------
branch : str
Branch name.
Returns
-------
commit : :class:`Commit`
This Commit as the head of `branch`.
Raises
------
RuntimeError
If this Commit has not yet been saved.
Examples
--------
.. code-block:: python
master = repo.get_commit(branch="master")
dev = master.new_branch("development")
"""
if self.id is None:
raise RuntimeError("Commit must be saved before it can be attached to a branch")
set_branch(self._conn, self._repo.id, self.id, branch)
return self._repo.get_commit(branch=branch)
def diff_from(self, reference=None):
"""
Returns the diff from `reference` to `self`.
Parameters
----------
reference : :class:`Commit`, optional
Commit to be compared to.
Returns
-------
:class:`~verta.repository.Diff`
Commit diff.
Raises
------
RuntimeError
If this Commit or `reference` has not yet been saved, or if they do not belong to the
same Repository.
"""
if self.id is None:
raise RuntimeError("Commit must be saved before a diff can be calculated")
if reference is None:
reference_id = self._parent_ids[0]
elif not isinstance(reference, Commit) or reference.id is None:
raise TypeError("`reference` must be a saved Commit")
elif self._repo.id != reference._repo.id:
raise ValueError("Commit and `reference` must belong to the same Repository")
else:
reference_id = reference.id
endpoint = "{}://{}/api/v1/modeldb/versioning/repositories/{}/diff?commit_a={}&commit_b={}".format(
self._conn.scheme,
self._conn.socket,
self._repo.id,
reference_id,
self.id,
)
response = _utils.make_request("GET", endpoint, self._conn)
_utils.raise_for_http_error(response)
response_msg = _utils.json_to_proto(_utils.body_to_json(response),
_VersioningService.ComputeRepositoryDiffRequest.Response)
return _diff.Diff(response_msg.diffs)
def apply_diff(self, diff, message, other_parents=[]):
"""
Applies a diff to this Commit.
This method creates a new Commit in ModelDB, and assigns a new ID to this object.
Parameters
----------
diff : :class:`~verta._repository.diff.Diff`
Commit diff.
        message : str
            Description of the diff.
        other_parents : list of str, optional
            IDs of additional parent Commits to record on the new Commit.
Raises
------
RuntimeError
If this Commit has not yet been saved.
"""
if self.id is None:
raise RuntimeError("Commit must be saved before a diff can be applied")
msg = _VersioningService.CreateCommitRequest()
msg.repository_id.repo_id = self._repo.id
msg.commit.parent_shas.append(self.id)
msg.commit.parent_shas.extend(other_parents)
msg.commit.message = message
msg.commit_base = self.id
msg.diffs.extend(diff._diffs)
self._save(msg)
def get_revert_diff(self):
return self.parent.diff_from(self)
def revert(self, other=None, message=None):
"""
Reverts `other`.
This method creates a new Commit in ModelDB, and assigns a new ID to this object.
Parameters
----------
other : :class:`Commit`, optional
Commit to be reverted. If not provided, this Commit will be reverted.
message : str, optional
Description of the revert. If not provided, a default message will be used.
Raises
------
RuntimeError
If this Commit or `other` has not yet been saved, or if they do not belong to the
same Repository.
"""
if self.id is None:
raise RuntimeError("Commit must be saved before a revert can be performed")
if other is None:
other = self
elif not isinstance(other, Commit) or other.id is None:
raise TypeError("`other` must be a saved Commit")
elif self._repo.id != other._repo.id:
raise ValueError("Commit and `other` must belong to the same Repository")
msg = _VersioningService.RevertRepositoryCommitsRequest()
msg.base_commit_sha = self.id
msg.commit_to_revert_sha = other.id
if message is not None:
msg.content.message = message
data = _utils.proto_to_json(msg)
endpoint = "{}://{}/api/v1/modeldb/versioning/repositories/{}/commits/{}/revert".format(
self._conn.scheme,
self._conn.socket,
self._repo.id,
msg.commit_to_revert_sha,
)
response = _utils.make_request("POST", endpoint, self._conn, json=data)
_utils.raise_for_http_error(response)
response_msg = _utils.json_to_proto(_utils.body_to_json(response), msg.Response)
self._become_saved_child(response_msg.commit.commit_sha)
def _to_heap_element(self):
date_created = int(self._commit_json['date_created']) # protobuf uint64 is str, so cast to int
# Most recent has higher priority
return (-date_created, self.id, self)
def get_common_parent(self, other):
if self.id is None:
raise RuntimeError("Commit must be saved before a common parent can be calculated")
if not isinstance(other, Commit) or other.id is None:
raise TypeError("`other` must be a saved Commit")
elif self._repo.id != other._repo.id:
raise ValueError("Commit and `other` must belong to the same Repository")
# Keep a set of all parents we see for each side. This doesn't have to be *all* but facilitates implementation
left_ids = set([self.id])
right_ids = set([other.id])
# Keep a heap of all candidate commits to be the common parent, ordered by the date so that we fetch the most recent first
heap = []
heapq.heappush(heap, self._to_heap_element())
heapq.heappush(heap, other._to_heap_element())
while heap:
# Get the most recent commit
_, _, commit = heapq.heappop(heap)
# If it's in the list for both sides, then it's a parent of both and return
if commit.id in left_ids and commit.id in right_ids:
return commit
# Update the heap with all the current parents
parent_ids = commit._parent_ids
for parent_id in parent_ids:
parent_commit = self._repo.get_commit(id=parent_id)
heap_element = parent_commit._to_heap_element()
try:
heapq.heappush(heap, heap_element)
except TypeError: # already in heap, because comparison between Commits failed
pass
# Update the parent sets based on which side the commit came from
# We know the commit came from the left if its ID is in the left set. If it was on the right too, then it would be the parent and we would have returned early
if commit.id in left_ids:
left_ids.update(parent_ids)
if commit.id in right_ids:
right_ids.update(parent_ids)
# Should never happen, since we have the initial commit
return None
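    # Illustrative walk-through (comment only; hypothetical history where commits
    # were created in the order A, B, C, D):
    #
    #     A --- B --- C   (self)
    #            \
    #             D       (other)
    #
    #     The heap pops the most recent commits first, pushing their parents; B
    #     eventually appears in both `left_ids` and `right_ids` and is returned
    #     as the common parent.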
def merge(self, other, message=None):
"""
Merges a branch headed by `other` into this Commit.
This method creates a new Commit in ModelDB, and assigns a new ID to this object.
Parameters
----------
other : :class:`Commit`
Commit to be merged.
message : str, optional
Description of the merge. If not provided, a default message will be used.
Raises
------
RuntimeError
If this Commit or `other` has not yet been saved, or if they do not belong to the
same Repository.
"""
if self.id is None:
raise RuntimeError("Commit must be saved before a merge can be performed")
if not isinstance(other, Commit) or other.id is None:
raise TypeError("`other` must be a saved Commit")
elif self._repo.id != other._repo.id:
raise ValueError("Commit and `other` must belong to the same Repository")
msg = _VersioningService.MergeRepositoryCommitsRequest()
if self.branch_name is not None:
msg.branch_b = self.branch_name
else:
msg.commit_sha_b = self.id
if other.branch_name is not None:
msg.branch_a = other.branch_name
else:
msg.commit_sha_a = other.id
if message is not None:
msg.content.message = message
data = _utils.proto_to_json(msg)
endpoint = "{}://{}/api/v1/modeldb/versioning/repositories/{}/merge".format(
self._conn.scheme,
self._conn.socket,
self._repo.id,
)
response = _utils.make_request("POST", endpoint, self._conn, json=data)
_utils.raise_for_http_error(response)
response_msg = _utils.json_to_proto(_utils.body_to_json(response), msg.Response)
# raise for conflict
if response_msg.conflicts:
raise RuntimeError('\n '.join([
"merge conflict",
"resolution is not currently supported through the Client",
"please create a new Commit with the updated blobs",
"see https://docs.verta.ai/verta/experiment-management/guides/merge for instructions",
]))
self._become_saved_child(response_msg.commit.commit_sha)
def blob_msg_to_object(blob_msg):
# TODO: make this more concise
content_type = blob_msg.WhichOneof('content')
content_subtype = None
blob_cls = None
if content_type == 'code':
content_subtype = blob_msg.code.WhichOneof('content')
if content_subtype == 'git':
blob_cls = code.Git
elif content_subtype == 'notebook':
blob_cls = code.Notebook
elif content_type == 'config':
blob_cls = configuration.Hyperparameters
elif content_type == 'dataset':
content_subtype = blob_msg.dataset.WhichOneof('content')
if content_subtype == 's3':
blob_cls = dataset.S3
elif content_subtype == 'path':
blob_cls = dataset.Path
elif content_type == 'environment':
content_subtype = blob_msg.environment.WhichOneof('content')
if content_subtype == 'python':
blob_cls = environment.Python
elif content_subtype == 'docker':
raise NotImplementedError
if blob_cls is None:
if content_subtype is None:
raise NotImplementedError("found unexpected content type {};"
" please notify the Verta development team".format(content_type))
else:
raise NotImplementedError("found unexpected {} type {};"
" please notify the Verta development team".format(content_type, content_subtype))
return blob_cls._from_proto(blob_msg)
def path_to_location(path):
"""Messages take a `repeated string` of path components."""
if path.startswith('/'):
# `path` is already meant to be relative to repo root
path = path[1:]
return path.split('/')
def location_to_path(location):
return '/'.join(location)
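# Illustrative examples (comment only): path_to_location("data/train.csv") and
# path_to_location("/data/train.csv") both return ["data", "train.csv"], and
# location_to_path(["data", "train.csv"]) returns "data/train.csv".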
def set_branch(conn, repo_id, commit_id, branch):
"""Sets `branch` to Commit `commit_id`."""
data = commit_id
endpoint = "{}://{}/api/v1/modeldb/versioning/repositories/{}/branches/{}".format(
conn.scheme,
conn.socket,
repo_id,
branch,
)
response = _utils.make_request("PUT", endpoint, conn, json=data)
_utils.raise_for_http_error(response)
| 35.995704
| 170
| 0.597577
|
ee3c7021cce70e4c1828a60d3c720bbe5f81820d
| 2,609
|
py
|
Python
|
var/spack/repos/builtin/packages/c-blosc2/package.py
|
padamson/spack
|
d3f67a48552691b4846ccc4a10f76740b154090c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2021-03-05T10:54:32.000Z
|
2021-03-05T14:14:52.000Z
|
var/spack/repos/builtin/packages/c-blosc2/package.py
|
padamson/spack
|
d3f67a48552691b4846ccc4a10f76740b154090c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 16
|
2021-05-12T06:03:24.000Z
|
2022-03-16T15:26:46.000Z
|
var/spack/repos/builtin/packages/c-blosc2/package.py
|
padamson/spack
|
d3f67a48552691b4846ccc4a10f76740b154090c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class CBlosc2(CMakePackage):
"""Next generation c-blosc with a new API, a new container and
other bells and whistles"""
homepage = "http://www.blosc.org"
url = "https://github.com/Blosc/c-blosc2/archive/refs/tags/v2.0.1.tar.gz"
git = "https://github.com/Blosc/c-blosc2.git"
maintainers = ['ax3l', 'robert-mijakovic']
version('develop', branch='master')
version('2.0.2', sha256='fba51ba601610441eea6046e384284b2d8d7884922060cf15369d01d713b9b77')
version('2.0.1', sha256='35b93dfed479b1dfd9372d41d7843b60254ed1d71792577b95e489c28705874f')
variant('avx2', default=True, description='Enable AVX2 support')
variant('lizard', default=True,
description='support for LIZARD (LZ5)')
variant('lz4', default=True,
description='support for LZ4')
variant('snappy', default=True,
description='support for SNAPPY')
variant('zlib', default=True,
description='support for ZLIB')
variant('zstd', default=True,
description='support for ZSTD')
depends_on('cmake@2.8.10:', type='build')
depends_on('lizard', when='+lizard')
depends_on('lz4', when='+lz4')
depends_on('snappy', when='+snappy')
depends_on('zlib', when='+zlib')
depends_on('zstd', when='+zstd')
def cmake_args(self):
spec = self.spec
args = [
'-DDEACTIVATE_LZ4={0}'.format(
'ON' if '~lz4' in spec else 'OFF'),
'-DDEACTIVATE_LIZARD={0}'.format(
'ON' if '~lizard' in spec else 'OFF'),
'-DDEACTIVATE_SNAPPY={0}'.format(
'ON' if '~snappy' in spec else 'OFF'),
'-DDEACTIVATE_ZLIB={0}'.format(
'ON' if '~zlib' in spec else 'OFF'),
'-DDEACTIVATE_ZSTD={0}'.format(
'ON' if '~zstd' in spec else 'OFF'),
'-DPREFER_EXTERNAL_LIZARD=ON',
'-DPREFER_EXTERNAL_LZ4=ON',
# snappy is supported via external install only
'-DPREFER_EXTERNAL_ZLIB=ON',
'-DPREFER_EXTERNAL_ZSTD=ON',
'-DDEACTIVATE_AVX2={0}'.format(
'ON' if '~avx2' in spec else 'OFF'),
self.define('BUILD_TESTS', self.run_tests),
self.define('BUILD_BENCHMARKS', self.run_tests),
self.define('BUILD_EXAMPLES', self.run_tests)
]
return args
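    # Illustrative spec (comment only; an assumption, not part of the recipe):
    #     spack install c-blosc2@2.0.2 ~snappy ~avx2
    # would make cmake_args() emit -DDEACTIVATE_SNAPPY=ON and -DDEACTIVATE_AVX2=ON,
    # while the remaining codecs stay enabled and prefer their external packages.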
| 37.271429
| 95
| 0.604829
|
0060798351f2adc51d90410368cc8b3ab076963b
| 1,036
|
py
|
Python
|
problem_042.py
|
arboreus/project_euler
|
796173e8e72fcbfc15bbb3def7a36349639fafcf
|
[
"MIT"
] | null | null | null |
problem_042.py
|
arboreus/project_euler
|
796173e8e72fcbfc15bbb3def7a36349639fafcf
|
[
"MIT"
] | null | null | null |
problem_042.py
|
arboreus/project_euler
|
796173e8e72fcbfc15bbb3def7a36349639fafcf
|
[
"MIT"
] | null | null | null |
#42) Coded triangle numbers
#The nth term of the sequence of triangle numbers is given by, tn = (1/2)*n*(n+1); so the first ten triangle numbers are:
#1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
#By converting each letter in a word to a number corresponding to its alphabetical position and adding these values we form a word value. For example, the word value for SKY is 19 + 11 + 25 = 55 = t10. If the word value is a triangle number then we shall call the word a triangle word.
#Using words.txt (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common English words, how many are triangle words?
#%% Solution
def triangle_nums(x):
n = 1
while int(1/2*n*(n+1)) <= x:
yield int(1/2*n*(n+1))
n += 1
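# Quick illustration (comment only, so the script's behaviour is unchanged):
# list(triangle_nums(55)) == [1, 3, 6, 10, 15, 21, 28, 36, 45, 55], which is why
# the word value of SKY (55) is recognised as a triangle number below.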
with open("p042_words.txt", mode='r') as doc:
list_words = doc.read().replace('"', '').split(',')
list_values = [sum([ord(x)-64 for x in word]) for word in list_words]
list_triangle = [x for x in list_values if x in triangle_nums(max(list_values))]
len(list_triangle)
| 51.8
| 285
| 0.686293
|
d5dc6e847822a644e2e049d7625a0c74a209cb56
| 45,530
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py
|
littletomatodonkey/Paddle
|
3ec289a6a33c5392b914cc256736dcb00b2cecce
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py
|
littletomatodonkey/Paddle
|
3ec289a6a33c5392b914cc256736dcb00b2cecce
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py
|
littletomatodonkey/Paddle
|
3ec289a6a33c5392b914cc256736dcb00b2cecce
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid as fluid
from paddle.fluid import Embedding, LayerNorm, Linear, Layer
from paddle.fluid.dygraph import to_variable, guard
from paddle.fluid.dygraph import TracedLayer
from test_imperative_base import new_program_scope
from paddle.fluid import core
import numpy as np
import six
np.set_printoptions(suppress=True)
from utils import DyGraphProgramDescTracerTestHelper, is_equal_program
# Copy from models
class TrainTaskConfig(object):
# support both CPU and GPU now.
use_gpu = True
# the epoch number to train.
pass_num = 30
# the number of sequences contained in a mini-batch.
# deprecated, set batch_size in args.
batch_size = 32
# the hyper parameters for Adam optimizer.
    # This static learning_rate will be multiplied by the LearningRateScheduler-
    # derived learning rate to get the final learning rate.
learning_rate = 2.0
beta1 = 0.9
beta2 = 0.997
eps = 1e-9
# the parameters for learning rate scheduling.
warmup_steps = 8000
# the weight used to mix up the ground-truth distribution and the fixed
# uniform distribution in label smoothing when training.
# Set this as zero if label smoothing is not wanted.
label_smooth_eps = 0.1
# the directory for saving trained models.
model_dir = "trained_models"
# the directory for saving checkpoints.
ckpt_dir = "trained_ckpts"
# the directory for loading checkpoint.
# If provided, continue training from the checkpoint.
ckpt_path = None
# the parameter to initialize the learning rate scheduler.
    # It should be provided if checkpoints are used, since the checkpoint doesn't
# include the training step counter currently.
start_step = 0
# the frequency to save trained models.
save_freq = 10000
class InferTaskConfig(object):
use_gpu = True
# the number of examples in one run for sequence generation.
batch_size = 10
# the parameters for beam search.
beam_size = 5
max_out_len = 256
# the number of decoded sentences to output.
n_best = 1
# the flags indicating whether to output the special tokens.
output_bos = False
output_eos = False
output_unk = True
# the directory for loading the trained model.
model_path = "trained_models/pass_1.infer.model"
class ModelHyperParams(object):
    # The following five vocabulary-related configurations will be set
# automatically according to the passed vocabulary path and special tokens.
# size of source word dictionary.
src_vocab_size = 10000
    # size of target word dictionary
trg_vocab_size = 10000
# index for <bos> token
bos_idx = 0
# index for <eos> token
eos_idx = 1
# index for <unk> token
unk_idx = 2
# max length of sequences deciding the size of position encoding table.
max_length = 4
# the dimension for word embeddings, which is also the last dimension of
# the input and output of multi-head attention, position-wise feed-forward
# networks, encoder and decoder.
d_model = 512
# size of the hidden layer in position-wise feed-forward networks.
d_inner_hid = 2048
# the dimension that keys are projected to for dot-product attention.
d_key = 64
# the dimension that values are projected to for dot-product attention.
d_value = 64
# number of head used in multi-head attention.
n_head = 8
# number of sub-layers to be stacked in the encoder and decoder.
n_layer = 6
# dropout rates of different modules.
prepostprocess_dropout = 0.1
attention_dropout = 0.1
relu_dropout = 0.1
# to process before each sub-layer
preprocess_cmd = "n" # layer normalization
# to process after each sub-layer
postprocess_cmd = "da" # dropout + residual connection
# random seed used in dropout for CE.
dropout_seed = None
# the flag indicating whether to share embedding and softmax weights.
    # vocabularies in source and target should be the same for weight sharing.
weight_sharing = True
def merge_cfg_from_list(cfg_list, g_cfgs):
"""
Set the above global configurations using the cfg_list.
"""
assert len(cfg_list) % 2 == 0
for key, value in zip(cfg_list[0::2], cfg_list[1::2]):
for g_cfg in g_cfgs:
if hasattr(g_cfg, key):
try:
value = eval(value)
except Exception: # for file path
pass
setattr(g_cfg, key, value)
break
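# Illustrative call (comment only; the values are hypothetical):
#     merge_cfg_from_list(["batch_size", "64", "use_gpu", "False"],
#                         [TrainTaskConfig, InferTaskConfig])
# evaluates each value with eval() and, because of the `break`, assigns it to the
# first class in the list that defines the attribute (here TrainTaskConfig).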
def position_encoding_init(n_position, d_pos_vec):
"""
Generate the initial values for the sinusoid position encoding table.
"""
channels = d_pos_vec
position = np.arange(n_position)
num_timescales = channels // 2
log_timescale_increment = (np.log(float(1e4) / float(1)) /
(num_timescales - 1))
inv_timescales = np.exp(np.arange(
num_timescales)) * -log_timescale_increment
scaled_time = np.expand_dims(position, 1) * np.expand_dims(inv_timescales,
0)
signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
signal = np.pad(signal, [[0, 0], [0, np.mod(channels, 2)]], 'constant')
position_enc = signal
return position_enc.astype("float32")
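# Illustrative check (comment only): position_encoding_init(4, 512) returns a
# (4, 512) float32 array whose first 256 columns are the sin terms and the last
# 256 the cos terms, matching the concatenation and padding above.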
def create_data(is_static=False):
if is_static:
return [
src_word_np, src_pos_np, src_slf_attn_bias_np, trg_word_np,
trg_pos_np, trg_slf_attn_bias_np, trg_src_attn_bias_np, lbl_word_np,
lbl_weight_np
]
else:
enc_inputs = [
to_variable(
src_word_np, name='src_word'), to_variable(
src_pos_np, name='src_pos'), to_variable(
src_slf_attn_bias_np, name='src_slf_attn_bias')
]
dec_inputs = [
to_variable(
trg_word_np, name='trg_word'), to_variable(
trg_pos_np, name='trg_pos'), to_variable(
trg_slf_attn_bias_np, name='trg_slf_attn_bias'),
to_variable(
trg_src_attn_bias_np, name='trg_src_attn_bias')
]
label = to_variable(lbl_word_np, name='lbl_word')
weight = to_variable(lbl_weight_np, name='lbl_weight')
return enc_inputs, dec_inputs, label, weight
def create_feed_dict_list(data, init=False):
if init:
data_input_names = encoder_data_input_fields + \
decoder_data_input_fields[:-1] + label_data_input_fields + pos_enc_param_names
else:
data_input_names = encoder_data_input_fields + \
decoder_data_input_fields[:-1] + label_data_input_fields
feed_dict_list = dict()
for i in range(len(data_input_names)):
feed_dict_list[data_input_names[i]] = data[i]
return feed_dict_list
def make_all_inputs(input_fields):
"""
Define the input data layers for the transformer model.
"""
inputs = []
for input_field in input_fields:
input_var = fluid.layers.data(
name=input_field,
shape=input_descs[input_field][0],
dtype=input_descs[input_field][1],
lod_level=input_descs[input_field][2]
if len(input_descs[input_field]) == 3 else 0,
append_batch_size=False)
inputs.append(input_var)
return inputs
# The placeholder for batch_size in compile time. Must be -1 currently to be
# consistent with some ops' infer-shape output in compile time, such as the
# sequence_expand op used in beamsearch decoder.
batch_size = -1
# The placeholder for sequence length in compile time.
seq_len = ModelHyperParams.max_length
# Here list the data shapes and data types of all inputs.
# The shapes here act as placeholder and are set to pass the infer-shape in
# compile time.
input_descs = {
# The actual data shape of src_word is:
# [batch_size, max_src_len_in_batch]
"src_word": [(batch_size, seq_len), "int64", 2],
# The actual data shape of src_pos is:
# [batch_size, max_src_len_in_batch]
"src_pos": [(batch_size, seq_len), "int64"],
# This input is used to remove attention weights on paddings in the
# encoder.
# The actual data shape of src_slf_attn_bias is:
# [batch_size, n_head, max_src_len_in_batch, max_src_len_in_batch]
"src_slf_attn_bias": [(batch_size, ModelHyperParams.n_head, seq_len,
seq_len), "float32"],
# The actual data shape of trg_word is:
# [batch_size, max_trg_len_in_batch]
"trg_word": [(batch_size, seq_len), "int64",
2], # lod_level is only used in fast decoder.
# The actual data shape of trg_pos is:
# [batch_size, max_trg_len_in_batch]
"trg_pos": [(batch_size, seq_len), "int64"],
# This input is used to remove attention weights on paddings and
# subsequent words in the decoder.
# The actual data shape of trg_slf_attn_bias is:
# [batch_size, n_head, max_trg_len_in_batch, max_trg_len_in_batch]
"trg_slf_attn_bias": [(batch_size, ModelHyperParams.n_head, seq_len,
seq_len), "float32"],
# This input is used to remove attention weights on paddings of the source
# input in the encoder-decoder attention.
# The actual data shape of trg_src_attn_bias is:
# [batch_size, n_head, max_trg_len_in_batch, max_src_len_in_batch]
"trg_src_attn_bias": [(batch_size, ModelHyperParams.n_head, seq_len,
seq_len), "float32"],
# This input is used in independent decoder program for inference.
# The actual data shape of enc_output is:
# [batch_size, max_src_len_in_batch, d_model]
"enc_output": [(batch_size, seq_len, ModelHyperParams.d_model), "float32"],
# The actual data shape of label_word is:
# [batch_size * max_trg_len_in_batch, 1]
"lbl_word": [(batch_size * seq_len, 1), "int64"],
    # This input is used to mask out the loss of padding tokens.
# The actual data shape of label_weight is:
# [batch_size * max_trg_len_in_batch, 1]
"lbl_weight": [(batch_size * seq_len, 1), "float32"],
# This input is used in beam-search decoder.
"init_score": [(batch_size, 1), "float32", 2],
# This input is used in beam-search decoder for the first gather
    # (cell state update)
"init_idx": [(batch_size, ), "int32"],
}
# Names of word embedding table which might be reused for weight sharing.
word_emb_param_names = (
"src_word_emb_table",
"trg_word_emb_table", )
# Names of position encoding table which will be initialized externally.
pos_enc_param_names = (
"src_pos_enc_table",
"trg_pos_enc_table", )
# separated inputs for different usages.
encoder_data_input_fields = (
"src_word",
"src_pos",
"src_slf_attn_bias", )
decoder_data_input_fields = (
"trg_word",
"trg_pos",
"trg_slf_attn_bias",
"trg_src_attn_bias",
"enc_output", )
label_data_input_fields = (
"lbl_word",
"lbl_weight", )
# In fast decoder, trg_pos (only containing the current time step) is generated
# by ops and trg_slf_attn_bias is not needed.
fast_decoder_data_input_fields = (
"trg_word",
"init_score",
"init_idx",
"trg_src_attn_bias", )
# if we use py_reader
use_py_reader = False
# if we run sync mode
sync = False
# how many batches we use
batch_num = 5
np.random.seed(90)  # call the seeding function; assigning to it would not seed the RNG
src_word_np = np.arange(1, TrainTaskConfig.batch_size * seq_len + 1).reshape(
[TrainTaskConfig.batch_size, seq_len]).astype('int64')
src_pos_np = np.random.randint(
1, seq_len, size=(TrainTaskConfig.batch_size, seq_len), dtype='int64')
src_slf_attn_bias_np = np.random.randn(TrainTaskConfig.batch_size,
ModelHyperParams.n_head, seq_len,
seq_len).astype('float32')
trg_word_np = np.arange(1, TrainTaskConfig.batch_size * seq_len + 1).reshape(
[TrainTaskConfig.batch_size, seq_len]).astype('int64')
trg_pos_np = np.random.randint(
1, seq_len, size=(TrainTaskConfig.batch_size, seq_len), dtype='int64')
trg_slf_attn_bias_np = np.random.randn(TrainTaskConfig.batch_size,
ModelHyperParams.n_head, seq_len,
seq_len).astype('float32')
trg_src_attn_bias_np = np.random.randn(TrainTaskConfig.batch_size,
ModelHyperParams.n_head, seq_len,
seq_len).astype('float32')
lbl_word_np = np.random.randint(
1,
ModelHyperParams.src_vocab_size - 1,
size=(TrainTaskConfig.batch_size * seq_len, 1),
dtype='int64')
lbl_weight_np = np.random.randn(TrainTaskConfig.batch_size * seq_len,
1).astype('float32')
pos_inp1 = position_encoding_init(ModelHyperParams.max_length,
ModelHyperParams.d_model)
pos_inp2 = position_encoding_init(ModelHyperParams.max_length,
ModelHyperParams.d_model)
class PrePostProcessLayer(Layer):
def __init__(self, d_model, process_cmd, shape_len=None):
super(PrePostProcessLayer, self).__init__()
for cmd in process_cmd:
if cmd == "n":
self._layer_norm = LayerNorm(
normalized_shape=d_model,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(1.)),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(0.)))
def forward(self, prev_out, out, process_cmd, dropout_rate=0.):
for cmd in process_cmd:
if cmd == "a": # add residual connection
out = out + prev_out if prev_out else out
elif cmd == "n": # add layer normalization
out = self._layer_norm(out)
elif cmd == "d": # add dropout
if dropout_rate:
out = fluid.layers.dropout(
out,
dropout_prob=dropout_rate,
seed=ModelHyperParams.dropout_seed,
is_test=False)
return out
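    # Illustrative reading of the command strings (comment only): with
    # preprocess_cmd = "n" the layer normalizes its input, while
    # postprocess_cmd = "da" first applies dropout to the sub-layer output and
    # then adds the residual connection, exactly as the branches above encode.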
class PositionwiseFeedForwardLayer(Layer):
def __init__(self, d_inner_hid, d_hid, dropout_rate):
super(PositionwiseFeedForwardLayer, self).__init__()
self._i2h = Linear(d_hid, d_inner_hid, act="relu")
self._h2o = Linear(d_inner_hid, d_hid)
self._dropout_rate = dropout_rate
def forward(self, x):
hidden = self._i2h(x)
if self._dropout_rate:
hidden = fluid.layers.dropout(
hidden,
dropout_prob=self._dropout_rate,
seed=ModelHyperParams.dropout_seed,
is_test=False)
out = self._h2o(hidden)
return out
class MultiHeadAttentionLayer(Layer):
def __init__(self,
d_key,
d_value,
d_model,
n_head=1,
dropout_rate=0.,
cache=None,
gather_idx=None,
static_kv=False):
super(MultiHeadAttentionLayer, self).__init__()
self._n_head = n_head
self._d_key = d_key
self._d_value = d_value
self._d_model = d_model
self._dropout_rate = dropout_rate
self._q_fc = Linear(self._d_model, d_key * n_head, bias_attr=False)
self._k_fc = Linear(self._d_model, d_key * n_head, bias_attr=False)
self._v_fc = Linear(self._d_model, d_value * n_head, bias_attr=False)
self._proj_fc = Linear(d_value * n_head, self._d_model, bias_attr=False)
def forward(self, queries, keys, values, attn_bias):
# compute q ,k ,v
keys = queries if keys is None else keys
values = keys if values is None else values
q = self._q_fc(queries)
k = self._k_fc(keys)
v = self._v_fc(values)
# split head
reshaped_q = fluid.layers.reshape(
x=q, shape=[0, 0, self._n_head, self._d_key], inplace=False)
transpose_q = fluid.layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3])
reshaped_k = fluid.layers.reshape(
x=k, shape=[0, 0, self._n_head, self._d_key], inplace=False)
transpose_k = fluid.layers.transpose(x=reshaped_k, perm=[0, 2, 1, 3])
reshaped_v = fluid.layers.reshape(
x=v, shape=[0, 0, self._n_head, self._d_value], inplace=False)
transpose_v = fluid.layers.transpose(x=reshaped_v, perm=[0, 2, 1, 3])
        # scaled dot-product attention
product = fluid.layers.matmul(
x=transpose_q,
y=transpose_k,
transpose_y=True,
alpha=self._d_model**-0.5)
if attn_bias:
product += attn_bias
weights = fluid.layers.softmax(product)
if self._dropout_rate:
weights_droped = fluid.layers.dropout(
weights,
dropout_prob=self._dropout_rate,
seed=ModelHyperParams.dropout_seed,
is_test=False)
out = fluid.layers.matmul(weights_droped, transpose_v)
else:
out = fluid.layers.matmul(weights, transpose_v)
# combine heads
if len(out.shape) != 4:
raise ValueError("Input(x) should be a 4-D Tensor.")
trans_x = fluid.layers.transpose(out, perm=[0, 2, 1, 3])
final_out = fluid.layers.reshape(
x=trans_x,
shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],
inplace=False)
# fc to output
proj_out = self._proj_fc(final_out)
return proj_out
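    # Shape sketch (comment only, for the defaults d_model=512, n_head=8,
    # d_key=d_value=64): queries of shape [batch, seq_len, 512] are projected and
    # split into [batch, 8, seq_len, 64] heads, scored with a d_model**-0.5 scale
    # as written above, then merged back to [batch, seq_len, 512] and projected.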
class EncoderSubLayer(Layer):
def __init__(self,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
preprocess_cmd="n",
postprocess_cmd="da"):
super(EncoderSubLayer, self).__init__()
self._preprocess_cmd = preprocess_cmd
self._postprocess_cmd = postprocess_cmd
self._prepostprocess_dropout = prepostprocess_dropout
self._preprocess_layer = PrePostProcessLayer(d_model,
self._preprocess_cmd, 3)
self._multihead_attention_layer = MultiHeadAttentionLayer(
d_key, d_value, d_model, n_head, attention_dropout)
self._postprocess_layer = PrePostProcessLayer(
d_model, self._postprocess_cmd, None)
self._preprocess_layer2 = PrePostProcessLayer(d_model,
self._preprocess_cmd, 3)
self._positionwise_feed_forward = PositionwiseFeedForwardLayer(
d_inner_hid, d_model, relu_dropout)
self._postprocess_layer2 = PrePostProcessLayer(
d_model, self._postprocess_cmd, None)
def forward(self, enc_input, attn_bias):
pre_process_multihead = self._preprocess_layer(
None, enc_input, self._preprocess_cmd, self._prepostprocess_dropout)
attn_output = self._multihead_attention_layer(pre_process_multihead,
None, None, attn_bias)
attn_output = self._postprocess_layer(enc_input, attn_output,
self._postprocess_cmd,
self._prepostprocess_dropout)
pre_process2_output = self._preprocess_layer2(
None, attn_output, self._preprocess_cmd,
self._prepostprocess_dropout)
ffd_output = self._positionwise_feed_forward(pre_process2_output)
return self._postprocess_layer2(attn_output, ffd_output,
self._postprocess_cmd,
self._prepostprocess_dropout)
class EncoderLayer(Layer):
def __init__(self,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
preprocess_cmd="n",
postprocess_cmd="da"):
super(EncoderLayer, self).__init__()
self._preprocess_cmd = preprocess_cmd
self._encoder_sublayers = list()
self._prepostprocess_dropout = prepostprocess_dropout
self._n_layer = n_layer
self._preprocess_layer = PrePostProcessLayer(d_model,
self._preprocess_cmd, 3)
for i in range(n_layer):
self._encoder_sublayers.append(
self.add_sublayer(
'esl_%d' % i,
EncoderSubLayer(n_head, d_key, d_value, d_model,
d_inner_hid, prepostprocess_dropout,
attention_dropout, relu_dropout,
preprocess_cmd, postprocess_cmd)))
def forward(self, enc_input, attn_bias):
for i in range(self._n_layer):
enc_output = self._encoder_sublayers[i](enc_input, attn_bias)
enc_input = enc_output
return self._preprocess_layer(None, enc_output, self._preprocess_cmd,
self._prepostprocess_dropout)
class PrepareEncoderDecoderLayer(Layer):
def __init__(self,
src_vocab_size,
src_emb_dim,
src_max_len,
dropout_rate,
is_sparse=False,
word_emb_param_name=None,
pos_enc_param_name=None):
super(PrepareEncoderDecoderLayer, self).__init__()
self._src_max_len = src_max_len
self._src_emb_dim = src_emb_dim
self._src_vocab_size = src_vocab_size
self._dropout_rate = dropout_rate
self._input_emb = Embedding(
size=[src_vocab_size, src_emb_dim],
is_sparse=is_sparse,
padding_idx=0,
param_attr=fluid.ParamAttr(
name=word_emb_param_name,
initializer=fluid.initializer.Normal(0., src_emb_dim**-0.5)))
if pos_enc_param_name is pos_enc_param_names[0]:
pos_inp = pos_inp1
else:
pos_inp = pos_inp2
self._pos_emb = Embedding(
size=[self._src_max_len, src_emb_dim],
is_sparse=is_sparse,
param_attr=fluid.ParamAttr(
name=pos_enc_param_name,
initializer=fluid.initializer.NumpyArrayInitializer(pos_inp),
trainable=False))
        # used in dygraph mode to fit batches of different lengths
# self._pos_emb._w = to_variable(
# position_encoding_init(self._src_max_len, self._src_emb_dim))
def forward(self, src_word, src_pos):
src_word_emb = self._input_emb(src_word)
src_word_emb = fluid.layers.scale(
x=src_word_emb, scale=self._src_emb_dim**0.5)
        # TODO: change this to fit dynamic-length input
src_pos_emb = self._pos_emb(src_pos)
src_pos_emb.stop_gradient = True
enc_input = src_word_emb + src_pos_emb
return fluid.layers.dropout(
enc_input,
dropout_prob=self._dropout_rate,
seed=ModelHyperParams.dropout_seed,
is_test=False) if self._dropout_rate else enc_input
class WrapEncoderLayer(Layer):
def __init__(self,
src_vocab_size,
max_length,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
preprocess_cmd,
postprocess_cmd,
weight_sharing,
is_sparse=False):
"""
The wrapper assembles together all needed layers for the encoder.
"""
super(WrapEncoderLayer, self).__init__()
self._prepare_encoder_layer = PrepareEncoderDecoderLayer(
src_vocab_size,
d_model,
max_length,
prepostprocess_dropout,
is_sparse=is_sparse,
word_emb_param_name=word_emb_param_names[0],
pos_enc_param_name=pos_enc_param_names[0])
self._encoder = EncoderLayer(n_layer, n_head, d_key, d_value, d_model,
d_inner_hid, prepostprocess_dropout,
attention_dropout, relu_dropout,
preprocess_cmd, postprocess_cmd)
def forward(self, enc_inputs):
src_word, src_pos, src_slf_attn_bias = enc_inputs
enc_input = self._prepare_encoder_layer(src_word, src_pos)
enc_output = self._encoder(enc_input, src_slf_attn_bias)
return enc_output
class DecoderSubLayer(Layer):
def __init__(self,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
preprocess_cmd,
postprocess_cmd,
cache=None,
gather_idx=None):
super(DecoderSubLayer, self).__init__()
self._postprocess_cmd = postprocess_cmd
self._preprocess_cmd = preprocess_cmd
self._prepostprcess_dropout = prepostprocess_dropout
self._pre_process_layer = PrePostProcessLayer(d_model, preprocess_cmd,
3)
self._multihead_attention_layer = MultiHeadAttentionLayer(
d_key,
d_value,
d_model,
n_head,
attention_dropout,
cache=cache,
gather_idx=gather_idx)
self._post_process_layer = PrePostProcessLayer(d_model, postprocess_cmd,
None)
self._pre_process_layer2 = PrePostProcessLayer(d_model, preprocess_cmd,
3)
self._multihead_attention_layer2 = MultiHeadAttentionLayer(
d_key,
d_value,
d_model,
n_head,
attention_dropout,
cache=cache,
gather_idx=gather_idx,
static_kv=True)
self._post_process_layer2 = PrePostProcessLayer(d_model,
postprocess_cmd, None)
self._pre_process_layer3 = PrePostProcessLayer(d_model, preprocess_cmd,
3)
self._positionwise_feed_forward_layer = PositionwiseFeedForwardLayer(
d_inner_hid, d_model, relu_dropout)
self._post_process_layer3 = PrePostProcessLayer(d_model,
postprocess_cmd, None)
def forward(self, dec_input, enc_output, slf_attn_bias, dec_enc_attn_bias):
pre_process_rlt = self._pre_process_layer(
None, dec_input, self._preprocess_cmd, self._prepostprcess_dropout)
slf_attn_output = self._multihead_attention_layer(pre_process_rlt, None,
None, slf_attn_bias)
slf_attn_output_pp = self._post_process_layer(
dec_input, slf_attn_output, self._postprocess_cmd,
self._prepostprcess_dropout)
pre_process_rlt2 = self._pre_process_layer2(None, slf_attn_output_pp,
self._preprocess_cmd,
self._prepostprcess_dropout)
enc_attn_output_pp = self._multihead_attention_layer2(
pre_process_rlt2, enc_output, enc_output, dec_enc_attn_bias)
enc_attn_output = self._post_process_layer2(
slf_attn_output_pp, enc_attn_output_pp, self._postprocess_cmd,
self._prepostprcess_dropout)
pre_process_rlt3 = self._pre_process_layer3(None, enc_attn_output,
self._preprocess_cmd,
self._prepostprcess_dropout)
ffd_output = self._positionwise_feed_forward_layer(pre_process_rlt3)
dec_output = self._post_process_layer3(enc_attn_output, ffd_output,
self._postprocess_cmd,
self._prepostprcess_dropout)
return dec_output
class DecoderLayer(Layer):
def __init__(self,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
preprocess_cmd,
postprocess_cmd,
caches=None,
gather_idx=None):
super(DecoderLayer, self).__init__()
self._pre_process_layer = PrePostProcessLayer(d_model, preprocess_cmd,
3)
self._decoder_sub_layers = list()
self._n_layer = n_layer
self._preprocess_cmd = preprocess_cmd
self._prepostprocess_dropout = prepostprocess_dropout
for i in range(n_layer):
self._decoder_sub_layers.append(
self.add_sublayer(
'dsl_%d' % i,
DecoderSubLayer(
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
preprocess_cmd,
postprocess_cmd,
cache=None if caches is None else caches[i],
gather_idx=gather_idx)))
def forward(self, dec_input, enc_output, dec_slf_attn_bias,
dec_enc_attn_bias):
for i in range(self._n_layer):
tmp_dec_output = self._decoder_sub_layers[i](
dec_input, enc_output, dec_slf_attn_bias, dec_enc_attn_bias)
dec_input = tmp_dec_output
dec_output = self._pre_process_layer(None, tmp_dec_output,
self._preprocess_cmd,
self._prepostprocess_dropout)
return dec_output
class WrapDecoderLayer(Layer):
def __init__(self,
trg_vocab_size,
max_length,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
preprocess_cmd,
postprocess_cmd,
weight_sharing,
caches=None,
gather_idx=None,
is_sparse=False):
"""
        The wrapper assembles together all needed layers for the decoder.
"""
super(WrapDecoderLayer, self).__init__()
self._prepare_decoder_layer = PrepareEncoderDecoderLayer(
trg_vocab_size,
d_model,
max_length,
prepostprocess_dropout,
is_sparse=is_sparse,
word_emb_param_name=word_emb_param_names[1],
pos_enc_param_name=pos_enc_param_names[1])
self._decoder_layer = DecoderLayer(
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
preprocess_cmd,
postprocess_cmd,
caches=caches,
gather_idx=gather_idx)
self._weight_sharing = weight_sharing
if not weight_sharing:
self._fc = Linear(d_model, trg_vocab_size, bias_attr=False)
def forward(self, dec_inputs=None, enc_output=None):
trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias = dec_inputs
dec_input = self._prepare_decoder_layer(trg_word, trg_pos)
dec_output = self._decoder_layer(dec_input, enc_output,
trg_slf_attn_bias, trg_src_attn_bias)
dec_output_reshape = fluid.layers.reshape(
dec_output, shape=[-1, dec_output.shape[-1]], inplace=False)
if self._weight_sharing:
predict = fluid.layers.matmul(
x=dec_output_reshape,
y=self._prepare_decoder_layer._input_emb._w,
transpose_y=True)
else:
predict = self._fc(dec_output_reshape)
if dec_inputs is None:
# Return probs for independent decoder program.
predict_out = fluid.layers.softmax(predict)
return predict_out
return predict
class TransFormer(Layer):
def __init__(self,
src_vocab_size,
trg_vocab_size,
max_length,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
preprocess_cmd,
postprocess_cmd,
weight_sharing,
label_smooth_eps,
use_py_reader=False,
is_test=False,
is_sparse=False):
super(TransFormer, self).__init__()
self._label_smooth_eps = label_smooth_eps
self._trg_vocab_size = trg_vocab_size
if weight_sharing:
assert src_vocab_size == trg_vocab_size, (
"Vocabularies in source and target should be same for weight sharing."
)
self._wrap_encoder_layer = WrapEncoderLayer(
src_vocab_size,
max_length,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
preprocess_cmd,
postprocess_cmd,
weight_sharing,
is_sparse=is_sparse)
self._wrap_decoder_layer = WrapDecoderLayer(
trg_vocab_size,
max_length,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
preprocess_cmd,
postprocess_cmd,
weight_sharing,
is_sparse=is_sparse)
if weight_sharing:
self._wrap_decoder_layer._prepare_decoder_layer._input_emb._w = self._wrap_encoder_layer._prepare_encoder_layer._input_emb._w
def forward(self, enc_inputs, dec_inputs, label, weights):
enc_output = self._wrap_encoder_layer(enc_inputs)
predict = self._wrap_decoder_layer(dec_inputs, enc_output)
if self._label_smooth_eps:
label_out = fluid.layers.label_smooth(
label=fluid.layers.one_hot(
input=label, depth=self._trg_vocab_size),
epsilon=self._label_smooth_eps)
cost = fluid.layers.softmax_with_cross_entropy(
logits=predict,
label=label_out,
soft_label=True if self._label_smooth_eps else False)
weighted_cost = cost * weights
sum_cost = fluid.layers.reduce_sum(weighted_cost)
token_num = fluid.layers.reduce_sum(weights)
token_num.stop_gradient = True
avg_cost = sum_cost / token_num
return sum_cost, avg_cost, predict, token_num
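# A minimal illustrative sketch (not part of the original file) of the loss
# bookkeeping in TransFormer.forward above: smooth the one-hot labels, weight
# the per-token soft-label cross entropy by `weights` (0 for padding), and
# normalise by the number of real tokens. Sizes and values are made up.
def _label_smoothing_loss_sketch():
    import numpy as np
    vocab, eps = 5, 0.1
    label = np.array([2, 0, 3])                        # token ids
    one_hot = np.eye(vocab)[label]
    smoothed = one_hot * (1.0 - eps) + eps / vocab     # label smoothing
    logits = np.random.rand(len(label), vocab)
    log_probs = logits - np.log(np.exp(logits).sum(axis=1, keepdims=True))
    cost = -(smoothed * log_probs).sum(axis=1)         # soft-label cross entropy
    weights = np.array([1.0, 1.0, 0.0])                # last position is padding
    sum_cost = (cost * weights).sum()
    avg_cost = sum_cost / weights.sum()                # averaged over real tokens
    return sum_cost, avg_cost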
class TestDygraphTransformerSortGradient(unittest.TestCase):
def test_transformer_sort_gradient(self):
for is_sparse in [True, False]:
self.transformer_sort_gradient_float32(is_sparse)
def transformer_sort_gradient_float32(self, is_sparse):
seed = 90
with guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
transformer = TransFormer(
ModelHyperParams.src_vocab_size,
ModelHyperParams.trg_vocab_size,
ModelHyperParams.max_length + 1,
ModelHyperParams.n_layer,
ModelHyperParams.n_head,
ModelHyperParams.d_key,
ModelHyperParams.d_value,
ModelHyperParams.d_model,
ModelHyperParams.d_inner_hid,
ModelHyperParams.prepostprocess_dropout,
ModelHyperParams.attention_dropout,
ModelHyperParams.relu_dropout,
ModelHyperParams.preprocess_cmd,
ModelHyperParams.postprocess_cmd,
ModelHyperParams.weight_sharing,
TrainTaskConfig.label_smooth_eps,
use_py_reader=use_py_reader,
is_test=False,
is_sparse=is_sparse)
if sync:
lr_decay = fluid.layers.learning_rate_scheduler.noam_decay(
ModelHyperParams.d_model, TrainTaskConfig.warmup_steps)
with fluid.default_main_program()._lr_schedule_guard():
learning_rate = lr_decay * TrainTaskConfig.learning_rate
optimizer = fluid.optimizer.Adam(
learning_rate=learning_rate,
beta1=TrainTaskConfig.beta1,
beta2=TrainTaskConfig.beta2,
epsilon=TrainTaskConfig.eps,
parameter_list=transformer.parameters())
else:
optimizer = fluid.optimizer.SGD(
learning_rate=0.003,
parameter_list=transformer.parameters())
dy_param_init = dict()
dy_param_updated = dict()
helper = DyGraphProgramDescTracerTestHelper(self)
program = None
for i in range(batch_num):
enc_inputs, dec_inputs, label, weights = create_data()
if i % 2 == 0:
outs, traced_layer = TracedLayer.trace(
transformer, [enc_inputs, dec_inputs, label, weights])
ins_static = enc_inputs + dec_inputs + [label, weights]
outs_static = traced_layer(ins_static)
helper.assertEachVar(outs, outs_static)
if program is not None:
self.assertTrue(
is_equal_program(program, traced_layer.program))
program = traced_layer.program
traced_layer.save_inference_model(
'./infer_imperative_transformer',
feed=range(len(ins_static)),
fetch=range(len(outs_static)))
else:
outs = transformer(enc_inputs, dec_inputs, label, weights)
dy_sum_cost, dy_avg_cost, dy_predict, dy_token_num = outs
if i == 0:
for param in transformer.parameters():
dy_param_init[param.name] = param.numpy()
dy_avg_cost.backward(backward_strategy)
optimizer.minimize(dy_avg_cost)
transformer.clear_gradients()
if i == batch_num - 1:
for param in transformer.parameters():
dy_param_updated[param.name] = param.numpy()
dy_avg_cost_value = dy_avg_cost.numpy()
dy_sum_cost_value = dy_sum_cost.numpy()
dy_predict_value = dy_predict.numpy()
dy_token_num_value = dy_token_num.numpy()
with new_program_scope():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
transformer = TransFormer(
ModelHyperParams.src_vocab_size,
ModelHyperParams.trg_vocab_size,
ModelHyperParams.max_length + 1,
ModelHyperParams.n_layer,
ModelHyperParams.n_head,
ModelHyperParams.d_key,
ModelHyperParams.d_value,
ModelHyperParams.d_model,
ModelHyperParams.d_inner_hid,
ModelHyperParams.prepostprocess_dropout,
ModelHyperParams.attention_dropout,
ModelHyperParams.relu_dropout,
ModelHyperParams.preprocess_cmd,
ModelHyperParams.postprocess_cmd,
ModelHyperParams.weight_sharing,
TrainTaskConfig.label_smooth_eps,
use_py_reader=use_py_reader,
is_test=False,
is_sparse=is_sparse)
exe = fluid.Executor(fluid.CPUPlace(
) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
optimizer = fluid.optimizer.SGD(learning_rate=0.003)
data_input_names = encoder_data_input_fields + decoder_data_input_fields[:
-1] + label_data_input_fields
all_inputs = make_all_inputs(data_input_names)
enc_inputs_len = len(encoder_data_input_fields)
dec_inputs_len = len(decoder_data_input_fields[:-1])
enc_inputs = all_inputs[0:enc_inputs_len]
dec_inputs = all_inputs[enc_inputs_len:enc_inputs_len +
dec_inputs_len]
label = all_inputs[-2]
weights = all_inputs[-1]
static_param_updated = dict()
static_param_init = dict()
static_param_name_list = list()
static_sum_cost, static_avg_cost, static_predict, static_token_num = transformer(
enc_inputs, dec_inputs, label, weights)
optimizer.minimize(static_avg_cost)
for param in transformer.parameters():
static_param_name_list.append(param.name)
out = exe.run(fluid.default_startup_program(),
fetch_list=static_param_name_list)
for i in range(len(static_param_name_list)):
static_param_init[static_param_name_list[i]] = out[i]
static_sum_cost_value = None
static_avg_cost_value = None
static_predict_value = None
static_token_num_value = None
for i in range(batch_num):
feed_dict = create_feed_dict_list(create_data(True))
fetch_list = [
static_sum_cost, static_avg_cost, static_predict,
static_token_num
]
fetch_list.extend(static_param_name_list)
out = exe.run(fluid.default_main_program(),
feed=feed_dict,
fetch_list=fetch_list)
static_sum_cost_value = out[0]
static_avg_cost_value = out[1]
static_predict_value = out[2]
static_token_num_value = out[3]
if i == batch_num - 1:
for k in range(4, len(out)):
static_param_updated[static_param_name_list[k -
4]] = out[k]
self.assertTrue(
np.array_equal(static_avg_cost_value, dy_avg_cost_value))
self.assertTrue(
np.array_equal(static_sum_cost_value, dy_sum_cost_value))
self.assertTrue(np.array_equal(static_predict_value, dy_predict_value))
self.assertTrue(
np.array_equal(static_token_num_value, dy_token_num_value))
for key, value in six.iteritems(static_param_init):
self.assertTrue(np.array_equal(value, dy_param_init[key]))
for key, value in six.iteritems(static_param_updated):
self.assertTrue(np.array_equal(value, dy_param_updated[key]))
if __name__ == '__main__':
unittest.main()
| 40.435169
| 137
| 0.594619
|
b30fcd8d7451e463fcd7b5db0d7ef6cb16f0786e
| 2,198
|
py
|
Python
|
tests/test_api.py
|
cclauss/personfinder
|
62417192e79c9711d0c6c7cfc042f6d6b0dc2dc2
|
[
"Apache-2.0"
] | 1
|
2021-11-18T20:09:09.000Z
|
2021-11-18T20:09:09.000Z
|
tests/test_api.py
|
cclauss/personfinder
|
62417192e79c9711d0c6c7cfc042f6d6b0dc2dc2
|
[
"Apache-2.0"
] | 8
|
2021-03-11T01:06:03.000Z
|
2022-02-27T10:46:12.000Z
|
tests/test_api.py
|
cclauss/personfinder
|
62417192e79c9711d0c6c7cfc042f6d6b0dc2dc2
|
[
"Apache-2.0"
] | 1
|
2022-01-05T07:06:43.000Z
|
2022-01-05T07:06:43.000Z
|
#!/usr/bin/python2.7
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for api.py module."""
__author__ = 'ichikawa@google.com (Hiroshi Ichikawa)'
import datetime
import unittest
import api
import model
import test_handler
from google.appengine.ext import testbed
class APITests(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_user_stub()
def tearDown(self):
self.testbed.deactivate()
def test_sms_render_person(self):
handler = test_handler.initialize_handler(
api.HandleSMS, 'api/handle_sms')
person = model.Person.create_original(
'haiti',
full_name='John Smith',
latest_status='believed_alive',
sex='male',
age='30',
home_city='Los Angeles',
home_state='California',
entry_date=datetime.datetime(2010, 1, 1))
assert (handler.render_person(person) ==
'John Smith / '
'Someone has received information that this person is alive / '
'male / 30 / From: Los Angeles California')
person = model.Person.create_original(
'haiti',
full_name='John Smith',
entry_date=datetime.datetime(2010, 1, 1))
assert handler.render_person(person) == 'John Smith'
person = model.Person.create_original(
'haiti',
full_name='John Smith',
home_state='California',
entry_date=datetime.datetime(2010, 1, 1))
assert handler.render_person(person) == 'John Smith / From: California'
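# A hypothetical, simplified formatter (not the real api.HandleSMS.render_person,
# which is not shown here) illustrating the output format the assertions above
# expect: available fields joined by ' / ', with the location prefixed by 'From:'.
def _render_person_sketch(full_name, status_text=None, sex=None, age=None,
                          home_city=None, home_state=None):
    parts = [full_name]
    for field in (status_text, sex, age):
        if field:
            parts.append(field)
    location = ' '.join(p for p in (home_city, home_state) if p)
    if location:
        parts.append('From: ' + location)
    return ' / '.join(parts)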
| 31.855072
| 79
| 0.650591
|
d19660c7bb95fbbedcd40ac7bb2336b47b29560f
| 9,057
|
py
|
Python
|
vn.trader/ctaAlgo/strategyAtrRsi.py
|
Zollern233/vnpy
|
c813ec970b241c7761a7b020bf2b70c1010b508f
|
[
"MIT"
] | 2
|
2016-11-04T01:35:47.000Z
|
2016-11-04T01:35:50.000Z
|
vn.trader/ctaAlgo/strategyAtrRsi.py
|
samson-huang/vnpy
|
c813ec970b241c7761a7b020bf2b70c1010b508f
|
[
"MIT"
] | null | null | null |
vn.trader/ctaAlgo/strategyAtrRsi.py
|
samson-huang/vnpy
|
c813ec970b241c7761a7b020bf2b70c1010b508f
|
[
"MIT"
] | 1
|
2020-05-26T12:50:30.000Z
|
2020-05-26T12:50:30.000Z
|
# encoding: UTF-8
"""
A trading strategy combining the ATR and RSI indicators, suitable for 1-minute and 5-minute bars of stock index futures.
Notes:
1. The author makes no guarantee of trading profit; the strategy code is for reference only.
2. This strategy requires talib; users who have not installed it should first follow the tutorial on www.vnpy.org.
3. After importing IF0000_1min.csv into MongoDB with ctaHistoryData.py, run this file directly to backtest the strategy.
"""
from ctaBase import *
from ctaTemplate import CtaTemplate
import talib
import numpy as np
########################################################################
class AtrRsiStrategy(CtaTemplate):
"""结合ATR和RSI指标的一个分钟线交易策略"""
className = 'AtrRsiStrategy'
author = u'用Python的交易员'
    # Strategy parameters
    atrLength = 22          # window size for the ATR indicator
    atrMaLength = 10        # window size for the ATR moving average
    rsiLength = 5           # window size for the RSI indicator
    rsiEntry = 16           # RSI entry signal threshold
    trailingPercent = 0.8   # trailing stop, in percent
    initDays = 10           # number of days of data used for initialization
    # Strategy variables
    bar = None                  # K-line (bar) object
    barMinute = EMPTY_STRING    # current minute of the bar
    bufferSize = 100                    # size of the data buffer
    bufferCount = 0                     # number of bars buffered so far
    highArray = np.zeros(bufferSize)    # array of bar highs
    lowArray = np.zeros(bufferSize)     # array of bar lows
    closeArray = np.zeros(bufferSize)   # array of bar closes
    atrCount = 0                        # number of ATR values buffered so far
    atrArray = np.zeros(bufferSize)     # array of ATR values
    atrValue = 0                        # latest ATR value
    atrMa = 0                           # ATR moving average value
    rsiValue = 0                        # latest RSI value
    rsiBuy = 0                          # RSI threshold for opening long
    rsiSell = 0                         # RSI threshold for opening short
    intraTradeHigh = 0                  # highest price while in a trade, for the trailing stop
    intraTradeLow = 0                   # lowest price while in a trade, for the trailing stop
    orderList = []                      # list of pending order IDs
    # Parameter list, holding the names of the parameters
paramList = ['name',
'className',
'author',
'vtSymbol',
'atrLength',
'atrMaLength',
'rsiLength',
'rsiEntry',
'trailingPercent']
    # Variable list, holding the names of the variables
varList = ['inited',
'trading',
'pos',
'atrValue',
'atrMa',
'rsiValue',
'rsiBuy',
'rsiSell']
#----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""Constructor"""
super(AtrRsiStrategy, self).__init__(ctaEngine, setting)
        # Note: mutable attributes of the strategy class (typically list, dict, etc.)
        # must be re-created when the strategy is initialized; otherwise data would be
        # shared between strategy instances, which can lead to subtle logic errors.
        # These mutable class attributes could instead be defined only in __init__;
        # declaring them at class level is mainly for readability when reading the
        # strategy (largely a matter of coding style).
#----------------------------------------------------------------------
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略初始化' %self.name)
# 初始化RSI入场阈值
self.rsiBuy = 50 + self.rsiEntry
self.rsiSell = 50 - self.rsiEntry
        # Load historical data and replay it to initialize the strategy values
initData = self.loadBar(self.initDays)
for bar in initData:
self.onBar(bar)
self.putEvent()
#----------------------------------------------------------------------
def onStart(self):
"""启动策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略启动' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略停止' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送(必须由用户继承实现)"""
# 计算K线
tickMinute = tick.datetime.minute
if tickMinute != self.barMinute:
if self.bar:
self.onBar(self.bar)
bar = CtaBarData()
bar.vtSymbol = tick.vtSymbol
bar.symbol = tick.symbol
bar.exchange = tick.exchange
bar.open = tick.lastPrice
bar.high = tick.lastPrice
bar.low = tick.lastPrice
bar.close = tick.lastPrice
bar.date = tick.date
bar.time = tick.time
            bar.datetime = tick.datetime    # the bar's time is the time of its first tick
            self.bar = bar                  # direct reference saves one attribute lookup (speed)
            self.barMinute = tickMinute     # update the current minute
        else:                               # otherwise keep accumulating into the current bar
            bar = self.bar                  # again a direct reference, for speed
bar.high = max(bar.high, tick.lastPrice)
bar.low = min(bar.low, tick.lastPrice)
bar.close = tick.lastPrice
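    # A minimal standalone sketch (not part of the original strategy) of the same
    # tick-to-bar aggregation onTick() performs above: a new 1-minute bar starts
    # when the tick's minute changes, otherwise high/low/close are updated in
    # place. Plain (minute, price) tuples stand in for vn.py tick objects.
    @staticmethod
    def _aggregate_minute_bars_sketch(ticks):
        bars, current = [], None
        for minute, price in ticks:
            if current is None or current['minute'] != minute:
                if current is not None:
                    bars.append(current)
                current = {'minute': minute, 'open': price, 'high': price,
                           'low': price, 'close': price}
            else:
                current['high'] = max(current['high'], price)
                current['low'] = min(current['low'], price)
                current['close'] = price
        if current is not None:
            bars.append(current)
        return bars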
#----------------------------------------------------------------------
def onBar(self, bar):
"""收到Bar推送(必须由用户继承实现)"""
# 撤销之前发出的尚未成交的委托(包括限价单和停止单)
for orderID in self.orderList:
self.cancelOrder(orderID)
self.orderList = []
        # Store the bar data
self.closeArray[0:self.bufferSize-1] = self.closeArray[1:self.bufferSize]
self.highArray[0:self.bufferSize-1] = self.highArray[1:self.bufferSize]
self.lowArray[0:self.bufferSize-1] = self.lowArray[1:self.bufferSize]
self.closeArray[-1] = bar.close
self.highArray[-1] = bar.high
self.lowArray[-1] = bar.low
self.bufferCount += 1
if self.bufferCount < self.bufferSize:
return
        # Compute the indicator values
self.atrValue = talib.ATR(self.highArray,
self.lowArray,
self.closeArray,
self.atrLength)[-1]
self.atrArray[0:self.bufferSize-1] = self.atrArray[1:self.bufferSize]
self.atrArray[-1] = self.atrValue
self.atrCount += 1
if self.atrCount < self.bufferSize:
return
self.atrMa = talib.MA(self.atrArray,
self.atrMaLength)[-1]
self.rsiValue = talib.RSI(self.closeArray,
self.rsiLength)[-1]
        # Decide whether to trade
        # Currently no position
if self.pos == 0:
self.intraTradeHigh = bar.high
self.intraTradeLow = bar.low
            # ATR crossing above its moving average means short-term volatility is rising,
            # i.e. the market is more likely trending, which suits opening a CTA position
if self.atrValue > self.atrMa:
                # In a trending market the RSI saturates in the overbought/oversold zones; use that as the entry signal
if self.rsiValue > self.rsiBuy:
                    # To make sure the order fills, place it 5 whole index points beyond the market price
self.buy(bar.close+5, 1)
elif self.rsiValue < self.rsiSell:
self.short(bar.close-5, 1)
        # Holding a long position
elif self.pos > 0:
            # Track the highest price during the long holding period, and reset the lowest
self.intraTradeHigh = max(self.intraTradeHigh, bar.high)
self.intraTradeLow = bar.low
            # Compute the trailing stop for the long position
longStop = self.intraTradeHigh * (1-self.trailingPercent/100)
            # Send a local stop order and record its ID for later cancellation
orderID = self.sell(longStop, 1, stop=True)
self.orderList.append(orderID)
        # Holding a short position
elif self.pos < 0:
self.intraTradeLow = min(self.intraTradeLow, bar.low)
self.intraTradeHigh = bar.high
shortStop = self.intraTradeLow * (1+self.trailingPercent/100)
orderID = self.cover(shortStop, 1, stop=True)
self.orderList.append(orderID)
        # Emit a status update event
self.putEvent()
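    # A small sketch (not part of the original strategy) of the trailing-stop
    # arithmetic used above: longs stop a fixed percentage below the running
    # high of the trade, shorts stop the same percentage above the running low.
    @staticmethod
    def _trailing_stop_sketch(intra_trade_high, intra_trade_low,
                              trailing_percent=0.8):
        long_stop = intra_trade_high * (1 - trailing_percent / 100.0)
        short_stop = intra_trade_low * (1 + trailing_percent / 100.0)
        return long_stop, short_stop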
#----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
pass
#----------------------------------------------------------------------
def onTrade(self, trade):
pass
if __name__ == '__main__':
    # Allow backtesting by running this file directly (double-click)
    # PyQt4 is imported to make sure matplotlib uses PyQt4 rather than PySide, avoiding initialization errors
from ctaBacktesting import *
from PyQt4 import QtCore, QtGui
    # Create the backtesting engine
    engine = BacktestingEngine()
    # Set the engine's backtesting mode to bar (K-line) mode
    engine.setBacktestingMode(engine.BAR_MODE)
    # Set the start date of the backtest data
    engine.setStartDate('20120101')
    # Set product-related parameters
    engine.setSlippage(0.2)     # one tick of the stock index
    engine.setRate(0.3/10000)   # commission of 0.3 per 10,000
    engine.setSize(300)         # contract multiplier of the stock index
    # Set the historical database to use
    engine.setDatabase(MINUTE_DB_NAME, 'IF0000')
    ## Create the strategy object in the engine
    #d = {'atrLength': 11}
    #engine.initStrategy(AtrRsiStrategy, d)
    ## Run the backtest
    ##engine.runBacktesting()
    ## Show the backtest results
    ##engine.showBacktestingResult()
    # Run parameter optimization
    setting = OptimizationSetting()                 # create an optimization task settings object
    setting.setOptimizeTarget('capital')            # rank optimization results by the strategy's net profit
    setting.addParameter('atrLength', 11, 20, 1)    # first optimization parameter: atrLength, from 11 to 20, step 1
    setting.addParameter('atrMa', 20, 30, 5)        # second optimization parameter: atrMa, from 20 to 30, step 5
    # Benchmark environment: i7-3770 @ 3.4 GHz, 8 cores, 16 GB RAM, Windows 7 Professional
    # A number of other programs were running during the test, so the timings are indicative only
    import time
    start = time.time()
    # Single-process optimization (results are printed automatically), took about 359 seconds
    #engine.runOptimization(AtrRsiStrategy, setting)
    # Multi-process optimization, took about 89 seconds
    engine.runParallelOptimization(AtrRsiStrategy, setting)
    print u'Elapsed time: %s' % (time.time()-start)
| 31.3391
| 86
| 0.499503
|
5bbc27691ef60710be730a34518a081349fca24c
| 6,412
|
py
|
Python
|
satcfe/rede.py
|
base4sistemas/satcfe
|
530c4cda3ba0ee0114c73d78cdfd47ba79427cbf
|
[
"Apache-2.0"
] | 38
|
2015-05-25T02:57:16.000Z
|
2022-01-18T21:01:49.000Z
|
satcfe/rede.py
|
base4sistemas/satcfe
|
530c4cda3ba0ee0114c73d78cdfd47ba79427cbf
|
[
"Apache-2.0"
] | 15
|
2015-08-19T13:30:46.000Z
|
2022-01-19T22:34:17.000Z
|
satcfe/rede.py
|
base4sistemas/satcfe
|
530c4cda3ba0ee0114c73d78cdfd47ba79427cbf
|
[
"Apache-2.0"
] | 13
|
2015-05-07T01:10:12.000Z
|
2022-02-04T14:30:01.000Z
|
# -*- coding: utf-8 -*-
#
# satcfe/rede.py
#
# Copyright 2015 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import xml.etree.ElementTree as ET
from satcomum.constantes import REDE_TIPOINTER_OPCOES
from satcomum.constantes import REDE_SEG_OPCOES
from satcomum.constantes import REDE_TIPOLAN_OPCOES
from satcomum.constantes import REDE_PROXY_OPCOES
from .entidades import Entidade
class ConfiguracaoRede(Entidade):
"""Uma entidade que contém os parâmetros de configurações da interface de
rede do equipamento SAT. Uma instância desta classe é usada como argumento
para :meth:`~satcfe.base._FuncoesSAT.configurar_interface_de_rede`.
:param str tipoInter: Tipo de interface de rede que o equipamento SAT
deverá utilizar. As opções de tipos de rede estão disponíveis na
constante :attr:`~satcomum.constantes.REDE_TIPOINTER_OPCOES`.
:param str SSID: *Opcional* Nome da rede sem fio, se for o caso, contendo
até 32 caracteres.
:param str seg: *Opcional* Tipo de segurança da rede sem fio. As opções
estão na constante :attr:`~satcomum.constantes.REDE_SEG_OPCOES`.
:param str codigo: *Opcional* Senha de acesso à rede sem fio, contendo
até 64 caracteres.
:param str tipoLan: Tipo da rede LAN. As opções estão disponíveis na
constante :attr:`~satcomum.constantes.REDE_TIPOLAN_OPCOES`.
:param str lanIP: *Opcional* Endereço IP do equipamento SAT.
:param str lanMask: *Opcional* Máscara de sub-rede.
:param str lanGW: *Opcional* Endereço IP do gateway padrão.
:param str lanDNS1: *Opcional* Endereço IP do DNS primário.
:param str lanDNS2: *Opcional* Endereço IP do DNS secundário.
:param str usuario: *Opcional* Nome do usuário para obtenção do endereço
IP, se necessário, contendo até 64 caracteres.
:param str senha: *Opcional* Senha do usuário para obtenção do endereço IP,
relacionado ao parâmetro ``usuario``, se necessário, contendo até 32
caracteres.
:param str proxy: *Opcional* Indica a configuração de proxy da rede.
As opções estão disponíveis na
constante :attr:`~satcomum.constantes.REDE_PROXY_OPCOES`.
:param str proxy_ip: *Opcional* Endereço IP do servidor proxy.
:param int proxy_porta: *Opcional* Número da porta por onde o servidor de
proxy responde.
:param str proxy_user: *Opcional* Nome do usuário para acesso ao proxy, se
necessário, contendo até 64 caracteres.
:param str proxy_senha: *Opcional* Senha do usuário para acesso ao proxy,
relacionado ao parâmetro ``proxy_user``, se necessário, contendo
até 64 caracteres.
"""
def __init__(self, **kwargs):
super(ConfiguracaoRede, self).__init__(schema={
'tipoInter': {
'type': 'string',
'required': True,
'allowed': [v for v, s in REDE_TIPOINTER_OPCOES]},
'SSID': {
'type': 'string',
'required': False,
'minlength': 1, 'maxlength': 32},
'seg': {
'type': 'string',
'required': False,
'allowed': [v for v, s in REDE_SEG_OPCOES]},
'codigo': {
'type': 'string',
'required': False,
'minlength': 1, 'maxlength': 64},
'tipoLan': {
'type': 'string',
'required': True,
'allowed': [v for v, s in REDE_TIPOLAN_OPCOES]},
'lanIP': {
'type': 'ipv4',
'required': False},
'lanMask': {
'type': 'ipv4',
'required': False},
'lanGW': {
'type': 'ipv4',
'required': False},
'lanDNS1': {
'type': 'ipv4',
'required': False},
'lanDNS2': {
'type': 'ipv4',
'required': False},
'usuario': {
'type': 'string',
'required': False,
'minlength': 1, 'maxlength': 64},
'senha': {
'type': 'string',
'required': False,
'minlength': 1, 'maxlength': 64},
'proxy': {
'type': 'string',
'required': False,
'allowed': [v for v, s in REDE_PROXY_OPCOES]},
'proxy_ip': {
'type': 'ipv4',
'required': False},
'proxy_porta': {
'type': 'integer',
'required': False,
'min': 0, 'max': 65535},
'proxy_user': {
'type': 'string',
'required': False,
'minlength': 1, 'maxlength': 64},
'proxy_senha': {
'type': 'string',
'required': False,
'minlength': 1, 'maxlength': 64},
}, **kwargs)
def _construir_elemento_xml(self, *args, **kwargs):
config = ET.Element('config')
for elemento in self._schema.keys():
valor = getattr(self, elemento, None)
if valor:
if isinstance(valor, int):
valor = str(valor)
ET.SubElement(config, elemento).text = valor
return config
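# A minimal usage sketch (hypothetical, not part of the original module): the
# required fields take the first allowed option from the satcomum.constantes
# lists referenced in the schema above, and the IP addresses are placeholders.
def _example_configuracao_rede_sketch():
    rede = ConfiguracaoRede(
            tipoInter=REDE_TIPOINTER_OPCOES[0][0],
            tipoLan=REDE_TIPOLAN_OPCOES[0][0],
            lanIP='192.168.0.101',
            lanMask='255.255.255.0',
            lanGW='192.168.0.1',
            lanDNS1='8.8.8.8')
    return ET.tostring(rede._construir_elemento_xml())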
| 39.097561
| 79
| 0.541953
|
d2c966ae8aeacec3a3bf1c4e605562c09d0d6f86
| 3,541
|
py
|
Python
|
web-frameworks/django/djangoWebPush/djangoWebPush/settings.py
|
suroegin-learning/learn-python
|
be5bda86add0dcd6f2fd3db737bb7d0d3ec5f853
|
[
"MIT"
] | null | null | null |
web-frameworks/django/djangoWebPush/djangoWebPush/settings.py
|
suroegin-learning/learn-python
|
be5bda86add0dcd6f2fd3db737bb7d0d3ec5f853
|
[
"MIT"
] | null | null | null |
web-frameworks/django/djangoWebPush/djangoWebPush/settings.py
|
suroegin-learning/learn-python
|
be5bda86add0dcd6f2fd3db737bb7d0d3ec5f853
|
[
"MIT"
] | null | null | null |
"""
Django settings for djangoWebPush project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&vc$u)geq9lfckqif27j06_y^^0v=6dw7vy0eorp-sc+8t1cyd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'webpush',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoWebPush.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
{
"BACKEND": "django_jinja.backend.Jinja2",
"OPTIONS": {
'extensions': ['webpush.jinja2.WebPushExtension'],
}
},
]
WSGI_APPLICATION = 'djangoWebPush.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
WEBPUSH_SETTINGS = {
"VAPID_PUBLIC_KEY": "BFRTDHKk1A9xsTby9aYNP5tt4avC0azbsM9pmtBBhSyj0LSoM6FP65m2ABP4zmU2Jivxs0lGAHIIFgdHD9-bpzY",
"VAPID_PRIVATE_KEY":"rvuSmW7nYAlqNmbiUx9VReefT8djJDpdRkb8-XkhvBM",
"VAPID_ADMIN_EMAIL": "ivan.suroegin@gmail.com"
}
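# Illustrative note (not part of the generated settings): with WEBPUSH_SETTINGS
# configured, a view elsewhere in the project could send a notification roughly
# like this (the payload keys below are just an example; see the django-webpush
# documentation for details):
#
#     from webpush import send_user_notification
#     payload = {"head": "Hello", "body": "A test push message"}
#     send_user_notification(user=request.user, payload=payload, ttl=1000)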
| 26.22963
| 114
| 0.69839
|
d139b9f480cc324820e9654116833eac007452b5
| 38,095
|
py
|
Python
|
nova/volume/cinder.py
|
gyliu513/nova
|
14e974a5f77c72a9bb44c6801746abb2eda8e91d
|
[
"Apache-2.0"
] | 1
|
2019-11-07T03:11:37.000Z
|
2019-11-07T03:11:37.000Z
|
nova/volume/cinder.py
|
gyliu513/nova
|
14e974a5f77c72a9bb44c6801746abb2eda8e91d
|
[
"Apache-2.0"
] | 1
|
2021-03-31T19:35:21.000Z
|
2021-03-31T19:35:21.000Z
|
nova/volume/cinder.py
|
Mattlk13/nova
|
5b13eb59540aaf535a53920e783964d106de2620
|
[
"Apache-2.0"
] | 1
|
2020-07-22T22:14:40.000Z
|
2020-07-22T22:14:40.000Z
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to volumes + cinder.
"""
import collections
import copy
import functools
import sys
from cinderclient import api_versions as cinder_api_versions
from cinderclient import client as cinder_client
from cinderclient import exceptions as cinder_exception
from keystoneauth1 import exceptions as keystone_exception
from keystoneauth1 import loading as ks_loading
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import strutils
import six
from six.moves import urllib
from nova import availability_zones as az
import nova.conf
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import service_auth
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
_ADMIN_AUTH = None
_SESSION = None
def reset_globals():
"""Testing method to reset globals.
"""
global _ADMIN_AUTH
global _SESSION
_ADMIN_AUTH = None
_SESSION = None
def _load_auth_plugin(conf):
auth_plugin = ks_loading.load_auth_from_conf_options(conf,
nova.conf.cinder.cinder_group.name)
if auth_plugin:
return auth_plugin
if conf.cinder.auth_type is None:
LOG.error('The [cinder] section of your nova configuration file '
'must be configured for authentication with the '
'block-storage service endpoint.')
err_msg = _('Unknown auth type: %s') % conf.cinder.auth_type
raise cinder_exception.Unauthorized(401, message=err_msg)
def _load_session():
global _SESSION
if not _SESSION:
_SESSION = ks_loading.load_session_from_conf_options(
CONF, nova.conf.cinder.cinder_group.name)
def _get_auth(context):
global _ADMIN_AUTH
# NOTE(lixipeng): Auth token is none when call
# cinder API from compute periodic tasks, context
# from them generated from 'context.get_admin_context'
# which only set is_admin=True but is without token.
# So add load_auth_plugin when this condition appear.
if context.is_admin and not context.auth_token:
if not _ADMIN_AUTH:
_ADMIN_AUTH = _load_auth_plugin(CONF)
return _ADMIN_AUTH
else:
return service_auth.get_auth_plugin(context)
# NOTE(efried): Bug #1752152
# This method is copied/adapted from cinderclient.client.get_server_version so
# we can use _SESSION.get rather than a raw requests.get to retrieve the
# version document. This enables HTTPS by gleaning cert info from the session
# config.
def _get_server_version(context, url):
"""Queries the server via the naked endpoint and gets version info.
:param context: The nova request context for auth.
:param url: url of the cinder endpoint
:returns: APIVersion object for min and max version supported by
the server
"""
min_version = "2.0"
current_version = "2.0"
_load_session()
auth = _get_auth(context)
try:
u = urllib.parse.urlparse(url)
version_url = None
# NOTE(andreykurilin): endpoint URL has at least 2 formats:
# 1. The classic (legacy) endpoint:
# http://{host}:{optional_port}/v{2 or 3}/{project-id}
# http://{host}:{optional_port}/v{2 or 3}
        #   2. Under wsgi:
# http://{host}:{optional_port}/volume/v{2 or 3}
for ver in ['v2', 'v3']:
if u.path.endswith(ver) or "/{0}/".format(ver) in u.path:
path = u.path[:u.path.rfind(ver)]
version_url = '%s://%s%s' % (u.scheme, u.netloc, path)
break
if not version_url:
# NOTE(andreykurilin): probably, it is one of the next cases:
# * https://volume.example.com/
# * https://example.com/volume
# leave as is without cropping.
version_url = url
response = _SESSION.get(version_url, auth=auth)
data = jsonutils.loads(response.text)
versions = data['versions']
for version in versions:
if '3.' in version['version']:
min_version = version['min_version']
current_version = version['version']
break
except cinder_exception.ClientException as e:
LOG.warning("Error in server version query:%s\n"
"Returning APIVersion 2.0", six.text_type(e.message))
return (cinder_api_versions.APIVersion(min_version),
cinder_api_versions.APIVersion(current_version))
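# A small sketch (not part of nova) of the URL cropping performed above: a
# versioned endpoint such as 'http://cinder:8776/v3/{project_id}' is reduced to
# its root before the version document is requested.
def _crop_version_url_sketch(url):
    u = urllib.parse.urlparse(url)
    for ver in ['v2', 'v3']:
        if u.path.endswith(ver) or "/{0}/".format(ver) in u.path:
            path = u.path[:u.path.rfind(ver)]
            return '%s://%s%s' % (u.scheme, u.netloc, path)
    # e.g. https://volume.example.com/ or https://example.com/volume: leave as is
    return url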
# NOTE(efried): Bug #1752152
# This method is copied/adapted from
# cinderclient.client.get_highest_client_server_version. See note on
# _get_server_version.
def _get_highest_client_server_version(context, url):
"""Returns highest APIVersion supported version by client and server."""
min_server, max_server = _get_server_version(context, url)
max_client = cinder_api_versions.APIVersion(
cinder_api_versions.MAX_VERSION)
return min(max_server, max_client)
def _check_microversion(context, url, microversion):
"""Checks to see if the requested microversion is supported by the current
version of python-cinderclient and the volume API endpoint.
:param context: The nova request context for auth.
:param url: Cinder API endpoint URL.
:param microversion: Requested microversion. If not available at the given
API endpoint URL, a CinderAPIVersionNotAvailable exception is raised.
:returns: The microversion if it is available. This can be used to
construct the cinder v3 client object.
:raises: CinderAPIVersionNotAvailable if the microversion is not available.
"""
max_api_version = _get_highest_client_server_version(context, url)
# Check if the max_api_version matches the requested minimum microversion.
if max_api_version.matches(microversion):
# The requested microversion is supported by the client and the server.
return microversion
raise exception.CinderAPIVersionNotAvailable(version=microversion)
def _get_cinderclient_parameters(context):
_load_session()
auth = _get_auth(context)
url = None
service_type, service_name, interface = CONF.cinder.catalog_info.split(':')
service_parameters = {'service_type': service_type,
'interface': interface,
'region_name': CONF.cinder.os_region_name}
# Only include the service_name if it's provided.
if service_name:
service_parameters['service_name'] = service_name
if CONF.cinder.endpoint_template:
url = CONF.cinder.endpoint_template % context.to_dict()
else:
url = _SESSION.get_endpoint(auth, **service_parameters)
return auth, service_parameters, url
def is_microversion_supported(context, microversion):
# NOTE(efried): Work around bug #1752152. Call the cinderclient() builder
# in a way that just does a microversion check.
cinderclient(context, microversion=microversion, check_only=True)
def cinderclient(context, microversion=None, skip_version_check=False,
check_only=False):
"""Constructs a cinder client object for making API requests.
:param context: The nova request context for auth.
:param microversion: Optional microversion to check against the client.
This implies that Cinder v3 is required for any calls that require a
microversion. If the microversion is not available, this method will
        raise a CinderAPIVersionNotAvailable exception.
:param skip_version_check: If True and a specific microversion is
requested, the version discovery check is skipped and the microversion
is used directly. This should only be used if a previous check for the
same microversion was successful.
:param check_only: If True, don't build the actual client; just do the
setup and version checking.
:raises: UnsupportedCinderAPIVersion if a major version other than 3 is
requested.
:raises: CinderAPIVersionNotAvailable if microversion checking is requested
and the specified microversion is higher than what the service can
handle.
    :returns: A cinderclient.client.Client wrapper, unless check_only is True.
"""
endpoint_override = None
auth, service_parameters, url = _get_cinderclient_parameters(context)
if CONF.cinder.endpoint_template:
endpoint_override = url
# TODO(jamielennox): This should be using proper version discovery from
# the cinder service rather than just inspecting the URL for certain string
# values.
version = cinder_client.get_volume_api_from_url(url)
if version != '3':
raise exception.UnsupportedCinderAPIVersion(version=version)
version = '3.0'
# Check to see a specific microversion is requested and if so, can it
# be handled by the backing server.
if microversion is not None:
if skip_version_check:
version = microversion
else:
version = _check_microversion(context, url, microversion)
if check_only:
return
return cinder_client.Client(version,
session=_SESSION,
auth=auth,
endpoint_override=endpoint_override,
connect_retries=CONF.cinder.http_retries,
global_request_id=context.global_id,
**service_parameters)
def _untranslate_volume_summary_view(context, vol):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol.id
d['status'] = vol.status
d['size'] = vol.size
d['availability_zone'] = vol.availability_zone
d['created_at'] = vol.created_at
# TODO(jdg): The calling code expects attach_time and
# mountpoint to be set. When the calling
# code is more defensive this can be
# removed.
d['attach_time'] = ""
d['mountpoint'] = ""
d['multiattach'] = getattr(vol, 'multiattach', False)
if vol.attachments:
d['attachments'] = collections.OrderedDict()
for attachment in vol.attachments:
a = {attachment['server_id']:
{'attachment_id': attachment.get('attachment_id'),
'mountpoint': attachment.get('device')}
}
d['attachments'].update(a.items())
d['attach_status'] = 'attached'
else:
d['attach_status'] = 'detached'
d['display_name'] = vol.name
d['display_description'] = vol.description
# TODO(jdg): Information may be lost in this translation
d['volume_type_id'] = vol.volume_type
d['snapshot_id'] = vol.snapshot_id
d['bootable'] = strutils.bool_from_string(vol.bootable)
d['volume_metadata'] = {}
for key, value in vol.metadata.items():
d['volume_metadata'][key] = value
if hasattr(vol, 'volume_image_metadata'):
d['volume_image_metadata'] = copy.deepcopy(vol.volume_image_metadata)
# The 3.48 microversion exposes a shared_targets boolean and service_uuid
# string parameter which can be used with locks during volume attach
# and detach.
if hasattr(vol, 'shared_targets'):
d['shared_targets'] = vol.shared_targets
d['service_uuid'] = vol.service_uuid
if hasattr(vol, 'migration_status'):
d['migration_status'] = vol.migration_status
return d
def _untranslate_volume_type_view(volume_type):
"""Maps keys for volume type view."""
v = {}
v['id'] = volume_type.id
v['name'] = volume_type.name
return v
def _untranslate_snapshot_summary_view(context, snapshot):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = snapshot.id
d['status'] = snapshot.status
d['progress'] = snapshot.progress
d['size'] = snapshot.size
d['created_at'] = snapshot.created_at
d['display_name'] = snapshot.name
d['display_description'] = snapshot.description
d['volume_id'] = snapshot.volume_id
d['project_id'] = snapshot.project_id
d['volume_size'] = snapshot.size
return d
def _translate_attachment_ref(attachment_ref):
"""Building old style connection_info by adding the 'data' key back."""
translated_con_info = {}
connection_info_data = attachment_ref.pop('connection_info', None)
if connection_info_data:
connection_info_data.pop('attachment_id', None)
translated_con_info['driver_volume_type'] = \
connection_info_data.pop('driver_volume_type', None)
translated_con_info['data'] = connection_info_data
translated_con_info['status'] = attachment_ref.pop('status', None)
translated_con_info['instance'] = attachment_ref.pop('instance', None)
translated_con_info['attached_at'] = attachment_ref.pop('attached_at',
None)
translated_con_info['detached_at'] = attachment_ref.pop('detached_at',
None)
# Now the catch all...
for k, v in attachment_ref.items():
# Keep these as top-level fields on the attachment record.
if k not in ("id", "attach_mode"):
translated_con_info[k] = v
attachment_ref['connection_info'] = translated_con_info
return attachment_ref
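# Roughly speaking (illustrative example, field values made up), the function
# above turns a new-style attachment record such as
#     {'id': ..., 'attach_mode': 'rw', 'status': 'attached', 'volume_id': ...,
#      'connection_info': {'driver_volume_type': 'iscsi', 'target_lun': 1}}
# into the older shape the rest of nova expects, where everything except 'id'
# and 'attach_mode' is nested under 'connection_info' and the transport details
# move under a 'data' key:
#     {'id': ..., 'attach_mode': 'rw', 'volume_id': ...,
#      'connection_info': {'driver_volume_type': 'iscsi', 'status': 'attached',
#                          'volume_id': ..., 'data': {'target_lun': 1}, ...}}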
def translate_cinder_exception(method):
"""Transforms a cinder exception but keeps its traceback intact."""
@functools.wraps(method)
def wrapper(self, ctx, *args, **kwargs):
try:
res = method(self, ctx, *args, **kwargs)
except (cinder_exception.ConnectionError,
keystone_exception.ConnectionError) as exc:
err_msg = encodeutils.exception_to_unicode(exc)
_reraise(exception.CinderConnectionFailed(reason=err_msg))
except (keystone_exception.BadRequest,
cinder_exception.BadRequest) as exc:
err_msg = encodeutils.exception_to_unicode(exc)
_reraise(exception.InvalidInput(reason=err_msg))
except (keystone_exception.Forbidden,
cinder_exception.Forbidden) as exc:
err_msg = encodeutils.exception_to_unicode(exc)
_reraise(exception.Forbidden(err_msg))
return res
return wrapper
def translate_create_exception(method):
"""Transforms the exception for create but keeps its traceback intact.
"""
def wrapper(self, ctx, size, *args, **kwargs):
try:
res = method(self, ctx, size, *args, **kwargs)
except (keystone_exception.NotFound, cinder_exception.NotFound) as e:
_reraise(exception.NotFound(message=e.message))
except cinder_exception.OverLimit as e:
_reraise(exception.OverQuota(message=e.message))
return res
return translate_cinder_exception(wrapper)
def translate_volume_exception(method):
"""Transforms the exception for the volume but keeps its traceback intact.
"""
def wrapper(self, ctx, volume_id, *args, **kwargs):
try:
res = method(self, ctx, volume_id, *args, **kwargs)
except (keystone_exception.NotFound, cinder_exception.NotFound):
_reraise(exception.VolumeNotFound(volume_id=volume_id))
except cinder_exception.OverLimit as e:
_reraise(exception.OverQuota(message=e.message))
return res
return translate_cinder_exception(wrapper)
def translate_attachment_exception(method):
"""Transforms the exception for the attachment but keeps its traceback
intact.
"""
def wrapper(self, ctx, attachment_id, *args, **kwargs):
try:
res = method(self, ctx, attachment_id, *args, **kwargs)
except (keystone_exception.NotFound, cinder_exception.NotFound):
_reraise(exception.VolumeAttachmentNotFound(
attachment_id=attachment_id))
return res
return translate_cinder_exception(wrapper)
def translate_snapshot_exception(method):
"""Transforms the exception for the snapshot but keeps its traceback
intact.
"""
def wrapper(self, ctx, snapshot_id, *args, **kwargs):
try:
res = method(self, ctx, snapshot_id, *args, **kwargs)
except (keystone_exception.NotFound, cinder_exception.NotFound):
_reraise(exception.SnapshotNotFound(snapshot_id=snapshot_id))
return res
return translate_cinder_exception(wrapper)
def translate_mixed_exceptions(method):
"""Transforms exceptions that can come from both volumes and snapshots."""
def wrapper(self, ctx, res_id, *args, **kwargs):
try:
res = method(self, ctx, res_id, *args, **kwargs)
except (keystone_exception.NotFound, cinder_exception.NotFound):
_reraise(exception.VolumeNotFound(volume_id=res_id))
except cinder_exception.OverLimit:
_reraise(exception.OverQuota(overs='snapshots'))
return res
return translate_cinder_exception(wrapper)
def _reraise(desired_exc):
six.reraise(type(desired_exc), desired_exc, sys.exc_info()[2])
class API(object):
"""API for interacting with the volume manager."""
@translate_volume_exception
def get(self, context, volume_id, microversion=None):
"""Get the details about a volume given it's ID.
:param context: the nova request context
:param volume_id: the id of the volume to get
:param microversion: optional string microversion value
:raises: CinderAPIVersionNotAvailable if the specified microversion is
not available.
"""
item = cinderclient(
context, microversion=microversion).volumes.get(volume_id)
return _untranslate_volume_summary_view(context, item)
@translate_cinder_exception
def get_all(self, context, search_opts=None):
search_opts = search_opts or {}
items = cinderclient(context).volumes.list(detailed=True,
search_opts=search_opts)
rval = []
for item in items:
rval.append(_untranslate_volume_summary_view(context, item))
return rval
def check_attached(self, context, volume):
if volume['status'] != "in-use":
msg = _("volume '%(vol)s' status must be 'in-use'. Currently in "
"'%(status)s' status") % {"vol": volume['id'],
"status": volume['status']}
raise exception.InvalidVolume(reason=msg)
def check_availability_zone(self, context, volume, instance=None):
"""Ensure that the availability zone is the same.
:param context: the nova request context
:param volume: the volume attached to the instance
:param instance: nova.objects.instance.Instance object
:raises: InvalidVolume if the instance availability zone does not
equal the volume's availability zone
"""
# TODO(walter-boring): move this check to Cinder as part of
# the reserve call.
if instance and not CONF.cinder.cross_az_attach:
instance_az = az.get_instance_availability_zone(context, instance)
if instance_az != volume['availability_zone']:
msg = _("Instance %(instance)s and volume %(vol)s are not in "
"the same availability_zone. Instance is in "
"%(ins_zone)s. Volume is in %(vol_zone)s") % {
"instance": instance.uuid,
"vol": volume['id'],
'ins_zone': instance_az,
'vol_zone': volume['availability_zone']}
raise exception.InvalidVolume(reason=msg)
@translate_volume_exception
def reserve_volume(self, context, volume_id):
cinderclient(context).volumes.reserve(volume_id)
@translate_volume_exception
def unreserve_volume(self, context, volume_id):
cinderclient(context).volumes.unreserve(volume_id)
@translate_volume_exception
def begin_detaching(self, context, volume_id):
cinderclient(context).volumes.begin_detaching(volume_id)
@translate_volume_exception
def roll_detaching(self, context, volume_id):
cinderclient(context).volumes.roll_detaching(volume_id)
@translate_volume_exception
def attach(self, context, volume_id, instance_uuid, mountpoint, mode='rw'):
cinderclient(context).volumes.attach(volume_id, instance_uuid,
mountpoint, mode=mode)
@translate_volume_exception
def detach(self, context, volume_id, instance_uuid=None,
attachment_id=None):
client = cinderclient(context)
if attachment_id is None:
volume = self.get(context, volume_id)
if volume['multiattach']:
attachments = volume.get('attachments', {})
if instance_uuid:
attachment_id = attachments.get(instance_uuid, {}).\
get('attachment_id')
if not attachment_id:
LOG.warning(_LW("attachment_id couldn't be retrieved "
"for volume %(volume_id)s with "
"instance_uuid %(instance_id)s. The "
"volume has the 'multiattach' flag "
"enabled, without the attachment_id "
"Cinder most probably cannot perform "
"the detach."),
{'volume_id': volume_id,
'instance_id': instance_uuid})
else:
LOG.warning(_LW("attachment_id couldn't be retrieved for "
"volume %(volume_id)s. The volume has the "
"'multiattach' flag enabled, without the "
"attachment_id Cinder most probably "
"cannot perform the detach."),
{'volume_id': volume_id})
client.volumes.detach(volume_id, attachment_id)
@translate_volume_exception
def initialize_connection(self, context, volume_id, connector):
try:
connection_info = cinderclient(
context).volumes.initialize_connection(volume_id, connector)
connection_info['connector'] = connector
return connection_info
except cinder_exception.ClientException as ex:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Initialize connection failed for volume '
'%(vol)s on host %(host)s. Error: %(msg)s '
'Code: %(code)s. Attempting to terminate '
'connection.'),
{'vol': volume_id,
'host': connector.get('host'),
'msg': six.text_type(ex),
'code': ex.code})
try:
self.terminate_connection(context, volume_id, connector)
except Exception as exc:
LOG.error(_LE('Connection between volume %(vol)s and host '
'%(host)s might have succeeded, but attempt '
'to terminate connection has failed. '
'Validate the connection and determine if '
'manual cleanup is needed. Error: %(msg)s '
'Code: %(code)s.'),
{'vol': volume_id,
'host': connector.get('host'),
'msg': six.text_type(exc),
'code': (
exc.code if hasattr(exc, 'code') else None)})
@translate_volume_exception
def terminate_connection(self, context, volume_id, connector):
return cinderclient(context).volumes.terminate_connection(volume_id,
connector)
@translate_cinder_exception
def migrate_volume_completion(self, context, old_volume_id, new_volume_id,
error=False):
return cinderclient(context).volumes.migrate_volume_completion(
old_volume_id, new_volume_id, error)
@translate_create_exception
def create(self, context, size, name, description, snapshot=None,
image_id=None, volume_type=None, metadata=None,
availability_zone=None):
client = cinderclient(context)
if snapshot is not None:
snapshot_id = snapshot['id']
else:
snapshot_id = None
kwargs = dict(snapshot_id=snapshot_id,
volume_type=volume_type,
availability_zone=availability_zone,
metadata=metadata,
imageRef=image_id,
name=name,
description=description)
item = client.volumes.create(size, **kwargs)
return _untranslate_volume_summary_view(context, item)
@translate_volume_exception
def delete(self, context, volume_id):
cinderclient(context).volumes.delete(volume_id)
@translate_volume_exception
def update(self, context, volume_id, fields):
raise NotImplementedError()
@translate_cinder_exception
def get_absolute_limits(self, context):
"""Returns quota limit and usage information for the given tenant
See the <volumev3>/v3/{project_id}/limits API reference for details.
:param context: The nova RequestContext for the user request. Note
that the limit information returned from Cinder is specific to
the project_id within this context.
:returns: dict of absolute limits
"""
# cinderclient returns a generator of AbsoluteLimit objects, so iterate
# over the generator and return a dictionary which is easier for the
# nova client-side code to handle.
limits = cinderclient(context).limits.get().absolute
return {limit.name: limit.value for limit in limits}
@translate_snapshot_exception
def get_snapshot(self, context, snapshot_id):
item = cinderclient(context).volume_snapshots.get(snapshot_id)
return _untranslate_snapshot_summary_view(context, item)
@translate_cinder_exception
def get_all_snapshots(self, context):
items = cinderclient(context).volume_snapshots.list(detailed=True)
rvals = []
for item in items:
rvals.append(_untranslate_snapshot_summary_view(context, item))
return rvals
@translate_mixed_exceptions
def create_snapshot(self, context, volume_id, name, description):
item = cinderclient(context).volume_snapshots.create(volume_id,
False,
name,
description)
return _untranslate_snapshot_summary_view(context, item)
@translate_mixed_exceptions
def create_snapshot_force(self, context, volume_id, name, description):
item = cinderclient(context).volume_snapshots.create(volume_id,
True,
name,
description)
return _untranslate_snapshot_summary_view(context, item)
@translate_snapshot_exception
def delete_snapshot(self, context, snapshot_id):
cinderclient(context).volume_snapshots.delete(snapshot_id)
@translate_cinder_exception
def get_all_volume_types(self, context):
items = cinderclient(context).volume_types.list()
rvals = []
for item in items:
rvals.append(_untranslate_volume_type_view(item))
return rvals
@translate_cinder_exception
def get_volume_encryption_metadata(self, context, volume_id):
return cinderclient(context).volumes.get_encryption_metadata(volume_id)
@translate_snapshot_exception
def update_snapshot_status(self, context, snapshot_id, status):
vs = cinderclient(context).volume_snapshots
# '90%' here is used to tell Cinder that Nova is done
# with its portion of the 'creating' state. This can
# be removed when we are able to split the Cinder states
# into 'creating' and a separate state of
# 'creating_in_nova'. (Same for 'deleting' state.)
vs.update_snapshot_status(
snapshot_id,
{'status': status,
'progress': '90%'}
)
@translate_volume_exception
def attachment_create(self, context, volume_id, instance_id,
connector=None, mountpoint=None):
"""Create a volume attachment. This requires microversion >= 3.44.
The attachment_create call was introduced in microversion 3.27. We
        need 3.44 as the minimum here because we need attachment_complete, which
        was introduced in version 3.44, to finish the attaching process.
:param context: The nova request context.
:param volume_id: UUID of the volume on which to create the attachment.
:param instance_id: UUID of the instance to which the volume will be
attached.
:param connector: host connector dict; if None, the attachment will
be 'reserved' but not yet attached.
:param mountpoint: Optional mount device name for the attachment,
e.g. "/dev/vdb". This is only used if a connector is provided.
:returns: a dict created from the
cinderclient.v3.attachments.VolumeAttachment object with a backward
compatible connection_info dict
"""
# NOTE(mriedem): Due to a limitation in the POST /attachments/
# API in Cinder, we have to pass the mountpoint in via the
# host connector rather than pass it in as a top-level parameter
# like in the os-attach volume action API. Hopefully this will be
# fixed some day with a new Cinder microversion but until then we
# work around it client-side.
_connector = connector
if _connector and mountpoint and 'mountpoint' not in _connector:
# Make a copy of the connector so we don't modify it by
# reference.
_connector = copy.deepcopy(connector)
_connector['mountpoint'] = mountpoint
try:
attachment_ref = cinderclient(context, '3.44').attachments.create(
volume_id, _connector, instance_id)
return _translate_attachment_ref(attachment_ref)
except cinder_exception.ClientException as ex:
with excutils.save_and_reraise_exception():
# NOTE: It is unnecessary to output BadRequest(400) error log,
# because operators don't need to debug such cases.
if getattr(ex, 'code', None) != 400:
LOG.error(('Create attachment failed for volume '
'%(volume_id)s. Error: %(msg)s Code: %(code)s'),
{'volume_id': volume_id,
'msg': six.text_type(ex),
'code': getattr(ex, 'code', None)},
instance_uuid=instance_id)
@translate_attachment_exception
def attachment_get(self, context, attachment_id):
"""Gets a volume attachment.
:param context: The nova request context.
:param attachment_id: UUID of the volume attachment to get.
:returns: a dict created from the
cinderclient.v3.attachments.VolumeAttachment object with a backward
compatible connection_info dict
"""
try:
attachment_ref = cinderclient(
context, '3.44', skip_version_check=True).attachments.show(
attachment_id)
translated_attach_ref = _translate_attachment_ref(
attachment_ref.to_dict())
return translated_attach_ref
except cinder_exception.ClientException as ex:
with excutils.save_and_reraise_exception():
LOG.error(('Show attachment failed for attachment '
'%(id)s. Error: %(msg)s Code: %(code)s'),
{'id': attachment_id,
'msg': six.text_type(ex),
'code': getattr(ex, 'code', None)})
@translate_attachment_exception
def attachment_update(self, context, attachment_id, connector,
mountpoint=None):
"""Updates the connector on the volume attachment. An attachment
without a connector is considered reserved but not fully attached.
:param context: The nova request context.
:param attachment_id: UUID of the volume attachment to update.
:param connector: host connector dict. This is required when updating
a volume attachment. To terminate a connection, the volume
attachment for that connection must be deleted.
:param mountpoint: Optional mount device name for the attachment,
e.g. "/dev/vdb". Theoretically this is optional per volume backend,
but in practice it's normally required so it's best to always
provide a value.
:returns: a dict created from the
cinderclient.v3.attachments.VolumeAttachment object with a backward
compatible connection_info dict
"""
# NOTE(mriedem): Due to a limitation in the PUT /attachments/{id}
# API in Cinder, we have to pass the mountpoint in via the
# host connector rather than pass it in as a top-level parameter
# like in the os-attach volume action API. Hopefully this will be
# fixed some day with a new Cinder microversion but until then we
# work around it client-side.
_connector = connector
if mountpoint and 'mountpoint' not in connector:
# Make a copy of the connector so we don't modify it by
# reference.
_connector = copy.deepcopy(connector)
_connector['mountpoint'] = mountpoint
try:
attachment_ref = cinderclient(
context, '3.44', skip_version_check=True).attachments.update(
attachment_id, _connector)
translated_attach_ref = _translate_attachment_ref(
attachment_ref.to_dict())
return translated_attach_ref
except cinder_exception.ClientException as ex:
with excutils.save_and_reraise_exception():
LOG.error(('Update attachment failed for attachment '
'%(id)s. Error: %(msg)s Code: %(code)s'),
{'id': attachment_id,
'msg': six.text_type(ex),
'code': getattr(ex, 'code', None)})
@translate_attachment_exception
def attachment_delete(self, context, attachment_id):
try:
cinderclient(
context, '3.44', skip_version_check=True).attachments.delete(
attachment_id)
except cinder_exception.ClientException as ex:
with excutils.save_and_reraise_exception():
LOG.error(('Delete attachment failed for attachment '
'%(id)s. Error: %(msg)s Code: %(code)s'),
{'id': attachment_id,
'msg': six.text_type(ex),
'code': getattr(ex, 'code', None)})
@translate_attachment_exception
def attachment_complete(self, context, attachment_id):
"""Marks a volume attachment complete.
This call should be used to inform Cinder that a volume attachment is
fully connected on the compute host so Cinder can apply the necessary
state changes to the volume info in its database.
:param context: The nova request context.
:param attachment_id: UUID of the volume attachment to update.
"""
try:
cinderclient(
context, '3.44', skip_version_check=True).attachments.complete(
attachment_id)
except cinder_exception.ClientException as ex:
with excutils.save_and_reraise_exception():
LOG.error(('Complete attachment failed for attachment '
'%(id)s. Error: %(msg)s Code: %(code)s'),
{'id': attachment_id,
'msg': six.text_type(ex),
'code': getattr(ex, 'code', None)})
| 41.725082
| 79
| 0.627379
|
244e0faaa1e59445a8baccb93512dc5be88873ac
| 20,916
|
py
|
Python
|
astropy/time/tests/test_methods.py
|
b1quint/astropy
|
a170a74739e4356c169429a42e554f9777b53f4d
|
[
"BSD-3-Clause"
] | 8
|
2019-04-27T01:19:45.000Z
|
2020-09-21T03:31:01.000Z
|
astropy/time/tests/test_methods.py
|
b1quint/astropy
|
a170a74739e4356c169429a42e554f9777b53f4d
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/time/tests/test_methods.py
|
b1quint/astropy
|
a170a74739e4356c169429a42e554f9777b53f4d
|
[
"BSD-3-Clause"
] | 5
|
2019-04-27T01:19:47.000Z
|
2020-09-20T15:15:19.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import copy
import pytest
import numpy as np
from astropy.time import Time
@pytest.fixture(scope="module", params=[True, False])
def masked(request):
# Could not figure out a better way to parametrize the setup method
global use_masked_data
use_masked_data = request.param
yield use_masked_data
class TestManipulation():
"""Manipulation of Time objects, ensuring attributes are done correctly."""
def setup(self):
mjd = np.arange(50000, 50010)
frac = np.arange(0., 0.999, 0.2)
if use_masked_data:
frac = np.ma.array(frac)
frac[1] = np.ma.masked
self.t0 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc')
self.t1 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=('45d', '50d'))
        # Note: location is along last axis only.
        self.t2 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
                       location=(np.arange(len(frac)), np.arange(len(frac))))
def test_ravel(self, masked):
t0_ravel = self.t0.ravel()
assert t0_ravel.shape == (self.t0.size,)
assert np.all(t0_ravel.jd1 == self.t0.jd1.ravel())
assert np.may_share_memory(t0_ravel.jd1, self.t0.jd1)
assert t0_ravel.location is None
t1_ravel = self.t1.ravel()
assert t1_ravel.shape == (self.t1.size,)
assert np.all(t1_ravel.jd1 == self.t1.jd1.ravel())
assert np.may_share_memory(t1_ravel.jd1, self.t1.jd1)
assert t1_ravel.location is self.t1.location
t2_ravel = self.t2.ravel()
assert t2_ravel.shape == (self.t2.size,)
assert np.all(t2_ravel.jd1 == self.t2.jd1.ravel())
assert np.may_share_memory(t2_ravel.jd1, self.t2.jd1)
assert t2_ravel.location.shape == t2_ravel.shape
# Broadcasting and ravelling cannot be done without a copy.
assert not np.may_share_memory(t2_ravel.location, self.t2.location)
def test_flatten(self, masked):
t0_flatten = self.t0.flatten()
assert t0_flatten.shape == (self.t0.size,)
assert t0_flatten.location is None
# Flatten always makes a copy.
assert not np.may_share_memory(t0_flatten.jd1, self.t0.jd1)
t1_flatten = self.t1.flatten()
assert t1_flatten.shape == (self.t1.size,)
assert not np.may_share_memory(t1_flatten.jd1, self.t1.jd1)
assert t1_flatten.location is not self.t1.location
assert t1_flatten.location == self.t1.location
t2_flatten = self.t2.flatten()
assert t2_flatten.shape == (self.t2.size,)
assert not np.may_share_memory(t2_flatten.jd1, self.t2.jd1)
assert t2_flatten.location.shape == t2_flatten.shape
assert not np.may_share_memory(t2_flatten.location, self.t2.location)
def test_transpose(self, masked):
t0_transpose = self.t0.transpose()
assert t0_transpose.shape == (5, 10)
assert np.all(t0_transpose.jd1 == self.t0.jd1.transpose())
assert np.may_share_memory(t0_transpose.jd1, self.t0.jd1)
assert t0_transpose.location is None
t1_transpose = self.t1.transpose()
assert t1_transpose.shape == (5, 10)
assert np.all(t1_transpose.jd1 == self.t1.jd1.transpose())
assert np.may_share_memory(t1_transpose.jd1, self.t1.jd1)
assert t1_transpose.location is self.t1.location
t2_transpose = self.t2.transpose()
assert t2_transpose.shape == (5, 10)
assert np.all(t2_transpose.jd1 == self.t2.jd1.transpose())
assert np.may_share_memory(t2_transpose.jd1, self.t2.jd1)
assert t2_transpose.location.shape == t2_transpose.shape
assert np.may_share_memory(t2_transpose.location, self.t2.location)
# Only one check on T, since it just calls transpose anyway.
t2_T = self.t2.T
assert t2_T.shape == (5, 10)
assert np.all(t2_T.jd1 == self.t2.jd1.T)
assert np.may_share_memory(t2_T.jd1, self.t2.jd1)
        assert t2_T.location.shape == t2_T.shape
assert np.may_share_memory(t2_T.location, self.t2.location)
def test_diagonal(self, masked):
t0_diagonal = self.t0.diagonal()
assert t0_diagonal.shape == (5,)
assert np.all(t0_diagonal.jd1 == self.t0.jd1.diagonal())
assert t0_diagonal.location is None
assert np.may_share_memory(t0_diagonal.jd1, self.t0.jd1)
t1_diagonal = self.t1.diagonal()
assert t1_diagonal.shape == (5,)
assert np.all(t1_diagonal.jd1 == self.t1.jd1.diagonal())
assert t1_diagonal.location is self.t1.location
assert np.may_share_memory(t1_diagonal.jd1, self.t1.jd1)
t2_diagonal = self.t2.diagonal()
assert t2_diagonal.shape == (5,)
assert np.all(t2_diagonal.jd1 == self.t2.jd1.diagonal())
assert t2_diagonal.location.shape == t2_diagonal.shape
assert np.may_share_memory(t2_diagonal.jd1, self.t2.jd1)
assert np.may_share_memory(t2_diagonal.location, self.t2.location)
def test_swapaxes(self, masked):
t0_swapaxes = self.t0.swapaxes(0, 1)
assert t0_swapaxes.shape == (5, 10)
assert np.all(t0_swapaxes.jd1 == self.t0.jd1.swapaxes(0, 1))
assert np.may_share_memory(t0_swapaxes.jd1, self.t0.jd1)
assert t0_swapaxes.location is None
t1_swapaxes = self.t1.swapaxes(0, 1)
assert t1_swapaxes.shape == (5, 10)
assert np.all(t1_swapaxes.jd1 == self.t1.jd1.swapaxes(0, 1))
assert np.may_share_memory(t1_swapaxes.jd1, self.t1.jd1)
assert t1_swapaxes.location is self.t1.location
t2_swapaxes = self.t2.swapaxes(0, 1)
assert t2_swapaxes.shape == (5, 10)
assert np.all(t2_swapaxes.jd1 == self.t2.jd1.swapaxes(0, 1))
assert np.may_share_memory(t2_swapaxes.jd1, self.t2.jd1)
assert t2_swapaxes.location.shape == t2_swapaxes.shape
assert np.may_share_memory(t2_swapaxes.location, self.t2.location)
def test_reshape(self, masked):
t0_reshape = self.t0.reshape(5, 2, 5)
assert t0_reshape.shape == (5, 2, 5)
assert np.all(t0_reshape.jd1 == self.t0._time.jd1.reshape(5, 2, 5))
assert np.all(t0_reshape.jd2 == self.t0._time.jd2.reshape(5, 2, 5))
assert np.may_share_memory(t0_reshape.jd1, self.t0.jd1)
assert np.may_share_memory(t0_reshape.jd2, self.t0.jd2)
assert t0_reshape.location is None
t1_reshape = self.t1.reshape(2, 5, 5)
assert t1_reshape.shape == (2, 5, 5)
assert np.all(t1_reshape.jd1 == self.t1.jd1.reshape(2, 5, 5))
assert np.may_share_memory(t1_reshape.jd1, self.t1.jd1)
assert t1_reshape.location is self.t1.location
# For reshape(5, 2, 5), the location array can remain the same.
t2_reshape = self.t2.reshape(5, 2, 5)
assert t2_reshape.shape == (5, 2, 5)
assert np.all(t2_reshape.jd1 == self.t2.jd1.reshape(5, 2, 5))
assert np.may_share_memory(t2_reshape.jd1, self.t2.jd1)
assert t2_reshape.location.shape == t2_reshape.shape
assert np.may_share_memory(t2_reshape.location, self.t2.location)
# But for reshape(5, 5, 2), location has to be broadcast and copied.
t2_reshape2 = self.t2.reshape(5, 5, 2)
assert t2_reshape2.shape == (5, 5, 2)
assert np.all(t2_reshape2.jd1 == self.t2.jd1.reshape(5, 5, 2))
assert np.may_share_memory(t2_reshape2.jd1, self.t2.jd1)
assert t2_reshape2.location.shape == t2_reshape2.shape
assert not np.may_share_memory(t2_reshape2.location, self.t2.location)
t2_reshape_t = self.t2.reshape(10, 5).T
assert t2_reshape_t.shape == (5, 10)
assert np.may_share_memory(t2_reshape_t.jd1, self.t2.jd1)
assert t2_reshape_t.location.shape == t2_reshape_t.shape
assert np.may_share_memory(t2_reshape_t.location, self.t2.location)
# Finally, reshape in a way that cannot be a view.
t2_reshape_t_reshape = t2_reshape_t.reshape(10, 5)
assert t2_reshape_t_reshape.shape == (10, 5)
assert not np.may_share_memory(t2_reshape_t_reshape.jd1, self.t2.jd1)
assert (t2_reshape_t_reshape.location.shape ==
t2_reshape_t_reshape.shape)
assert not np.may_share_memory(t2_reshape_t_reshape.location,
t2_reshape_t.location)
def test_shape_setting(self, masked):
t0_reshape = self.t0.copy()
mjd = t0_reshape.mjd # Creates a cache of the mjd attribute
t0_reshape.shape = (5, 2, 5)
assert t0_reshape.shape == (5, 2, 5)
assert mjd.shape != t0_reshape.mjd.shape # Cache got cleared
assert np.all(t0_reshape.jd1 == self.t0._time.jd1.reshape(5, 2, 5))
assert np.all(t0_reshape.jd2 == self.t0._time.jd2.reshape(5, 2, 5))
assert t0_reshape.location is None
# But if the shape doesn't work, one should get an error.
t0_reshape_t = t0_reshape.T
with pytest.raises(AttributeError):
t0_reshape_t.shape = (10, 5)
# check no shape was changed.
assert t0_reshape_t.shape == t0_reshape.T.shape
assert t0_reshape_t.jd1.shape == t0_reshape.T.shape
assert t0_reshape_t.jd2.shape == t0_reshape.T.shape
t1_reshape = self.t1.copy()
t1_reshape.shape = (2, 5, 5)
assert t1_reshape.shape == (2, 5, 5)
assert np.all(t1_reshape.jd1 == self.t1.jd1.reshape(2, 5, 5))
# location is a single element, so its shape should not change.
assert t1_reshape.location.shape == ()
# For reshape(5, 2, 5), the location array can remain the same.
# Note that we need to work directly on self.t2 here, since any
# copy would cause location to have the full shape.
self.t2.shape = (5, 2, 5)
assert self.t2.shape == (5, 2, 5)
assert self.t2.jd1.shape == (5, 2, 5)
assert self.t2.jd2.shape == (5, 2, 5)
assert self.t2.location.shape == (5, 2, 5)
assert self.t2.location.strides == (0, 0, 24)
# But for reshape(50), location would need to be copied, so this
# should fail.
oldshape = self.t2.shape
with pytest.raises(AttributeError):
self.t2.shape = (50,)
# check no shape was changed.
assert self.t2.jd1.shape == oldshape
assert self.t2.jd2.shape == oldshape
assert self.t2.location.shape == oldshape
# reset t2 to its original.
self.setup()
def test_squeeze(self, masked):
t0_squeeze = self.t0.reshape(5, 1, 2, 1, 5).squeeze()
assert t0_squeeze.shape == (5, 2, 5)
assert np.all(t0_squeeze.jd1 == self.t0.jd1.reshape(5, 2, 5))
assert np.may_share_memory(t0_squeeze.jd1, self.t0.jd1)
assert t0_squeeze.location is None
t1_squeeze = self.t1.reshape(1, 5, 1, 2, 5).squeeze()
assert t1_squeeze.shape == (5, 2, 5)
assert np.all(t1_squeeze.jd1 == self.t1.jd1.reshape(5, 2, 5))
assert np.may_share_memory(t1_squeeze.jd1, self.t1.jd1)
assert t1_squeeze.location is self.t1.location
t2_squeeze = self.t2.reshape(1, 1, 5, 2, 5, 1, 1).squeeze()
assert t2_squeeze.shape == (5, 2, 5)
assert np.all(t2_squeeze.jd1 == self.t2.jd1.reshape(5, 2, 5))
assert np.may_share_memory(t2_squeeze.jd1, self.t2.jd1)
assert t2_squeeze.location.shape == t2_squeeze.shape
assert np.may_share_memory(t2_squeeze.location, self.t2.location)
def test_add_dimension(self, masked):
t0_adddim = self.t0[:, np.newaxis, :]
assert t0_adddim.shape == (10, 1, 5)
assert np.all(t0_adddim.jd1 == self.t0.jd1[:, np.newaxis, :])
assert np.may_share_memory(t0_adddim.jd1, self.t0.jd1)
assert t0_adddim.location is None
t1_adddim = self.t1[:, :, np.newaxis]
assert t1_adddim.shape == (10, 5, 1)
assert np.all(t1_adddim.jd1 == self.t1.jd1[:, :, np.newaxis])
assert np.may_share_memory(t1_adddim.jd1, self.t1.jd1)
assert t1_adddim.location is self.t1.location
t2_adddim = self.t2[:, :, np.newaxis]
assert t2_adddim.shape == (10, 5, 1)
assert np.all(t2_adddim.jd1 == self.t2.jd1[:, :, np.newaxis])
assert np.may_share_memory(t2_adddim.jd1, self.t2.jd1)
assert t2_adddim.location.shape == t2_adddim.shape
assert np.may_share_memory(t2_adddim.location, self.t2.location)
def test_take(self, masked):
t0_take = self.t0.take((5, 2))
assert t0_take.shape == (2,)
assert np.all(t0_take.jd1 == self.t0._time.jd1.take((5, 2)))
assert t0_take.location is None
t1_take = self.t1.take((2, 4), axis=1)
assert t1_take.shape == (10, 2)
assert np.all(t1_take.jd1 == self.t1.jd1.take((2, 4), axis=1))
assert t1_take.location is self.t1.location
t2_take = self.t2.take((1, 3, 7), axis=0)
assert t2_take.shape == (3, 5)
assert np.all(t2_take.jd1 == self.t2.jd1.take((1, 3, 7), axis=0))
assert t2_take.location.shape == t2_take.shape
t2_take2 = self.t2.take((5, 15))
assert t2_take2.shape == (2,)
assert np.all(t2_take2.jd1 == self.t2.jd1.take((5, 15)))
assert t2_take2.location.shape == t2_take2.shape
def test_broadcast(self, masked):
"""Test using a callable method."""
t0_broadcast = self.t0._apply(np.broadcast_to, shape=(3, 10, 5))
assert t0_broadcast.shape == (3, 10, 5)
assert np.all(t0_broadcast.jd1 == self.t0.jd1)
assert np.may_share_memory(t0_broadcast.jd1, self.t0.jd1)
assert t0_broadcast.location is None
t1_broadcast = self.t1._apply(np.broadcast_to, shape=(3, 10, 5))
assert t1_broadcast.shape == (3, 10, 5)
assert np.all(t1_broadcast.jd1 == self.t1.jd1)
assert np.may_share_memory(t1_broadcast.jd1, self.t1.jd1)
assert t1_broadcast.location is self.t1.location
t2_broadcast = self.t2._apply(np.broadcast_to, shape=(3, 10, 5))
assert t2_broadcast.shape == (3, 10, 5)
assert np.all(t2_broadcast.jd1 == self.t2.jd1)
assert np.may_share_memory(t2_broadcast.jd1, self.t2.jd1)
assert t2_broadcast.location.shape == t2_broadcast.shape
assert np.may_share_memory(t2_broadcast.location, self.t2.location)
class TestArithmetic():
"""Arithmetic on Time objects, using both doubles."""
kwargs = ({}, {'axis': None}, {'axis': 0}, {'axis': 1}, {'axis': 2})
functions = ('min', 'max', 'sort')
def setup(self):
mjd = np.arange(50000, 50100, 10).reshape(2, 5, 1)
frac = np.array([0.1, 0.1+1.e-15, 0.1-1.e-15, 0.9+2.e-16, 0.9])
if use_masked_data:
frac = np.ma.array(frac)
frac[1] = np.ma.masked
self.t0 = Time(mjd, frac, format='mjd', scale='utc')
# Define arrays with same ordinal properties
frac = np.array([1, 2, 0, 4, 3])
if use_masked_data:
frac = np.ma.array(frac)
frac[1] = np.ma.masked
self.t1 = Time(mjd + frac, format='mjd', scale='utc')
self.jd = mjd + frac
@pytest.mark.parametrize('kw, func', itertools.product(kwargs, functions))
def test_argfuncs(self, kw, func, masked):
"""
Test that np.argfunc(jd, **kw) is the same as t0.argfunc(**kw) where
jd is a similarly shaped array with the same ordinal properties but
all integer values. Also test the same for t1 which has the same
integral values as jd.
"""
t0v = getattr(self.t0, 'arg' + func)(**kw)
t1v = getattr(self.t1, 'arg' + func)(**kw)
jdv = getattr(np, 'arg' + func)(self.jd, **kw)
if self.t0.masked and kw == {'axis': None} and func == 'sort':
t0v = np.ma.array(t0v, mask=self.t0.mask.reshape(t0v.shape)[t0v])
t1v = np.ma.array(t1v, mask=self.t1.mask.reshape(t1v.shape)[t1v])
jdv = np.ma.array(jdv, mask=self.jd.mask.reshape(jdv.shape)[jdv])
assert np.all(t0v == jdv)
assert np.all(t1v == jdv)
assert t0v.shape == jdv.shape
assert t1v.shape == jdv.shape
@pytest.mark.parametrize('kw, func', itertools.product(kwargs, functions))
def test_funcs(self, kw, func, masked):
"""
Test that np.func(jd, **kw) is the same as t1.func(**kw) where
jd is a similarly shaped array and the same integral values.
"""
t1v = getattr(self.t1, func)(**kw)
jdv = getattr(np, func)(self.jd, **kw)
assert np.all(t1v.value == jdv)
assert t1v.shape == jdv.shape
def test_argmin(self, masked):
assert self.t0.argmin() == 2
assert np.all(self.t0.argmin(axis=0) == 0)
assert np.all(self.t0.argmin(axis=1) == 0)
assert np.all(self.t0.argmin(axis=2) == 2)
def test_argmax(self, masked):
assert self.t0.argmax() == self.t0.size - 2
if masked:
# The 0 is where all entries are masked in that axis
assert np.all(self.t0.argmax(axis=0) == [1, 0, 1, 1, 1])
assert np.all(self.t0.argmax(axis=1) == [4, 0, 4, 4, 4])
else:
assert np.all(self.t0.argmax(axis=0) == 1)
assert np.all(self.t0.argmax(axis=1) == 4)
assert np.all(self.t0.argmax(axis=2) == 3)
def test_argsort(self, masked):
order = [2, 0, 4, 3, 1] if masked else [2, 0, 1, 4, 3]
assert np.all(self.t0.argsort() == np.array(order))
assert np.all(self.t0.argsort(axis=0) == np.arange(2).reshape(2, 1, 1))
assert np.all(self.t0.argsort(axis=1) == np.arange(5).reshape(5, 1))
assert np.all(self.t0.argsort(axis=2) == np.array(order))
ravel = np.arange(50).reshape(-1, 5)[:, order].ravel()
if masked:
t0v = self.t0.argsort(axis=None)
# Manually remove elements in ravel that correspond to masked
# entries in self.t0. This removes the 10 entries that are masked
# which show up at the end of the list.
mask = self.t0.mask.ravel()[ravel]
ravel = ravel[~mask]
assert np.all(t0v[:-10] == ravel)
else:
assert np.all(self.t0.argsort(axis=None) == ravel)
def test_min(self, masked):
assert self.t0.min() == self.t0[0, 0, 2]
assert np.all(self.t0.min(0) == self.t0[0])
assert np.all(self.t0.min(1) == self.t0[:, 0])
assert np.all(self.t0.min(2) == self.t0[:, :, 2])
assert self.t0.min(0).shape == (5, 5)
assert self.t0.min(0, keepdims=True).shape == (1, 5, 5)
assert self.t0.min(1).shape == (2, 5)
assert self.t0.min(1, keepdims=True).shape == (2, 1, 5)
assert self.t0.min(2).shape == (2, 5)
assert self.t0.min(2, keepdims=True).shape == (2, 5, 1)
def test_max(self, masked):
assert self.t0.max() == self.t0[-1, -1, -2]
assert np.all(self.t0.max(0) == self.t0[1])
assert np.all(self.t0.max(1) == self.t0[:, 4])
assert np.all(self.t0.max(2) == self.t0[:, :, 3])
assert self.t0.max(0).shape == (5, 5)
assert self.t0.max(0, keepdims=True).shape == (1, 5, 5)
def test_ptp(self, masked):
assert self.t0.ptp() == self.t0.max() - self.t0.min()
assert np.all(self.t0.ptp(0) == self.t0.max(0) - self.t0.min(0))
assert self.t0.ptp(0).shape == (5, 5)
assert self.t0.ptp(0, keepdims=True).shape == (1, 5, 5)
def test_sort(self, masked):
order = [2, 0, 4, 3, 1] if masked else [2, 0, 1, 4, 3]
assert np.all(self.t0.sort() == self.t0[:, :, order])
assert np.all(self.t0.sort(0) == self.t0)
assert np.all(self.t0.sort(1) == self.t0)
assert np.all(self.t0.sort(2) == self.t0[:, :, order])
if not masked:
assert np.all(self.t0.sort(None) ==
self.t0[:, :, order].ravel())
# Bit superfluous, but good to check.
assert np.all(self.t0.sort(-1)[:, :, 0] == self.t0.min(-1))
assert np.all(self.t0.sort(-1)[:, :, -1] == self.t0.max(-1))
def test_regression():
# For #5225, where a time with a single-element delta_ut1_utc could not
# be copied, flattened, or ravelled. (For copy, it is in test_basic.)
t = Time(49580.0, scale='tai', format='mjd')
t_ut1 = t.ut1
t_ut1_copy = copy.deepcopy(t_ut1)
assert type(t_ut1_copy.delta_ut1_utc) is np.ndarray
t_ut1_flatten = t_ut1.flatten()
assert type(t_ut1_flatten.delta_ut1_utc) is np.ndarray
t_ut1_ravel = t_ut1.ravel()
assert type(t_ut1_ravel.delta_ut1_utc) is np.ndarray
assert t_ut1_copy.delta_ut1_utc == t_ut1.delta_ut1_utc
| 48.082759
| 79
| 0.619143
|
565a590fde0f19a720809e9f9d74acd1017a1702
| 153
|
py
|
Python
|
vectors2d/__init__.py
|
MitryP/vectors
|
5ff73419c194a8228a4b03b83dcb2319685885e7
|
[
"MIT"
] | 2
|
2021-01-21T21:23:18.000Z
|
2021-01-24T14:34:11.000Z
|
vectors2d/__init__.py
|
MitryP/vectors
|
5ff73419c194a8228a4b03b83dcb2319685885e7
|
[
"MIT"
] | null | null | null |
vectors2d/__init__.py
|
MitryP/vectors
|
5ff73419c194a8228a4b03b83dcb2319685885e7
|
[
"MIT"
] | null | null | null |
from vectors2d.vectors2d import Vector2D, absolute_vector, sum_vectors, sub_vectors, mult_vector, scalar_mult_vectors, \
get_angle, vector_from_dots
| 51
| 120
| 0.836601
|
219dc969e7050db61674426166dbf32f37150d6c
| 2,701
|
py
|
Python
|
train.py
|
RajputJay41/Using-bert-and-pytorch-for-IMDB
|
74100f7a9d1121c49d28e047c736da06fee48e5c
|
[
"MIT"
] | 2
|
2020-10-01T14:36:27.000Z
|
2020-10-19T08:57:54.000Z
|
train.py
|
RajputJay41/Using-bert-and-pytorch-for-IMDB
|
74100f7a9d1121c49d28e047c736da06fee48e5c
|
[
"MIT"
] | null | null | null |
train.py
|
RajputJay41/Using-bert-and-pytorch-for-IMDB
|
74100f7a9d1121c49d28e047c736da06fee48e5c
|
[
"MIT"
] | 1
|
2021-07-05T06:56:30.000Z
|
2021-07-05T06:56:30.000Z
|
import config
import pandas as pd
from sklearn import model_selection
import dataset
import torch
from model import BERTBaseUncased
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
import engine
from sklearn import metrics
import numpy as np
import torch.nn as nn
def run():
dfx = pd.read_csv(config.TRAINING_FILE).fillna("none")
dfx.sentiment = dfx.sentiment.apply(
        lambda x: 1 if x == "positive" else 0
)
df_train, df_valid = model_selection.train_test_split(
dfx,
test_size = 0.1,
random_state = 42,
stratify = dfx.sentiment.values
)
df_train = df_train.reset_index(drop=True)
df_valid = df_valid.reset_index(drop=True)
train_dataset = dataset.BERTDataset(
review = df_train.review.values,
target = df_train.sentiment.values
)
train_data_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=config.TRAIN_BATCH_SIZE,
num_workers=4
)
valid_dataset = dataset.BERTDataset(
review = df_valid.review.values,
target = df_valid.sentiment.values
)
valid_data_loader = torch.utils.data.DataLoader(
valid_dataset,
batch_size=config.VALID_BATCH_SIZE,
num_workers=1
)
device = torch.device(config.DEVICE)
model = BERTBaseUncased()
model.to(device)
param_optimizer = list(model.named_parameters())
    # Parameters matched by no_decay (biases and LayerNorm parameters) are excluded from weight decay.
    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_parameters = [
{
"params": [
p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.001,
},
{
"params": [
p for n, p in param_optimizer if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
num_train_steps = int(len(df_train) / config.TRAIN_BATCH_SIZE * config.EPOCHS)
optimizer = AdamW(optimizer_parameters, lr=3e-5)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=num_train_steps)
best_accuracy = 0
for epoch in range(config.EPOCHS):
engine.train_fn(train_data_loader, model, optimizer, device, scheduler)
outputs, targets = engine.eval_fn(valid_data_loader, model, device)
outputs = np.array(outputs) >= 0.5
accuracy = metrics.accuracy_score(targets, outputs)
print(f"Accuracy Score = {accuracy}")
if accuracy > best_accuracy:
torch.save(model.state_dict(), config.MODEL_PATH)
best_accuracy = accuracy
if __name__ == "__main__":
run()
| 27.01
| 114
| 0.65124
|
0795099f6bad0a4a94eecf02a511058619d1f2db
| 613
|
py
|
Python
|
dj_twitter_clone_app/blog/urls.py
|
ivanprytula/dj_demo_app
|
49ca506b22d3d99608e192b28787e185b39d3c24
|
[
"MIT"
] | null | null | null |
dj_twitter_clone_app/blog/urls.py
|
ivanprytula/dj_demo_app
|
49ca506b22d3d99608e192b28787e185b39d3c24
|
[
"MIT"
] | null | null | null |
dj_twitter_clone_app/blog/urls.py
|
ivanprytula/dj_demo_app
|
49ca506b22d3d99608e192b28787e185b39d3c24
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import (
BlogListView,
PostCreateView,
PostDetailView,
PostUpdateView,
PostDeleteView,
BlogCategory,
)
app_name = 'blog'
urlpatterns = [
path('', BlogListView.as_view(), name='blog_list'),
path('new/', PostCreateView.as_view(), name='post_new'),
path('<int:pk>/', PostDetailView.as_view(), name='post_detail'),
path('<int:pk>/update', PostUpdateView.as_view(), name='post_update'),
path('<int:pk>/delete', PostDeleteView.as_view(), name='post_delete'),
path('<category>/', BlogCategory.as_view(), name='post_category'),
]
| 29.190476
| 74
| 0.673736
|
6582135770973670789e3ad716e2d1136ee7c2a5
| 1,438
|
py
|
Python
|
pages/__init__.py
|
djaodjin/djaodjin-pages
|
e225783b30f82c79fd947e584b1fb61df7dff423
|
[
"BSD-2-Clause"
] | 11
|
2015-04-26T20:13:12.000Z
|
2021-10-01T06:43:38.000Z
|
pages/__init__.py
|
djaodjin/djaodjin-pages
|
e225783b30f82c79fd947e584b1fb61df7dff423
|
[
"BSD-2-Clause"
] | 32
|
2015-03-17T22:05:07.000Z
|
2022-03-30T16:37:03.000Z
|
pages/__init__.py
|
djaodjin/djaodjin-pages
|
e225783b30f82c79fd947e584b1fb61df7dff423
|
[
"BSD-2-Clause"
] | 10
|
2015-04-26T20:12:25.000Z
|
2021-02-02T01:42:38.000Z
|
# Copyright (c) 2021, Djaodjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
PEP 386-compliant version number for the pages django app.
"""
__version__ = '0.4.3-dev'
| 47.933333
| 78
| 0.778164
|
47f0ac4de0d8cda7a0b2d3010dc257ed50c86d62
| 1,425
|
py
|
Python
|
filtering/Frequency_Filtering.py
|
NANOGravDataManagement/bridge
|
a03cf262767b210d1fbb62bb934f87cf411f1885
|
[
"Apache-2.0"
] | null | null | null |
filtering/Frequency_Filtering.py
|
NANOGravDataManagement/bridge
|
a03cf262767b210d1fbb62bb934f87cf411f1885
|
[
"Apache-2.0"
] | null | null | null |
filtering/Frequency_Filtering.py
|
NANOGravDataManagement/bridge
|
a03cf262767b210d1fbb62bb934f87cf411f1885
|
[
"Apache-2.0"
] | null | null | null |
# Frequency_Filtering.py
# A script that takes in a .tim file, a frequency range, and an output directory.
# It creates a new file containing only the observations whose frequencies fall within the given range, stored in the output directory.
# sample input:
# python Frequency_Filtering.py /Users/fkeri/Desktop/B1855+09_NANOGrav_9yv0.tim 1300.0 1600.0 /Users/fkeri/Desktop/
# we can see that it takes in 4 line arguments: [INPUT FILE], [FREQ START], [FREQ END], [OUTPUT DIRECTORY]
# the output file will have the same name as the input file, with "FreqRange_" as a prefix: "FreqRange_B1855+09_NANOGrav_9yv0.tim"
# it is possible to name the output file differently by putting the file name in [OUTPUT DIRECTORY]: /Users/fkeri/Desktop/filename.tim
import sys
import os.path
inFile = open( sys.argv[1], "r" )
inFile.readline() #omit first line
ALLlines = inFile.readlines()
start = float( sys.argv[2] )
end = float( sys.argv[3] )
save_path = sys.argv[4]
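# If the last path component has no 3-character extension (no '.' four characters from
# the end), save_path is treated as an output directory and the output file name is the
# input file name prefixed with "FreqRange_"; otherwise save_path is used as-is.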
if save_path[-4] != '.':
nameFile = os.path.join( save_path, "FreqRange_"+sys.argv[1].split("/")[-1] )
else:
nameFile = save_path
outFile = open( nameFile, "w" )
L = []
for i in range( 0, len( ALLlines ) ):
L.append( ALLlines[i].split(' ') )
# L.sort(key=lambda row: row[2])
for i in range( 0, len( ALLlines )):
Frequency = float( L[i][1] )
if(Frequency >= start and Frequency <= end ):
X = ' '.join( L[i] )
outFile.write( X )
inFile.close()
outFile.close()
| 34.756098
| 134
| 0.687018
|
074124152a1b7afa6a29332bace989e302a0c03c
| 298
|
py
|
Python
|
forms.py
|
yichao-l/amr-eager-multilingual
|
6d96445a23ff493ceedea02712fbcceffe08b879
|
[
"BSD-2-Clause"
] | 15
|
2018-02-26T08:02:31.000Z
|
2021-11-05T05:49:43.000Z
|
forms.py
|
yichao-l/amr-eager-multilingual
|
6d96445a23ff493ceedea02712fbcceffe08b879
|
[
"BSD-2-Clause"
] | 1
|
2021-01-06T09:27:40.000Z
|
2021-01-06T12:45:08.000Z
|
forms.py
|
yichao-l/amr-eager-multilingual
|
6d96445a23ff493ceedea02712fbcceffe08b879
|
[
"BSD-2-Clause"
] | 7
|
2018-07-24T21:21:03.000Z
|
2022-03-07T06:19:58.000Z
|
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField
from wtforms.validators import DataRequired
class InputForm(FlaskForm):
#input_sent = StringField('input_sent', validators=[DataRequired()])
input_sent = TextAreaField('input_sent', validators=[DataRequired()])
| 42.571429
| 73
| 0.802013
|
7703cf9e0cb59214b34b7c7d8683840d2d78ea6e
| 28,943
|
py
|
Python
|
features/eolearn/features/interpolation.py
|
chorng/eo-learn
|
a1a3c6fa5568d398f5e43f5ad5aecdfeb05e8d3c
|
[
"MIT"
] | null | null | null |
features/eolearn/features/interpolation.py
|
chorng/eo-learn
|
a1a3c6fa5568d398f5e43f5ad5aecdfeb05e8d3c
|
[
"MIT"
] | null | null | null |
features/eolearn/features/interpolation.py
|
chorng/eo-learn
|
a1a3c6fa5568d398f5e43f5ad5aecdfeb05e8d3c
|
[
"MIT"
] | null | null | null |
"""
Module for interpolating, smoothing and re-sampling features in EOPatch
Credits:
Copyright (c) 2017-2022 Matej Aleksandrov, Matej Batič, Grega Milčinski, Domagoj Korais, Matic Lubej (Sinergise)
Copyright (c) 2017-2022 Žiga Lukšič, Devis Peressutti, Nejc Vesel, Jovan Višnjić, Anže Zupanc (Sinergise)
Copyright (c) 2017-2019 Blaž Sovdat, Andrej Burja (Sinergise)
Copyright (c) 2018-2019 Filip Koprivec (Jožef Stefan Institute)
Copyright (c) 2018-2019 William Ouellette
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import warnings
import datetime as dt
import inspect
from functools import partial
import dateutil
import scipy.interpolate
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from eolearn.core import EOTask, EOPatch, FeatureType, FeatureTypeSet
from eolearn.core.exceptions import EOUserWarning
try:
import numba
except ImportError as exception:
warnings.warn(
f"Failed to import numba with exception: '{exception}'. Some interpolation tasks won't work", EOUserWarning
)
def base_interpolation_function(data, times, resampled_times):
"""Interpolates data feature
:param data: Array in a shape of t x (h x w x n)
:type data: numpy.ndarray
:param times: Array of reference times relative to the first timestamp
    :type times: numpy.array
:param resampled_times: Array of reference times relative to the first timestamp in initial timestamp array.
:type resampled_times: numpy.array
:return: Array of interpolated values
:rtype: numpy.ndarray
"""
_, height_width_depth = data.shape
new_bands = np.empty((len(resampled_times), height_width_depth))
for n_feat in numba.prange(height_width_depth):
mask1d = ~np.isnan(data[:, n_feat])
if not mask1d.any():
new_data = np.empty(len(resampled_times))
new_data[:] = np.nan
else:
new_data = np.interp(
resampled_times.astype(np.float64),
times[mask1d].astype(np.float64),
data[:, n_feat][mask1d].astype(np.float64),
)
true_index = np.where(mask1d)
index_first, index_last = true_index[0][0], true_index[0][-1]
min_time, max_time = times[index_first], times[index_last]
first = np.where(resampled_times < min_time)[0]
if first.size:
new_data[: first[-1] + 1] = np.nan
last = np.where(max_time < resampled_times)[0]
if last.size:
new_data[last[0] :] = np.nan
new_bands[:, n_feat] = new_data.astype(data.dtype)
return new_bands
try:
# pylint: disable=invalid-name
interpolation_function = numba.njit(base_interpolation_function)
interpolation_function_parallel = numba.njit(base_interpolation_function, parallel=True)
except NameError:
pass
class InterpolationTask(EOTask):
"""Main EOTask class for interpolation and resampling of time-series.
The task takes from EOPatch the specified data feature and timestamps. For each pixel in the spatial grid it
creates an interpolation model using values that are not NaN or masked with `eopatch.mask['VALID_DATA']`. Then
it replaces invalid values using interpolation model. If ``resample_range`` parameter is used the values in
time series will be resampled to new timestamps.
In the process the interpolated feature is overwritten and so are the timestamps. After the execution of the task
the feature will contain interpolated and resampled values and corresponding new timestamps.
:param feature: A feature to be interpolated with optional new feature name
:type feature: (FeatureType, str) or (FeatureType, str, str)
    :param interpolation_object: Interpolation class which is initialized with ``interpolation_parameters``
:type interpolation_object: object
:param resample_range: If None the data will be only interpolated over existing timestamps and NaN values will be
replaced with interpolated values (if possible) in the existing EOPatch. Otherwise ``resample_range`` can be
set to tuple in a form of (start_date, end_date, step_days), e.g. ('2018-01-01', '2018-06-01', 16). This will
create a new EOPatch with resampled values for times start_date, start_date + step_days,
start_date + 2 * step_days, ... . End date is excluded from timestamps. Additionally, ``resample_range`` can
be a list of dates or date-strings where the interpolation will be evaluated.
:type resample_range: (str, str, int) or list(str) or list(datetime.datetime) or None
:param result_interval: Maximum and minimum of returned data
:type result_interval: (float, float)
:param mask_feature: A mask feature which will be used to mask certain features
:type mask_feature: (FeatureType, str)
:param copy_features: List of tuples of type (FeatureType, str) or (FeatureType, str, str) that are copied
over into the new EOPatch. The first string is the feature name, and the second one (optional) is a new name
to be used for the feature
:type copy_features: list((FeatureType, str) or (FeatureType, str, str))
:param unknown_value: Value which will be used for timestamps where interpolation cannot be calculated
:type unknown_value: float or numpy.nan
:param filling_factor: Multiplication factor used to create temporal gap between consecutive observations. Value
has to be greater than 1. Default is `10`
:type filling_factor: int
:param scale_time: Factor used to scale the time difference in seconds between acquisitions. If `scale_time=60`,
returned time is in minutes, if `scale_time=3600` in hours. Default is `3600`
:type scale_time: int
:param interpolate_pixel_wise: Flag to indicate pixel wise interpolation or fast interpolation that creates a single
interpolation object for the whole image
:type interpolate_pixel_wise: bool
:param interpolation_parameters: Parameters which will be propagated to ``interpolation_object``
"""
def __init__(
self,
feature,
interpolation_object,
*,
resample_range=None,
result_interval=None,
mask_feature=None,
copy_features=None,
unknown_value=np.nan,
filling_factor=10,
scale_time=3600,
interpolate_pixel_wise=False,
**interpolation_parameters,
):
self.renamed_feature = self.parse_renamed_feature(feature, allowed_feature_types=FeatureTypeSet.RASTER_TYPES_4D)
self.interpolation_object = interpolation_object
self.resample_range = resample_range
self.result_interval = result_interval
self.mask_feature_parser = (
None
if mask_feature is None
else self.get_feature_parser(
mask_feature, allowed_feature_types={FeatureType.MASK, FeatureType.MASK_TIMELESS, FeatureType.LABEL}
)
)
if resample_range is None and copy_features is not None:
            self.copy_features_parser = None
warnings.warn(
'Argument "copy_features" will be ignored if "resample_range" is None. Nothing to copy.', EOUserWarning
)
else:
self.copy_features_parser = None if copy_features is None else self.get_feature_parser(copy_features)
self.unknown_value = unknown_value
self.interpolation_parameters = interpolation_parameters
self.scale_time = scale_time
self.filling_factor = filling_factor
self.interpolate_pixel_wise = interpolate_pixel_wise
self._resampled_times = None
@staticmethod
def _mask_feature_data(feature_data, mask, mask_type):
"""Masks values of data feature with a given mask of given mask type. The masking is done by assigning
`numpy.nan` value.
:param feature_data: Data array which will be masked
:type feature_data: numpy.ndarray
:param mask: Mask array
:type mask: numpy.ndarray
:param mask_type: Feature type of mask
:type mask_type: FeatureType
:return: Masked data array
:rtype: numpy.ndarray
"""
if mask_type.is_spatial() and feature_data.shape[1:3] != mask.shape[-3:-1]:
raise ValueError(
f"Spatial dimensions of interpolation and mask feature do not match: {feature_data.shape} {mask.shape}"
)
if mask_type.is_temporal() and feature_data.shape[0] != mask.shape[0]:
raise ValueError(
f"Time dimension of interpolation and mask feature do not match: {feature_data.shape} {mask.shape}"
)
# This allows masking each channel differently but causes some complications while masking with label
if mask.shape[-1] != feature_data.shape[-1]:
mask = mask[..., 0]
if mask_type is FeatureType.MASK:
feature_data[mask, ...] = np.nan
elif mask_type is FeatureType.MASK_TIMELESS:
feature_data[:, mask, ...] = np.nan
elif mask_type is FeatureType.LABEL:
            # np.swapaxes returns a view, so keep the result; otherwise the call is a
            # no-op and the mask would be applied along the wrong axes. Swap back after.
            feature_data = np.swapaxes(feature_data, 1, 3)
            feature_data[mask, ..., :, :] = np.nan
            feature_data = np.swapaxes(feature_data, 1, 3)
return feature_data
@staticmethod
def _get_start_end_nans(data):
"""Find NaN values in data that either start or end the time-series
Function to return a binary array of same size as data where `True` values correspond to NaN values present at
beginning or end of time-series. NaNs internal to the time-series are not included in the binary mask.
:param data: Array of observations of size TxNOBS
:type data: numpy.array
:return: Binary array of shape TxNOBS. `True` values indicate NaNs present at beginning or end of time-series
:rtype: numpy.array
"""
# find NaNs that start a time-series
start_nan = np.isnan(data)
for idx, row in enumerate(start_nan[:-1]):
start_nan[idx + 1] = np.logical_and(row, start_nan[idx + 1])
# find NaNs that end a time-series
end_nan = np.isnan(data)
for idx, row in enumerate(end_nan[-2::-1]):
end_nan[-idx - 2] = np.logical_and(row, end_nan[-idx - 1])
return np.logical_or(start_nan, end_nan)
@staticmethod
def _get_unique_times(data, times):
"""Replace duplicate acquisitions which have same values on the chosen time scale with their average.
The average is calculated with numpy.nanmean, meaning that NaN values are ignored when calculating the average.
:param data: Array in a shape of t x nobs, where nobs = h x w x n
:type data: numpy.ndarray
:param times: Array of reference times relative to the first timestamp
:type times: numpy.array
:return: cleaned versions of data input
:rtype: numpy.ndarray
:return: cleaned versions of times input
:rtype: numpy.array
"""
seen = set()
duplicated_indices = np.array(
[idx for idx, item in enumerate(times) if item in seen or seen.add(item)], dtype=int
)
duplicated_times = np.unique(times[duplicated_indices])
for time in duplicated_times:
indices = np.where(times == time)[0]
nan_mask = np.all(np.isnan(data[indices]), axis=0)
data[indices[0], ~nan_mask] = np.nanmean(data[indices][:, ~nan_mask], axis=0)
times = np.delete(times, duplicated_indices, axis=0)
data = np.delete(data, duplicated_indices, axis=0)
return data, times
def _copy_old_features(self, new_eopatch, old_eopatch):
"""Copy features from old EOPatch
:param new_eopatch: New EOPatch container where the old features will be copied to
:type new_eopatch: EOPatch
:param old_eopatch: Old EOPatch container where the old features are located
:type old_eopatch: EOPatch
"""
if self.copy_features_parser is not None:
existing_features = set(new_eopatch.get_feature_list())
renamed_features = self.copy_features_parser.get_renamed_features(old_eopatch)
for copy_feature_type, copy_feature_name, copy_new_feature_name in renamed_features:
new_feature = copy_feature_type, copy_new_feature_name
if new_feature in existing_features:
raise ValueError(
f"Feature {copy_new_feature_name} of {copy_feature_type} already exists in the "
"new EOPatch! Use a different name!"
)
existing_features.add(new_feature)
new_eopatch[copy_feature_type][copy_new_feature_name] = old_eopatch[copy_feature_type][
copy_feature_name
]
return new_eopatch
def interpolate_data(self, data, times, resampled_times):
"""Interpolates data feature
:param data: Array in a shape of t x nobs, where nobs = h x w x n
:type data: numpy.ndarray
:param times: Array of reference times relative to the first timestamp
:type times: numpy.array
:param resampled_times: Array of reference times relative to the first timestamp in initial timestamp array.
:type resampled_times: numpy.array
:return: Array of interpolated values
:rtype: numpy.ndarray
"""
# pylint: disable=too-many-locals
# get size of 2d array t x nobs
nobs = data.shape[-1]
if self.interpolate_pixel_wise:
# initialise array of interpolated values
new_data = (
data if self.resample_range is None else np.full((len(resampled_times), nobs), np.nan, dtype=data.dtype)
)
# Interpolate for each pixel, could be easily parallelized
for obs in range(nobs):
valid = ~np.isnan(data[:, obs])
obs_interpolating_func = self.get_interpolation_function(times[valid], data[valid, obs])
new_data[:, obs] = obs_interpolating_func(resampled_times[:, np.newaxis])
# return interpolated values
return new_data
# mask representing overlap between reference and resampled times
time_mask = (resampled_times >= np.min(times)) & (resampled_times <= np.max(times))
# define time values as linear monotonically increasing over the observations
const = int(self.filling_factor * (np.max(times) - np.min(times)))
temp_values = times[:, np.newaxis] + const * np.arange(nobs)[np.newaxis, :].astype(np.float64)
res_temp_values = resampled_times[:, np.newaxis] + const * np.arange(nobs)[np.newaxis, :].astype(np.float64)
# initialise array of interpolated values
new_data = np.full((len(resampled_times), nobs), np.nan, dtype=data.dtype)
# array defining index correspondence between reference times and resampled times
ori2res = np.array(
[
np.abs(resampled_times - o).argmin()
if np.min(resampled_times) <= o <= np.max(resampled_times)
else None
for o in times
]
)
# find NaNs that start or end a time-series
row_nans, col_nans = np.where(self._get_start_end_nans(data))
nan_row_res_indices = np.array([index for index in ori2res[row_nans] if index is not None], dtype=np.int32)
nan_col_res_indices = np.array([index is not None for index in ori2res[row_nans]], dtype=bool)
if nan_row_res_indices.size:
# mask out from output values the starting/ending NaNs
res_temp_values[nan_row_res_indices, col_nans[nan_col_res_indices]] = np.nan
# if temporal values outside the reference dates are required (extrapolation) masked them to NaN
res_temp_values[~time_mask, :] = np.nan
# build 1d array for interpolation. Spline functions require monotonically increasing values of x,
# so .T is used
input_x = temp_values.T[~np.isnan(data).T]
input_y = data.T[~np.isnan(data).T]
# build interpolation function
if len(input_x) > 1:
interp_func = self.get_interpolation_function(input_x, input_y)
# interpolate non-NaN values in resampled time values
new_data[~np.isnan(res_temp_values)] = interp_func(res_temp_values[~np.isnan(res_temp_values)])
# return interpolated values
return new_data
def get_interpolation_function(self, times, series):
"""Initializes interpolation model
:param times: Array of reference times in second relative to the first timestamp
:type times: numpy.array
:param series: One dimensional array of time series
:type series: numpy.array
:return: Initialized interpolation model class
"""
if str(inspect.getmodule(self.interpolation_object))[9:14] == "numpy":
return partial(self.interpolation_object, xp=times, fp=series, left=np.nan, right=np.nan)
return self.interpolation_object(times, series, **self.interpolation_parameters)
def get_resampled_timestamp(self, timestamp):
"""Takes a list of timestamps and generates new list of timestamps according to ``resample_range``
:param timestamp: list of timestamps
:type timestamp: list(datetime.datetime)
:return: new list of timestamps
:rtype: list(datetime.datetime)
"""
if self.resample_range is None:
return timestamp
if not isinstance(self.resample_range, (tuple, list)):
raise ValueError(f"Invalid resample_range {self.resample_range}, expected tuple")
if tuple(map(type, self.resample_range)) == (str, str, int):
start_date = dateutil.parser.parse(self.resample_range[0])
end_date = dateutil.parser.parse(self.resample_range[1])
step = dt.timedelta(days=self.resample_range[2])
days = [start_date]
while days[-1] + step < end_date:
days.append(days[-1] + step)
elif self.resample_range and np.all([isinstance(date, str) for date in self.resample_range]):
days = [dateutil.parser.parse(date) for date in self.resample_range]
elif self.resample_range and np.all([isinstance(date, dt.datetime) for date in self.resample_range]):
days = list(self.resample_range)
else:
raise ValueError("Invalid format in {self.resample_range}, expected strings or datetimes")
return days
def execute(self, eopatch):
"""Execute method that processes EOPatch and returns EOPatch"""
# pylint: disable=too-many-locals
feature_type, feature_name, new_feature_name = self.renamed_feature
# Make a copy not to change original numpy array
feature_data = eopatch[feature_type][feature_name].copy()
time_num, height, width, band_num = feature_data.shape
if time_num <= 1:
raise ValueError(
f"Feature {(feature_type, feature_name)} has time dimension of size {time_num}, "
"required at least size 2"
)
# Apply a mask on data
if self.mask_feature_parser is not None:
for mask_type, mask_name in self.mask_feature_parser.get_features(eopatch):
negated_mask = ~eopatch[mask_type][mask_name].astype(bool)
feature_data = self._mask_feature_data(feature_data, negated_mask, mask_type)
# Flatten array
feature_data = np.reshape(feature_data, (time_num, height * width * band_num))
# If resampling create new EOPatch
new_eopatch = EOPatch() if self.resample_range else eopatch
# Resample times
times = eopatch.get_time_series(scale_time=self.scale_time)
new_eopatch.timestamp = self.get_resampled_timestamp(eopatch.timestamp)
total_diff = int((new_eopatch.timestamp[0].date() - eopatch.timestamp[0].date()).total_seconds())
resampled_times = new_eopatch.get_time_series(scale_time=self.scale_time) + total_diff // self.scale_time
# Add BBox to eopatch if it was created anew
if new_eopatch.bbox is None:
new_eopatch.bbox = eopatch.bbox
# Replace duplicate acquisitions which have same values on the chosen time scale with their average
feature_data, times = self._get_unique_times(feature_data, times)
# Interpolate
feature_data = self.interpolate_data(feature_data, times, resampled_times)
# Normalize
if self.result_interval:
min_val, max_val = self.result_interval
valid_mask = ~np.isnan(feature_data)
feature_data[valid_mask] = np.maximum(np.minimum(feature_data[valid_mask], max_val), min_val)
# Replace unknown value
if not np.isnan(self.unknown_value):
feature_data[np.isnan(feature_data)] = self.unknown_value
# Reshape back
new_eopatch[feature_type][new_feature_name] = np.reshape(
feature_data, (feature_data.shape[0], height, width, band_num)
)
# append features from old patch
new_eopatch = self._copy_old_features(new_eopatch, eopatch)
return new_eopatch
class LinearInterpolationTask(InterpolationTask):
"""Implements `eolearn.features.InterpolationTask` by using `numpy.interp` and `@numba.jit(nopython=True)`
:param parallel: interpolation is calculated in parallel using as many CPUs as detected
by the multiprocessing module.
:type parallel: bool
:param kwargs: parameters of InterpolationTask(EOTask)
"""
def __init__(self, feature, parallel=False, **kwargs):
self.parallel = parallel
super().__init__(feature, np.interp, **kwargs)
def interpolate_data(self, data, times, resampled_times):
"""Interpolates data feature
:param data: Array in a shape of t x nobs, where nobs = h x w x n
:type data: numpy.ndarray
:param times: Array of reference times in second relative to the first timestamp
:type times: numpy.array
:param resampled_times: Array of reference times in second relative to the first timestamp in initial timestamp
array.
:type resampled_times: numpy.array
:return: Array of interpolated values
:rtype: numpy.ndarray
"""
if self.parallel:
return interpolation_function_parallel(data, times, resampled_times)
return interpolation_function(data, times, resampled_times)
class CubicInterpolationTask(InterpolationTask):
"""
Implements `eolearn.features.InterpolationTask` by using `scipy.interpolate.interp1d(kind='cubic')`
"""
def __init__(self, feature, **kwargs):
super().__init__(feature, scipy.interpolate.interp1d, kind="cubic", **kwargs)
class SplineInterpolationTask(InterpolationTask):
"""
Implements `eolearn.features.InterpolationTask` by using `scipy.interpolate.UnivariateSpline`
"""
def __init__(self, feature, *, spline_degree=3, smoothing_factor=0, **kwargs):
super().__init__(feature, scipy.interpolate.UnivariateSpline, k=spline_degree, s=smoothing_factor, **kwargs)
class BSplineInterpolationTask(InterpolationTask):
"""
Implements `eolearn.features.InterpolationTask` by using `scipy.interpolate.BSpline`
"""
def __init__(self, feature, *, spline_degree=3, **kwargs):
super().__init__(feature, scipy.interpolate.make_interp_spline, k=spline_degree, **kwargs)
class AkimaInterpolationTask(InterpolationTask):
"""
Implements `eolearn.features.InterpolationTask` by using `scipy.interpolate.Akima1DInterpolator`
"""
def __init__(self, feature, **kwargs):
super().__init__(feature, scipy.interpolate.Akima1DInterpolator, **kwargs)
class KrigingObject:
"""
Interpolation function like object for Kriging
"""
def __init__(self, times, series, **kwargs):
self.regressor = GaussianProcessRegressor(**kwargs)
# Since most of data is close to zero (relatively to time points), first get time data in [0,1] range
# to ensure nonzero results
# Should normalize by max in resample time to be totally consistent,
# but this works fine (0.03% error in testing)
self.normalizing_factor = max(times) - min(times)
self.regressor.fit(times.reshape(-1, 1) / self.normalizing_factor, series)
self.call_args = kwargs.get("call_args", {})
def __call__(self, new_times, **kwargs):
call_args = self.call_args.copy()
call_args.update(kwargs)
return self.regressor.predict(new_times.reshape(-1, 1) / self.normalizing_factor, **call_args)
class KrigingInterpolationTask(InterpolationTask):
"""
Implements `eolearn.features.InterpolationTask` by using `sklearn.gaussian_process.GaussianProcessRegressor`
Gaussian processes (superset of kriging) are especially used in geological missing data estimation.
Compared to spline interpolation, gaussian processes produce much more smoothed results
(which may or may not be desirable).
"""
def __init__(self, feature, **kwargs):
super().__init__(feature, KrigingObject, interpolate_pixel_wise=True, **kwargs)
class ResamplingTask(InterpolationTask):
"""
A subclass of InterpolationTask task that works only with data with no missing, masked or
invalid values. It always resamples timeseries to different timestamps.
"""
def __init__(
self,
feature,
interpolation_object,
resample_range,
*,
result_interval=None,
unknown_value=np.nan,
**interpolation_parameters,
):
if resample_range is None:
raise ValueError("resample_range parameter must be in form ('start_date', 'end_date', step_days)")
super().__init__(
feature,
interpolation_object,
resample_range=resample_range,
result_interval=result_interval,
unknown_value=unknown_value,
**interpolation_parameters,
)
def interpolate_data(self, data, times, resampled_times):
"""Interpolates data feature
:param data: Array in a shape of t x nobs, where nobs = h x w x n
:type data: numpy.ndarray
:param times: Array of reference times in second relative to the first timestamp
:type times: numpy.array
:param resampled_times: Array of reference times in second relative to the first timestamp in initial timestamp
array.
:type resampled_times: numpy.array
:return: Array of interpolated values
:rtype: numpy.ndarray
"""
        if np.isnan(data).any():
raise ValueError("Data must not contain any masked/invalid pixels or NaN values")
interp_func = self.get_interpolation_function(times, data)
time_mask = (resampled_times >= np.min(times)) & (resampled_times <= np.max(times))
new_data = np.full((resampled_times.size,) + data.shape[1:], np.nan, dtype=data.dtype)
new_data[time_mask] = interp_func(resampled_times[time_mask])
return new_data
def get_interpolation_function(self, times, series):
"""Initializes interpolation model
:param times: Array of reference times in second relative to the first timestamp
:type times: numpy.array
:param series: One dimensional array of time series
:type series: numpy.array
:return: Initialized interpolation model class
"""
return self.interpolation_object(times, series, axis=0, **self.interpolation_parameters)
class NearestResamplingTask(ResamplingTask):
"""
Implements `eolearn.features.ResamplingTask` by using `scipy.interpolate.interp1d(kind='nearest')`
"""
def __init__(self, feature, resample_range, **kwargs):
super().__init__(feature, scipy.interpolate.interp1d, resample_range, kind="nearest", **kwargs)
class LinearResamplingTask(ResamplingTask):
"""
Implements `eolearn.features.ResamplingTask` by using `scipy.interpolate.interp1d(kind='linear')`
"""
def __init__(self, feature, resample_range, **kwargs):
super().__init__(feature, scipy.interpolate.interp1d, resample_range, kind="linear", **kwargs)
class CubicResamplingTask(ResamplingTask):
"""
Implements `eolearn.features.ResamplingTask` by using `scipy.interpolate.interp1d(kind='cubic')`
"""
def __init__(self, feature, resample_range, **kwargs):
super().__init__(feature, scipy.interpolate.interp1d, resample_range, kind="cubic", **kwargs)
| 42.815089
| 120
| 0.675569
|
f1d12e90d1036a74c57f1b2fc14a287e73f72f3d
| 304
|
py
|
Python
|
bdd/entry_scenarios.py
|
nataliasviattseva/python_training
|
04572706948c90bba9714cb082e7ee1e2fcb52f1
|
[
"Apache-2.0"
] | null | null | null |
bdd/entry_scenarios.py
|
nataliasviattseva/python_training
|
04572706948c90bba9714cb082e7ee1e2fcb52f1
|
[
"Apache-2.0"
] | null | null | null |
bdd/entry_scenarios.py
|
nataliasviattseva/python_training
|
04572706948c90bba9714cb082e7ee1e2fcb52f1
|
[
"Apache-2.0"
] | null | null | null |
from pytest_bdd import scenario
from .entry_steps import *
@scenario('entries.feature', 'Add new entry')
def test_add_new_entry():
pass
@scenario('entries.feature', 'Edit the entry')
def test_edit_entry():
pass
@scenario('entries.feature', 'Delete the entry')
def test_delete_entry():
pass
| 21.714286
| 48
| 0.733553
|
49f6ec66b2f72a52234ec5421420dc35c4beb5aa
| 5,383
|
py
|
Python
|
quidel/delphi_quidel/run.py
|
qx-teo/covidcast-indicators
|
6eabe62748a206b5e6d65f9e11c65ef1c76cdb0a
|
[
"MIT"
] | null | null | null |
quidel/delphi_quidel/run.py
|
qx-teo/covidcast-indicators
|
6eabe62748a206b5e6d65f9e11c65ef1c76cdb0a
|
[
"MIT"
] | 5
|
2021-08-18T17:33:13.000Z
|
2021-08-19T15:09:22.000Z
|
quidel/delphi_quidel/run.py
|
qx-teo/covidcast-indicators
|
6eabe62748a206b5e6d65f9e11c65ef1c76cdb0a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Functions to call when running the function.
This module should contain a function called `run_module`, that is executed
when the module is run with `python -m MODULE_NAME`.
"""
import time
from os.path import join
from typing import Dict, Any
import pandas as pd
from delphi_utils import (
add_prefix,
create_export_csv,
get_structured_logger
)
from .constants import (END_FROM_TODAY_MINUS, EXPORT_DAY_RANGE,
GEO_RESOLUTIONS, SENSORS)
from .generate_sensor import (generate_sensor_for_states,
generate_sensor_for_other_geores)
from .geo_maps import geo_map
from .pull import (pull_quidel_data,
check_export_start_date,
check_export_end_date,
update_cache_file)
def run_module(params: Dict[str, Any]):
"""Run Quidel flu test module.
The `params` argument is expected to have the following structure:
- "common":
- "export_dir": str, directory to write output
- "log_exceptions" (optional): bool, whether to log exceptions to file
- "log_filename" (optional): str, name of file to write logs
- indicator":
- "static_file_dir": str, directory name with population information
- "input_cache_dir": str, directory in which to cache input data
- "export_start_date": str, YYYY-MM-DD format of earliest date to create output
- "export_end_date": str, YYYY-MM-DD format of latest date to create output or "" to create
through the present
- "pull_start_date": str, YYYY-MM-DD format of earliest date to pull input
- "pull_end_date": str, YYYY-MM-DD format of latest date to create output or "" to create
through the present
- "aws_credentials": Dict[str, str], authentication parameters for AWS S3; see S3
documentation
- "bucket_name": str, name of AWS bucket in which to find data
- "wip_signal": List[str], list of signal names that are works in progress
- "test_mode": bool, whether we are running in test mode
"""
start_time = time.time()
logger = get_structured_logger(
__name__, filename=params["common"].get("log_filename"),
log_exceptions=params["common"].get("log_exceptions", True))
cache_dir = params["indicator"]["input_cache_dir"]
export_dir = params["common"]["export_dir"]
static_file_dir = params["indicator"]["static_file_dir"]
export_start_dates = params["indicator"]["export_start_date"]
export_end_dates = params["indicator"]["export_end_date"]
map_df = pd.read_csv(
join(static_file_dir, "fips_prop_pop.csv"), dtype={"fips": int}
)
# Pull data and update export date
dfs, _end_date = pull_quidel_data(params["indicator"])
if _end_date is None:
print("The data is up-to-date. Currently, no new data to be ingested.")
return
export_end_dates = check_export_end_date(export_end_dates, _end_date,
END_FROM_TODAY_MINUS)
export_start_dates = check_export_start_date(export_start_dates,
export_end_dates,
EXPORT_DAY_RANGE)
# Add prefix, if required
sensors = add_prefix(list(SENSORS.keys()),
wip_signal=params["indicator"]["wip_signal"],
prefix="wip_")
for sensor in sensors:
# Check either covid_ag or flu_ag
test_type = "covid_ag" if "covid_ag" in sensor else "flu_ag"
print("state", sensor)
data = dfs[test_type].copy()
state_groups = geo_map("state", data, map_df).groupby("state_id")
first_date, last_date = data["timestamp"].min(), data["timestamp"].max()
# For State Level
state_df = generate_sensor_for_states(
state_groups, smooth=SENSORS[sensor][1],
device=SENSORS[sensor][0], first_date=first_date,
last_date=last_date)
create_export_csv(state_df, geo_res="state", sensor=sensor, export_dir=export_dir,
start_date=export_start_dates[test_type],
end_date=export_end_dates[test_type])
# County/HRR/MSA level
for geo_res in GEO_RESOLUTIONS:
print(geo_res, sensor)
data = dfs[test_type].copy()
data, res_key = geo_map(geo_res, data, map_df)
res_df = generate_sensor_for_other_geores(
state_groups, data, res_key, smooth=SENSORS[sensor][1],
device=SENSORS[sensor][0], first_date=first_date,
last_date=last_date)
create_export_csv(res_df, geo_res=geo_res, sensor=sensor, export_dir=export_dir,
start_date=export_start_dates[test_type],
end_date=export_end_dates[test_type],
remove_null_samples=True)
# Export the cache file if the pipeline runs successfully.
# Otherwise, don't update the cache file
update_cache_file(dfs, _end_date, cache_dir)
elapsed_time_in_seconds = round(time.time() - start_time, 2)
logger.info("Completed indicator run",
elapsed_time_in_seconds = elapsed_time_in_seconds)
| 45.235294
| 99
| 0.634033
|
ec34ada8d083b66e5a2d3e2434f20cfd304f5fc7
| 10,999
|
py
|
Python
|
kubernetes/client/models/v1_priority_class.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_priority_class.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_priority_class.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1PriorityClass(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'description': 'str',
'global_default': 'bool',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'preemption_policy': 'str',
'value': 'int'
}
attribute_map = {
'api_version': 'apiVersion',
'description': 'description',
'global_default': 'globalDefault',
'kind': 'kind',
'metadata': 'metadata',
'preemption_policy': 'preemptionPolicy',
'value': 'value'
}
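# A minimal construction sketch (illustrative only; the V1ObjectMeta import and
# every field value below are assumptions, not part of this module):
#
# from kubernetes.client.models import V1ObjectMeta
# pc = V1PriorityClass(
#     api_version="scheduling.k8s.io/v1",
#     kind="PriorityClass",
#     metadata=V1ObjectMeta(name="high-priority"),
#     value=1000000,
#     global_default=False,
#     description="Use for latency-critical pods only.",
# )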
def __init__(self, api_version=None, description=None, global_default=None, kind=None, metadata=None, preemption_policy=None, value=None, local_vars_configuration=None): # noqa: E501
"""V1PriorityClass - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._description = None
self._global_default = None
self._kind = None
self._metadata = None
self._preemption_policy = None
self._value = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if description is not None:
self.description = description
if global_default is not None:
self.global_default = global_default
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if preemption_policy is not None:
self.preemption_policy = preemption_policy
self.value = value
@property
def api_version(self):
"""Gets the api_version of this V1PriorityClass. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1PriorityClass. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1PriorityClass.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1PriorityClass. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def description(self):
"""Gets the description of this V1PriorityClass. # noqa: E501
description is an arbitrary string that usually provides guidelines on when this priority class should be used. # noqa: E501
:return: The description of this V1PriorityClass. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this V1PriorityClass.
description is an arbitrary string that usually provides guidelines on when this priority class should be used. # noqa: E501
:param description: The description of this V1PriorityClass. # noqa: E501
:type: str
"""
self._description = description
@property
def global_default(self):
"""Gets the global_default of this V1PriorityClass. # noqa: E501
globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority. # noqa: E501
:return: The global_default of this V1PriorityClass. # noqa: E501
:rtype: bool
"""
return self._global_default
@global_default.setter
def global_default(self, global_default):
"""Sets the global_default of this V1PriorityClass.
globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority. # noqa: E501
:param global_default: The global_default of this V1PriorityClass. # noqa: E501
:type: bool
"""
self._global_default = global_default
@property
def kind(self):
"""Gets the kind of this V1PriorityClass. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1PriorityClass. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1PriorityClass.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1PriorityClass. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1PriorityClass. # noqa: E501
:return: The metadata of this V1PriorityClass. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1PriorityClass.
:param metadata: The metadata of this V1PriorityClass. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def preemption_policy(self):
"""Gets the preemption_policy of this V1PriorityClass. # noqa: E501
PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate. # noqa: E501
:return: The preemption_policy of this V1PriorityClass. # noqa: E501
:rtype: str
"""
return self._preemption_policy
@preemption_policy.setter
def preemption_policy(self, preemption_policy):
"""Sets the preemption_policy of this V1PriorityClass.
PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate. # noqa: E501
:param preemption_policy: The preemption_policy of this V1PriorityClass. # noqa: E501
:type: str
"""
self._preemption_policy = preemption_policy
@property
def value(self):
"""Gets the value of this V1PriorityClass. # noqa: E501
The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec. # noqa: E501
:return: The value of this V1PriorityClass. # noqa: E501
:rtype: int
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this V1PriorityClass.
The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec. # noqa: E501
:param value: The value of this V1PriorityClass. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and value is None: # noqa: E501
raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PriorityClass):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PriorityClass):
return True
return self.to_dict() != other.to_dict()
| 37.927586
| 411
| 0.654878
|
7ff96a1ee6fed093503a23fbf6ba09f35240435e
| 6,540
|
py
|
Python
|
scripts-dev/build_debian_packages.py
|
buffless-matt/synapse
|
dda9b7fc4d2e6ca84a1a994a7ff1943b590e71df
|
[
"Apache-2.0"
] | null | null | null |
scripts-dev/build_debian_packages.py
|
buffless-matt/synapse
|
dda9b7fc4d2e6ca84a1a994a7ff1943b590e71df
|
[
"Apache-2.0"
] | 1
|
2022-03-23T08:03:58.000Z
|
2022-03-23T08:03:58.000Z
|
scripts-dev/build_debian_packages.py
|
buffless-matt/synapse
|
dda9b7fc4d2e6ca84a1a994a7ff1943b590e71df
|
[
"Apache-2.0"
] | 1
|
2022-03-31T09:03:27.000Z
|
2022-03-31T09:03:27.000Z
|
#!/usr/bin/env python3
# Build the Debian packages using Docker images.
#
# This script builds the Docker images and then executes them sequentially, each
# one building a Debian package for the targeted operating system. It is
# designed to be a "single command" to produce all the images.
#
# By default, builds for all known distributions, but a list of distributions
# can be passed on the commandline for debugging.
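#
# Usage sketch (illustrative; the distribution names and job count below are
# examples, not defaults):
#
#   ./build_debian_packages.py                          # build every dist in DISTS
#   ./build_debian_packages.py -j 2 debian:bullseye ubuntu:focal
#   ./build_debian_packages.py --show-dists-json        # print the dist list as JSON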
import argparse
import json
import os
import signal
import subprocess
import sys
import threading
from concurrent.futures import ThreadPoolExecutor
from typing import Optional, Sequence
DISTS = (
"debian:buster", # oldstable: EOL 2022-08
"debian:bullseye",
"debian:bookworm",
"debian:sid",
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
"ubuntu:impish", # 21.10 (EOL 2022-07)
)
DESC = """\
Builds .debs for synapse, using a Docker image for the build environment.
By default, builds for all known distributions, but a list of distributions
can be passed on the commandline for debugging.
"""
projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class Builder(object):
def __init__(
self, redirect_stdout=False, docker_build_args: Optional[Sequence[str]] = None
):
self.redirect_stdout = redirect_stdout
self._docker_build_args = tuple(docker_build_args or ())
self.active_containers = set()
self._lock = threading.Lock()
self._failed = False
def run_build(self, dist, skip_tests=False):
"""Build deb for a single distribution"""
if self._failed:
print("not building %s due to earlier failure" % (dist,))
raise Exception("failed")
try:
self._inner_build(dist, skip_tests)
except Exception as e:
print("build of %s failed: %s" % (dist, e), file=sys.stderr)
self._failed = True
raise
def _inner_build(self, dist, skip_tests=False):
tag = dist.split(":", 1)[1]
# Make the dir where the debs will live.
#
# Note that we deliberately put this outside the source tree, otherwise
# we tend to get source packages which are full of debs. (We could hack
# around that with more magic in the build_debian.sh script, but that
# doesn't solve the problem for natively-run dpkg-buildpackage).
debsdir = os.path.join(projdir, "../debs")
os.makedirs(debsdir, exist_ok=True)
if self.redirect_stdout:
logfile = os.path.join(debsdir, "%s.buildlog" % (tag,))
print("building %s: directing output to %s" % (dist, logfile))
stdout = open(logfile, "w")
else:
stdout = None
# first build a docker image for the build environment
build_args = (
(
"docker",
"build",
"--tag",
"dh-venv-builder:" + tag,
"--build-arg",
"distro=" + dist,
"-f",
"docker/Dockerfile-dhvirtualenv",
)
+ self._docker_build_args
+ ("docker",)
)
subprocess.check_call(
build_args,
stdout=stdout,
stderr=subprocess.STDOUT,
cwd=projdir,
)
container_name = "synapse_build_" + tag
with self._lock:
self.active_containers.add(container_name)
# then run the build itself
subprocess.check_call(
[
"docker",
"run",
"--rm",
"--name",
container_name,
"--volume=" + projdir + ":/synapse/source:ro",
"--volume=" + debsdir + ":/debs",
"-e",
"TARGET_USERID=%i" % (os.getuid(),),
"-e",
"TARGET_GROUPID=%i" % (os.getgid(),),
"-e",
"DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""),
"dh-venv-builder:" + tag,
],
stdout=stdout,
stderr=subprocess.STDOUT,
)
with self._lock:
self.active_containers.remove(container_name)
if stdout is not None:
stdout.close()
print("Completed build of %s" % (dist,))
def kill_containers(self):
with self._lock:
active = list(self.active_containers)
for c in active:
print("killing container %s" % (c,))
subprocess.run(
[
"docker",
"kill",
c,
],
stdout=subprocess.DEVNULL,
)
with self._lock:
self.active_containers.remove(c)
def run_builds(builder, dists, jobs=1, skip_tests=False):
def sig(signum, _frame):
print("Caught SIGINT")
builder.kill_containers()
signal.signal(signal.SIGINT, sig)
with ThreadPoolExecutor(max_workers=jobs) as e:
res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists)
# make sure we consume the iterable so that exceptions are raised.
for _ in res:
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=DESC,
)
parser.add_argument(
"-j",
"--jobs",
type=int,
default=1,
help="specify the number of builds to run in parallel",
)
parser.add_argument(
"--no-check",
action="store_true",
help="skip running tests after building",
)
parser.add_argument(
"--docker-build-arg",
action="append",
help="specify an argument to pass to docker build",
)
parser.add_argument(
"--show-dists-json",
action="store_true",
help="instead of building the packages, just list the dists to build for, as a json array",
)
parser.add_argument(
"dist",
nargs="*",
default=DISTS,
help="a list of distributions to build for. Default: %(default)s",
)
args = parser.parse_args()
if args.show_dists_json:
print(json.dumps(DISTS))
else:
builder = Builder(
redirect_stdout=(args.jobs > 1), docker_build_args=args.docker_build_arg
)
run_builds(
builder,
dists=args.dist,
jobs=args.jobs,
skip_tests=args.no_check,
)
| 30
| 99
| 0.562844
|
d3f9540a9f6278c870263b97438527bddb09fa9a
| 10,483
|
py
|
Python
|
case-design/a3c.py
|
transys-project/metis
|
3a3cdf5ba852b543c2398762364bbfd663b4ebf2
|
[
"MIT"
] | 50
|
2020-06-07T11:50:30.000Z
|
2022-03-30T07:51:10.000Z
|
case-design/a3c.py
|
transys-project/Metis
|
3a3cdf5ba852b543c2398762364bbfd663b4ebf2
|
[
"MIT"
] | 2
|
2020-08-27T08:52:33.000Z
|
2021-09-02T06:25:28.000Z
|
case-design/a3c.py
|
transys-project/Metis
|
3a3cdf5ba852b543c2398762364bbfd663b4ebf2
|
[
"MIT"
] | 16
|
2020-05-08T12:53:27.000Z
|
2022-02-28T10:34:07.000Z
|
import numpy as np
import tensorflow as tf
import tflearn
GAMMA = 0.99
A_DIM = 6
ENTROPY_WEIGHT = 0.5
ENTROPY_EPS = 1e-6
S_INFO = 4
class ActorNetwork(object):
"""
Input to the network is the state, output is the distribution
of all actions.
"""
def __init__(self, sess, state_dim, action_dim, learning_rate):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.lr_rate = learning_rate
# Create the actor network
self.inputs, self.out = self.create_actor_network()
# Get all network parameters
self.network_params = \
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='actor')
# Set all network parameters
self.input_network_params = []
for param in self.network_params:
self.input_network_params.append(
tf.placeholder(tf.float32, shape=param.get_shape()))
self.set_network_params_op = []
for idx, param in enumerate(self.input_network_params):
self.set_network_params_op.append(self.network_params[idx].assign(param))
# Selected action, 0-1 vector
self.acts = tf.placeholder(tf.float32, [None, self.a_dim])
# This gradient will be provided by the critic network
self.act_grad_weights = tf.placeholder(tf.float32, [None, 1])
# Compute the objective (log action_vector and entropy)
self.obj = tf.reduce_sum(tf.multiply(
tf.log(tf.reduce_sum(tf.multiply(self.out, self.acts),
reduction_indices=1, keep_dims=True)),
-self.act_grad_weights)) \
+ ENTROPY_WEIGHT * tf.reduce_sum(tf.multiply(self.out,
tf.log(self.out + ENTROPY_EPS)))
# Combine the gradients here
self.actor_gradients = tf.gradients(self.obj, self.network_params)
# Optimization Op
self.optimize = tf.train.RMSPropOptimizer(self.lr_rate).\
apply_gradients(zip(self.actor_gradients, self.network_params))
def create_actor_network(self):
with tf.variable_scope('actor'):
inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])
split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 128, activation='relu')
split_1 = tflearn.fully_connected(inputs[:, 1:2, -1], 128, activation='relu')
split_2 = tflearn.conv_1d(inputs[:, 2:3, :], 128, 4, activation='relu')
split_3 = tflearn.conv_1d(inputs[:, 3:4, :], 128, 4, activation='relu')
split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM], 128, 4, activation='relu')
split_5 = tflearn.fully_connected(inputs[:, 4:5, -1], 128, activation='relu')
split_2_flat = tflearn.flatten(split_2)
split_3_flat = tflearn.flatten(split_3)
split_4_flat = tflearn.flatten(split_4)
merge_net = tflearn.merge([split_1, split_2_flat, split_3_flat, split_4_flat, split_5], 'concat')
dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu')
merge_net_1 = tflearn.merge([split_0, dense_net_0], 'concat')
out = tflearn.fully_connected(merge_net_1, self.a_dim, activation='softmax')
return inputs, out
def train(self, inputs, acts, act_grad_weights):
self.sess.run(self.optimize, feed_dict={
self.inputs: inputs,
self.acts: acts,
self.act_grad_weights: act_grad_weights
})
def predict(self, inputs):
return self.sess.run(self.out, feed_dict={
self.inputs: inputs
})
def get_gradients(self, inputs, acts, act_grad_weights):
return self.sess.run(self.actor_gradients, feed_dict={
self.inputs: inputs,
self.acts: acts,
self.act_grad_weights: act_grad_weights
})
def apply_gradients(self, actor_gradients):
return self.sess.run(self.optimize, feed_dict={
i: d for i, d in zip(self.actor_gradients, actor_gradients)
})
def get_network_params(self):
return self.sess.run(self.network_params)
def set_network_params(self, input_network_params):
self.sess.run(self.set_network_params_op, feed_dict={
i: d for i, d in zip(self.input_network_params, input_network_params)
})
class CriticNetwork(object):
"""
Input to the network is the state and action, output is V(s).
On policy: the action must be obtained from the output of the Actor network.
"""
def __init__(self, sess, state_dim, learning_rate):
self.sess = sess
self.s_dim = state_dim
self.lr_rate = learning_rate
# Create the critic network
self.inputs, self.out = self.create_critic_network()
# Get all network parameters
self.network_params = \
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='critic')
# Set all network parameters
self.input_network_params = []
for param in self.network_params:
self.input_network_params.append(
tf.placeholder(tf.float32, shape=param.get_shape()))
self.set_network_params_op = []
for idx, param in enumerate(self.input_network_params):
self.set_network_params_op.append(self.network_params[idx].assign(param))
# Network target V(s)
self.td_target = tf.placeholder(tf.float32, [None, 1])
# Temporal Difference, will also be weights for actor_gradients
self.td = tf.subtract(self.td_target, self.out)
# Mean square error
self.loss = tflearn.mean_square(self.td_target, self.out)
# Compute critic gradient
self.critic_gradients = tf.gradients(self.loss, self.network_params)
# Optimization Op
self.optimize = tf.train.RMSPropOptimizer(self.lr_rate).\
apply_gradients(zip(self.critic_gradients, self.network_params))
def create_critic_network(self):
with tf.variable_scope('critic'):
inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])
split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 128, activation='relu')
split_1 = tflearn.fully_connected(inputs[:, 1:2, -1], 128, activation='relu')
split_2 = tflearn.conv_1d(inputs[:, 2:3, :], 128, 4, activation='relu')
split_3 = tflearn.conv_1d(inputs[:, 3:4, :], 128, 4, activation='relu')
split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM], 128, 4, activation='relu')
split_5 = tflearn.fully_connected(inputs[:, 4:5, -1], 128, activation='relu')
split_2_flat = tflearn.flatten(split_2)
split_3_flat = tflearn.flatten(split_3)
split_4_flat = tflearn.flatten(split_4)
merge_net = tflearn.merge([split_1, split_2_flat, split_3_flat, split_4_flat, split_5], 'concat')
dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu')
merge_net_1 = tflearn.merge([split_0, dense_net_0], 'concat')
out = tflearn.fully_connected(merge_net_1, 1, activation='linear')
return inputs, out
def train(self, inputs, td_target):
return self.sess.run([self.loss, self.optimize], feed_dict={
self.inputs: inputs,
self.td_target: td_target
})
def predict(self, inputs):
return self.sess.run(self.out, feed_dict={
self.inputs: inputs
})
def get_td(self, inputs, td_target):
return self.sess.run(self.td, feed_dict={
self.inputs: inputs,
self.td_target: td_target
})
def get_gradients(self, inputs, td_target):
return self.sess.run(self.critic_gradients, feed_dict={
self.inputs: inputs,
self.td_target: td_target
})
def apply_gradients(self, critic_gradients):
return self.sess.run(self.optimize, feed_dict={
i: d for i, d in zip(self.critic_gradients, critic_gradients)
})
def get_network_params(self):
return self.sess.run(self.network_params)
def set_network_params(self, input_network_params):
self.sess.run(self.set_network_params_op, feed_dict={
i: d for i, d in zip(self.input_network_params, input_network_params)
})
def compute_gradients(s_batch, a_batch, r_batch, terminal, actor, critic):
"""
batch of s, a, r is from samples in a sequence
the format is in np.array([batch_size, s/a/r_dim])
terminal is True when sequence ends as a terminal state
"""
assert s_batch.shape[0] == a_batch.shape[0]
assert s_batch.shape[0] == r_batch.shape[0]
ba_size = s_batch.shape[0]
v_batch = critic.predict(s_batch)
R_batch = np.zeros(r_batch.shape)
if terminal:
R_batch[-1, 0] = 0 # terminal state
else:
R_batch[-1, 0] = v_batch[-1, 0] # bootstrap from last state
for t in reversed(range(ba_size - 1)):
R_batch[t, 0] = r_batch[t] + GAMMA * R_batch[t + 1, 0]
td_batch = R_batch - v_batch
actor_gradients = actor.get_gradients(s_batch, a_batch, td_batch)
critic_gradients = critic.get_gradients(s_batch, R_batch)
return actor_gradients, critic_gradients, td_batch
def discount(x, gamma):
"""
Given vector x, computes a vector y such that
y[i] = x[i] + gamma * x[i+1] + gamma^2 x[i+2] + ...
"""
out = np.zeros(len(x))
out[-1] = x[-1]
for i in reversed(range(len(x)-1)):
out[i] = x[i] + gamma*out[i+1]
assert x.ndim >= 1
# More efficient version:
# scipy.signal.lfilter([1],[1,-gamma],x[::-1], axis=0)[::-1]
return out
def compute_entropy(x):
"""
Given vector x, computes the entropy
H(x) = - sum( p * log(p))
"""
H = 0.0
for i in range(len(x)):
if 0 < x[i] < 1:
H -= x[i] * np.log(x[i])
return H
def build_summaries():
td_loss = tf.Variable(0.)
tf.summary.scalar("TD_loss", td_loss)
eps_total_reward = tf.Variable(0.)
tf.summary.scalar("Eps_total_reward", eps_total_reward)
avg_entropy = tf.Variable(0.)
tf.summary.scalar("Avg_entropy", avg_entropy)
summary_vars = [td_loss, eps_total_reward, avg_entropy]
summary_ops = tf.summary.merge_all()
return summary_ops, summary_vars
| 36.273356
| 109
| 0.627969
|
a18a69a94bb3b07f88de8062d373a3b2d27c4521
| 6,194
|
py
|
Python
|
awx/main/tests/functional/test_rbac_team.py
|
bhyunki/awx
|
ce588a6af5a5c7f71a5b176ffe53eda5ebc3492c
|
[
"Apache-2.0"
] | 11,396
|
2017-09-07T04:56:02.000Z
|
2022-03-31T13:56:17.000Z
|
awx/main/tests/functional/test_rbac_team.py
|
bhyunki/awx
|
ce588a6af5a5c7f71a5b176ffe53eda5ebc3492c
|
[
"Apache-2.0"
] | 11,046
|
2017-09-07T09:30:46.000Z
|
2022-03-31T20:28:01.000Z
|
awx/main/tests/functional/test_rbac_team.py
|
bhyunki/awx
|
ce588a6af5a5c7f71a5b176ffe53eda5ebc3492c
|
[
"Apache-2.0"
] | 3,592
|
2017-09-07T04:14:31.000Z
|
2022-03-31T23:53:09.000Z
|
import pytest
from unittest import mock
from awx.main.access import TeamAccess
from awx.main.models import Project, Organization, Team
@pytest.mark.django_db
def test_team_attach_unattach(team, user):
u = user('member', False)
access = TeamAccess(u)
team.member_role.members.add(u)
assert not access.can_attach(team, team.member_role, 'member_role.children', None)
assert not access.can_unattach(team, team.member_role, 'member_role.children')
team.admin_role.members.add(u)
assert access.can_attach(team, team.member_role, 'member_role.children', None)
assert access.can_unattach(team, team.member_role, 'member_role.children')
u2 = user('non-member', False)
access = TeamAccess(u2)
assert not access.can_attach(team, team.member_role, 'member_role.children', None)
assert not access.can_unattach(team, team.member_role, 'member_role.children')
@pytest.mark.django_db
@pytest.mark.parametrize('ext_auth', [True, False])
def test_team_org_resource_role(ext_auth, team, user, rando):
with mock.patch('awx.main.access.settings') as settings_mock:
settings_mock.MANAGE_ORGANIZATION_AUTH = ext_auth
u = user('member', False)
team.organization.admin_role.members.add(u)
access = TeamAccess(u)
assert access.can_attach(team, rando, 'member_role.members') == ext_auth
team.member_role.members.add(rando)
assert access.can_unattach(team, rando, 'member_role.members') == ext_auth
@pytest.mark.django_db
def test_team_access_superuser(team, user):
team.member_role.members.add(user('member', False))
access = TeamAccess(user('admin', True))
assert access.can_add(None)
assert access.can_change(team, None)
assert access.can_delete(team)
t = access.get_queryset()[0]
assert len(t.member_role.members.all()) == 1
assert len(t.organization.admin_role.members.all()) == 0
@pytest.mark.django_db
def test_team_access_org_admin(organization, team, user):
a = user('admin', False)
organization.admin_role.members.add(a)
team.organization = organization
team.save()
access = TeamAccess(a)
assert access.can_add({'organization': organization.pk})
assert access.can_change(team, None)
assert access.can_delete(team)
t = access.get_queryset()[0]
assert len(t.member_role.members.all()) == 0
assert len(t.organization.admin_role.members.all()) == 1
@pytest.mark.django_db
def test_team_access_member(organization, team, user):
u = user('member', False)
team.member_role.members.add(u)
team.organization = organization
team.save()
access = TeamAccess(u)
assert not access.can_add({'organization': organization.pk})
assert not access.can_change(team, None)
assert not access.can_delete(team)
t = access.get_queryset()[0]
assert len(t.member_role.members.all()) == 1
assert len(t.organization.admin_role.members.all()) == 0
@pytest.mark.django_db
def test_team_accessible_by(team, user, project):
u = user('team_member', False)
team.member_role.children.add(project.use_role)
assert team in project.read_role
assert u not in project.read_role
team.member_role.members.add(u)
assert u in project.read_role
@pytest.mark.django_db
def test_team_accessible_objects(team, user, project):
u = user('team_member', False)
team.member_role.children.add(project.use_role)
assert len(Project.accessible_objects(team, 'read_role')) == 1
assert not Project.accessible_objects(u, 'read_role')
team.member_role.members.add(u)
assert len(Project.accessible_objects(u, 'read_role')) == 1
@pytest.mark.django_db
def test_team_admin_member_access(team, user, project):
u = user('team_admin', False)
team.member_role.children.add(project.use_role)
team.admin_role.members.add(u)
assert len(Project.accessible_objects(u, 'use_role')) == 1
@pytest.mark.django_db
def test_team_member_org_role_access_project(team, rando, project, organization):
team.member_role.members.add(rando)
assert rando not in project.read_role
team.member_role.children.add(organization.project_admin_role)
assert rando in project.admin_role
@pytest.mark.django_db
def test_team_member_org_role_access_workflow(team, rando, workflow_job_template, organization):
team.member_role.members.add(rando)
assert rando not in workflow_job_template.read_role
team.member_role.children.add(organization.workflow_admin_role)
assert rando in workflow_job_template.admin_role
@pytest.mark.django_db
def test_team_member_org_role_access_inventory(team, rando, inventory, organization):
team.member_role.members.add(rando)
assert rando not in inventory.read_role
team.member_role.children.add(organization.inventory_admin_role)
assert rando in inventory.admin_role
@pytest.mark.django_db
def test_org_admin_team_access(organization, team, user, project):
u = user('team_admin', False)
organization.admin_role.members.add(u)
team.organization = organization
team.save()
team.member_role.children.add(project.use_role)
assert len(Project.accessible_objects(u, 'use_role')) == 1
@pytest.mark.django_db
@pytest.mark.parametrize('enabled', [True, False])
def test_org_admin_view_all_teams(org_admin, enabled):
access = TeamAccess(org_admin)
other_org = Organization.objects.create(name='other-org')
other_team = Team.objects.create(name='other-team', organization=other_org)
with mock.patch('awx.main.access.settings') as settings_mock:
settings_mock.ORG_ADMINS_CAN_SEE_ALL_USERS = enabled
assert access.can_read(other_team) is enabled
@pytest.mark.django_db
def test_team_member_read(rando, organization, team):
assert team.organization == organization
organization.member_role.members.add(rando)
assert TeamAccess(rando).can_read(team)
assert team in TeamAccess(rando).get_queryset()
@pytest.mark.django_db
def test_team_list_no_duplicate_entries(rando, organization, team):
organization.member_role.members.add(rando)
team.read_role.members.add(rando)
assert list(TeamAccess(rando).get_queryset()) == [team]
| 33.663043
| 96
| 0.746529
|
c858f0bba4b8e32a4ea7ee4d3b1fdb4401774b39
| 777
|
py
|
Python
|
corpus/term_to_id_make.py
|
kwashio/filling_missing_path
|
b5ab85c9d1e42ee47857299df3fcd79b6127e1fe
|
[
"Apache-2.0"
] | null | null | null |
corpus/term_to_id_make.py
|
kwashio/filling_missing_path
|
b5ab85c9d1e42ee47857299df3fcd79b6127e1fe
|
[
"Apache-2.0"
] | null | null | null |
corpus/term_to_id_make.py
|
kwashio/filling_missing_path
|
b5ab85c9d1e42ee47857299df3fcd79b6127e1fe
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pickle
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('input_prefix', help='prefix of the parsed Wikipedia dump files')
args = parser.parse_args()
alp = ['a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p', 'q', 'r',
's', 't']
terms = set()
for i in alp:
with open(args.input_prefix+'_a{}_parsed'.format(i), 'r') as f:
for line in f:
w1, w2, path = line.strip().split('\t')
terms.add(w1)
terms.add(w2)
term_to_id = {w:i for (i, w) in enumerate(terms)}
id_to_term = {i:w for (i, w) in enumerate(terms)}
with open('term_to_id.dump', 'wb') as f:
pickle.dump(term_to_id,f)
with open('id_to_term.dump', 'wb') as f:
pickle.dump(id_to_term,f)
| 24.28125
| 67
| 0.56242
|
92dfedc52e8def9094240023055dd249c3968435
| 5,364
|
py
|
Python
|
sweet_cms/applications/eventy/utils.py
|
SunilPragroot/sweet
|
d048126007fd892cb8b6f80348920c8c5152a85c
|
[
"MIT"
] | 1
|
2021-04-16T14:25:36.000Z
|
2021-04-16T14:25:36.000Z
|
sweet_cms/applications/eventy/utils.py
|
SunilPragroot/sweet
|
d048126007fd892cb8b6f80348920c8c5152a85c
|
[
"MIT"
] | 94
|
2020-12-31T06:37:49.000Z
|
2022-03-10T14:07:47.000Z
|
sweet_cms/applications/eventy/utils.py
|
SunilPragroot/sweet
|
d048126007fd892cb8b6f80348920c8c5152a85c
|
[
"MIT"
] | null | null | null |
import json
import os
import base64
import errno
import re
from sweet_cms.extensions import db
def ensure_dir_exists(dir_path):
if not os.path.exists(dir_path):
try:
os.makedirs(dir_path)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
def save_base64file(file_path, file_name, data):
ensure_dir_exists(file_path)
file_full_path = os.path.join(file_path, file_name)
data = ','.join(data.split(',')[1:])
data = bytes(data, 'utf-8')
with open(file_full_path, "wb") as fh:
fh.write(base64.decodebytes(data))
def save_base64video(video_upload_path, file_name, data):
ensure_dir_exists(video_upload_path)
file_full_path = os.path.join(video_upload_path, file_name)
with open(file_full_path, 'wb+') as destination:
for chunk in data.chunks():
destination.write(chunk)
def m3u8_fix(file_path, prefix):
if os.path.exists(file_path):
file_dir = os.path.dirname(file_path)
reader = open(file_path, 'r')
original_text = reader.read()
new_text = ""
reader.close()
for line in original_text.splitlines():
if '.m3u8' in line:
new_line = os.path.join(prefix, line)
new_play_list_path = os.path.join(file_dir, line)
m3u8_fix(new_play_list_path, prefix)
elif '.ts' in line:
new_line = os.path.join(prefix, line)
else:
new_line = line
new_text = new_text + new_line + "\n"
writer = open(file_path, 'w')
writer.write(new_text)
writer.close()
def process_video(file_id, **kwargs):
from sweet_cms.models import ProgrammeFile
from libs.sweet_apps import get_sweet_app
from autoapp import app
import ffmpeg_streaming
from ffmpeg_streaming import Formats
import ffmpeg
from shutil import copy
from flask import url_for
with app.app_context():
eventy_app = get_sweet_app('Eventy')
programme_file = ProgrammeFile.query.filter_by(id=file_id).first()
if not programme_file:
print("File Was deleted , stopping now")
return
file_path = programme_file.file_url
app.config['SERVER_NAME'] = 'localhost'
file_url = '/admin/eventy/uploads/programmes/{}/videos/'.format(programme_file.programme.id)
file_path = os.path.join(eventy_app.app_path, app.config['EVENTY_UPLOADS_DIR'], file_path)
print(file_id, programme_file, file_path, file_url)
if os.path.exists(file_path):
programme_file.file_status = 'processing'
db.session.add(programme_file)
db.session.commit()
file_dir = os.path.dirname(file_path)
file_name = os.path.basename(file_path)
bare_file_name = '.'.join(file_name.split('.')[:-1])
new_path = os.path.join(file_dir, bare_file_name+'.mp4')
# data = ffmpeg.probe(file_path)
input_video = ffmpeg.input(file_path)
output_path = os.path.join('/tmp', bare_file_name+'.mp4')
(
ffmpeg
.output(input_video, output_path, vcodec='libx264', acodec='aac', movflags='faststart')
.overwrite_output()
.run()
)
copy(output_path, file_dir)
# os.remove(output_path)
if new_path != file_path:
os.remove(file_path)
# if "bit_rate" not in str(data):
# (
# ffmpeg
# .output(input_video, output_path, vcodec='copy', acodec='copy')
# .overwrite_output()
# .run()
# )
# copy(output_path, file_path)
# os.remove(output_path)
# video = ffmpeg_streaming.input(file_path)
# hls = video.hls(Formats.h264())
# hls.auto_generate_representations()
# hls.output(playlist_path)
# m3u8_fix(playlist_path, file_url)
data = ffmpeg.probe(new_path)
programme_file.file_details = json.dumps(data)
programme_file.file_status = 'ready'
programme_file.file_name = bare_file_name+'.mp4'
db.session.add(programme_file)
db.session.commit()
print("Done on : {}".format(str(file_path)))
def fire_programme_start(programme_id, **kwargs):
import datetime
from sweet_cms.models import Programme
from libs.sweet_apps import get_sweet_app
from autoapp import app
import socketio
with app.app_context():
programme = Programme.query.filter_by(id=programme_id).first()
if not programme:
print("Programme Was deleted , stopping now")
return
if not app.config['DEBUG']:
ws_url = "https://www.mediville.com"
path = 'sockets/socket.io'
else:
ws_url = "http://127.0.0.1:5501"
path = "socket.io"
sio = socketio.Client()
sio.connect(ws_url, socketio_path=path)
sio.emit("programme_started", {"event_id": programme.event.id, "programme_id": programme.id})
print("HI", datetime.datetime.now())
| 36.739726
| 107
| 0.596943
|
b8a7c05039de9fdeed7bb8bbdd3674f0556eeffb
| 3,046
|
py
|
Python
|
tests/settings.py
|
dwoods/django-material
|
3b159d3c42984f15bf8398fcb24fb83ebc8eeacb
|
[
"BSD-3-Clause"
] | null | null | null |
tests/settings.py
|
dwoods/django-material
|
3b159d3c42984f15bf8398fcb24fb83ebc8eeacb
|
[
"BSD-3-Clause"
] | null | null | null |
tests/settings.py
|
dwoods/django-material
|
3b159d3c42984f15bf8398fcb24fb83ebc8eeacb
|
[
"BSD-3-Clause"
] | null | null | null |
import django
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fn4_(9z4f8w+3!&(j2x88^ca0m0=s+aj$jp^^cf^3h740xhr3='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
# material apps
'material',
'material.frontend',
# standard django apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
# test apps
'tests',
'demo',
)
if django.VERSION < (3, 0):
INSTALLED_APPS += (
'template_debug',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
SITE_ID = 1
ROOT_URLCONF = 'tests.urls'
WSGI_APPLICATION = 'tests.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'tests', 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'builtins': [
'material.templatetags.material_form',
],
'debug': True,
},
},
]
if django.VERSION < (3, 0):
TEMPLATES[0]['OPTIONS']['builtins'] += [
'template_debug.templatetags.debug_tags'
]
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
MIGRATION_MODULES = []
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "tests/static"),
)
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
STATIC_ROOT = os.path.join(BASE_DIR, 'deploy/static')
| 23.796875
| 71
| 0.66021
|
26efa9fbf914e80a611ad7bb38bad037091f7baf
| 2,496
|
py
|
Python
|
src/commercetools/testing/discount_codes.py
|
BramKaashoek/commercetools-python-sdk
|
4a4191d7816c921401b782d8ae37626cb32791a1
|
[
"MIT"
] | null | null | null |
src/commercetools/testing/discount_codes.py
|
BramKaashoek/commercetools-python-sdk
|
4a4191d7816c921401b782d8ae37626cb32791a1
|
[
"MIT"
] | null | null | null |
src/commercetools/testing/discount_codes.py
|
BramKaashoek/commercetools-python-sdk
|
4a4191d7816c921401b782d8ae37626cb32791a1
|
[
"MIT"
] | null | null | null |
import datetime
import typing
import uuid
from commercetools import schemas, types
from commercetools.testing import utils
from commercetools.testing.abstract import BaseModel, ServiceBackend
from commercetools.testing.utils import update_attribute
class DiscountCodesModel(BaseModel):
_resource_schema = schemas.DiscountCodeSchema
_primary_type_name = "discount-code"
def _create_from_draft(
self, draft: types.DiscountCodeDraft, id: typing.Optional[str] = None
) -> types.DiscountCode:
object_id = str(uuid.UUID(id) if id is not None else uuid.uuid4())
return types.DiscountCode(
id=str(object_id),
version=1,
name=draft.name,
description=draft.description,
code=draft.code,
cart_discounts=draft.cart_discounts,
cart_predicate=draft.cart_predicate,
is_active=draft.is_active or False,
max_applications=draft.max_applications,
max_applications_per_customer=draft.max_applications_per_customer,
groups=draft.groups or [],
references=[],
valid_from=draft.valid_from,
valid_until=draft.valid_until,
created_at=datetime.datetime.now(datetime.timezone.utc),
last_modified_at=datetime.datetime.now(datetime.timezone.utc),
custom=utils.create_from_draft(draft.custom),
)
class DiscountCodesBackend(ServiceBackend):
service_path = "discount-codes"
model_class = DiscountCodesModel
_schema_draft = schemas.DiscountCodeDraftSchema
_schema_update = schemas.DiscountCodeUpdateSchema
_schema_query_response = schemas.DiscountCodePagedQueryResponseSchema
def urls(self):
return [
("^$", "GET", self.query),
("^$", "POST", self.create),
("^(?P<id>[^/]+)$", "GET", self.get_by_id),
("^(?P<id>[^/]+)$", "POST", self.update_by_id),
]
_actions = {
"changeIsActive": update_attribute("isActive", "is_active"),
"setName": update_attribute("name", "name"),
"setDescription": update_attribute("description", "description"),
"setCartPredicate": update_attribute("cartPredicate", "cart_predicate"),
"setMaxApplications": update_attribute("maxApplications", "max_applications"),
"setMaxApplicationsPerCustomer": update_attribute(
"maxApplicationsPerCustomer", "max_applications_per_customer"
),
}
| 38.4
| 86
| 0.669071
|