hexsha (string, len 40) | size (int64, 7 to 1.04M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 4 to 247) | max_stars_repo_name (string, len 4 to 125) | max_stars_repo_head_hexsha (string, len 40 to 78) | max_stars_repo_licenses (list, len 1 to 10) | max_stars_count (int64, 1 to 368k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 4 to 247) | max_issues_repo_name (string, len 4 to 125) | max_issues_repo_head_hexsha (string, len 40 to 78) | max_issues_repo_licenses (list, len 1 to 10) | max_issues_count (int64, 1 to 116k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 4 to 247) | max_forks_repo_name (string, len 4 to 125) | max_forks_repo_head_hexsha (string, len 40 to 78) | max_forks_repo_licenses (list, len 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 1 to 1.04M) | avg_line_length (float64, 1.77 to 618k) | max_line_length (int64, 1 to 1.02M) | alphanum_fraction (float64, 0 to 1) | original_content (string, len 7 to 1.04M) | filtered:remove_function_no_docstring (int64, -102 to 942k) | filtered:remove_class_no_docstring (int64, -354 to 977k) | filtered:remove_delete_markers (int64, 0 to 60.1k)
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fec111f93a51990a6580549279d4bc1fdb55d776 | 788 | py | Python | pyvisdk/esxcli/handlers/ha_cli_handler_storage_core_path_stats.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | ["MIT"] | null | null | null | pyvisdk/esxcli/handlers/ha_cli_handler_storage_core_path_stats.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | ["MIT"] | null | null | null | pyvisdk/esxcli/handlers/ha_cli_handler_storage_core_path_stats.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | ["MIT"] | null | null | null |
from pyvisdk.esxcli.executer import execute_soap
from pyvisdk.esxcli.base import Base
class StorageCorePathStats(Base):
'''
Stats operations pertaining to the pluggable storage architectures' device paths on the system.
'''
moid = 'ha-cli-handler-storage-core-path-stats'
def get(self, path=None):
'''
List the SCSI stats for the SCSI Paths in the system.
:param path: string, Limit the stats output to one specific path. This path name can be the runtime name or the path UID.
:returns: vim.EsxCLI.storage.core.path.stats.get.ScsiPathStats[]
'''
return execute_soap(self._client, self._host, self.moid, 'vim.EsxCLI.storage.core.path.stats.Get',
path=path,
        ) | 43.777778 | 129 | 0.649746 | (original_content identical to content) | 0 | 0 | 0
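A minimal usage sketch for the handler in the record above (illustrative only, not part of the dataset row). It assumes the pyvisdk esxcli layer constructs handlers with the SOAP client and host objects that the code references as self._client and self._host; both names below and the example path are placeholders:

    handler = StorageCorePathStats(client, host)   # client/host: hypothetical pyvisdk session objects
    stats = handler.get(path='vmhba32:C0:T0:L0')   # or omit path to list stats for every SCSI path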
72231b2c2e74d2130235d5ec9abb2e7fa0bfae55 | 269 | py | Python | src/yellowdog_client/model/compute_source_traits_filter.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | ["Apache-2.0"] | null | null | null | src/yellowdog_client/model/compute_source_traits_filter.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | ["Apache-2.0"] | null | null | null | src/yellowdog_client/model/compute_source_traits_filter.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | ["Apache-2.0"] | null | null | null |
from dataclasses import dataclass
from typing import Optional
@dataclass
| 24.454545 | 43 | 0.765799 |
from dataclasses import dataclass
from typing import Optional
@dataclass
class ComputeSourceTraitsFilter:
canStopStart: Optional[bool] = None
canRestart: Optional[bool] = None
canScaleOut: Optional[bool] = None
isSelfMaintained: Optional[bool] = None
| 0 | 172 | 22 |
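A short construction sketch for the dataclass in the record above; every field is Optional and defaults to None, so callers set only the traits they want to filter on:

    traits = ComputeSourceTraitsFilter(canStopStart=True, canScaleOut=True)
    assert traits.canRestart is None   # unset fields stay None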
7a27cbd111b959281707eb4f759182f97a7d02d6 | 32025 | py | Python | opengl/gl/raw/gl_2_0.py | SilentPenguin/OpenGL.py | dd16bf7ea2fa20a7ea489e711a5df20d604c34dc | ["Apache-2.0"] | 1 | 2016-11-09T06:11:24.000Z | 2016-11-09T06:11:24.000Z | opengl/gl/raw/gl_2_0.py | SilentPenguin/OpenGL.py | dd16bf7ea2fa20a7ea489e711a5df20d604c34dc | ["Apache-2.0"] | 3 | 2016-11-09T06:21:08.000Z | 2016-11-18T15:17:22.000Z | opengl/gl/raw/gl_2_0.py | SilentPenguin/OpenGL.py | dd16bf7ea2fa20a7ea489e711a5df20d604c34dc | ["Apache-2.0"] | null | null | null |
#BEWARE: automatically generated code
#This code was generated by /generate/__main__.py
from opengl.gl.raw.bindings import *
@accepts(t.enum, t.enum)
@returns(t.void)
@binds(dll)
def blend_equation_separate(modergb, modealpha):
'''
set the RGB blend equation and the alpha blend equation separately.
Args:
modergb: the rgb blend equation, how the red, green, and blue components
of the source and destination colors are combined.
modealpha: the alpha blend equation, how the alpha component of the
source and destination colors are combined.
'''
@accepts(t.sizei, POINTER(t.enum))
@returns(t.void)
@binds(dll)
def draw_buffers(n, bufs):
'''
Specifies a list of color buffers to be drawn into.
gl.draw_buffers and gl.named_framebuffer_draw_buffers define an array of
buffers into which outputs from the fragment shader data will be written. If
a fragment shader writes a value to one or more user defined output
variables, then the value of each variable will be written into the buffer
specified at a location within bufs corresponding to the location assigned
to that user defined output. The draw buffer used for user defined outputs
assigned to locations greater than or equal to n is implicitly set to
gl.NONE and any data written to such an output is discarded.
Args:
n: the number of buffers in bufs.
bufs: points to an array of symbolic constants specifying the buffers
into which fragment colors or data values will be written.
'''
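# Illustrative usage sketch (not part of the generated bindings): routing fragment
# output locations 0 and 1 to two colour attachments. It assumes t.enum is a
# ctypes integer type, so an array can be built with (t.enum * n)(...), and that
# the COLOR_ATTACHMENT0/COLOR_ATTACHMENT1 constants are imported from the module
# that defines them (they are not declared in this 2.0 module).
#
#   bufs = (t.enum * 2)(COLOR_ATTACHMENT0, COLOR_ATTACHMENT1)
#   draw_buffers(2, bufs)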
@accepts(t.enum, t.enum, t.enum, t.enum)
@returns(t.void)
@binds(dll)
def stencil_op_separate(face, sfail, dpfail, dppass):
'''
set front and/or back stencil test actions.
gl.stencil_op_separate takes three arguments that indicate what happens to
the stored stencil value while stenciling is enabled. If the stencil test
fails, no change is made to the pixel's color or depth buffers, and sfail
specifies what happens to the stencil buffer contents. The following eight
actions are possible.
Args:
face: whether front and/or back stencil state is updated.
sfail: the action to take when the stencil test fails.
dpfail: the stencil action when the stencil test passes, but the depth
test fails.
dppass: the stencil action when both the stencil test and the depth test
pass, or when the stencil test passes and either there is no depth
buffer or depth testing is not enabled.
'''
@accepts(t.enum, t.enum, t.int, t.uint)
@returns(t.void)
@binds(dll)
def stencil_func_separate(face, func, ref, mask):
'''
set front and/or back function and reference value for stencil testing.
Args:
face: whether front and/or back stencil state is updated.
func: the test function.
ref: the reference value for the stencil test.
mask: a mask that is anded with both the reference value and the stored
stencil value when the test is done.
'''
@accepts(t.enum, t.uint)
@returns(t.void)
@binds(dll)
def stencil_mask_separate(face, mask):
'''
control the front and/or back writing of individual bits in the stencil
planes.
    gl.stencil_mask_separate controls the writing of individual bits in the
    stencil planes. The least significant n bits of mask, where n is the number
    of bits in the stencil buffer, specify a mask: where a 1 appears in the
    mask, the corresponding bit in the stencil buffer can be written.
Args:
face: whether the front and/or back stencil writemask is updated.
mask: a bit mask to enable and disable writing of individual bits in the
stencil planes.
'''
@accepts(t.uint, t.uint)
@returns(t.void)
@binds(dll)
def attach_shader(program, shader):
'''
Attaches a shader object to a program object.
Args:
program: the program object to which a shader object will be attached.
shader: the shader object that is to be attached.
'''
@accepts(t.uint, t.uint, t.char_p)
@returns(t.void)
@binds(dll)
def bind_attrib_location(program, index, name):
'''
Associates a generic vertex attribute index with a named attribute variable.
gl.bind_attrib_location is used to associate a user-defined attribute
variable in the program object specified by program with a generic vertex
attribute index. The name of the user-defined attribute variable is passed
as a null terminated string in name. The generic vertex attribute index to
be bound to this variable is specified by index. When program is made part
of current state, values provided via the generic vertex attribute index
will modify the value of the user-defined attribute variable specified by
name.
Args:
program: the handle of the program object in which the association is to
be made.
index: the index of the generic vertex attribute to be bound.
name: a null terminated string containing the name of the vertex shader
attribute variable to which index is to be bound.
'''
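# Illustrative usage sketch (not part of the generated bindings): binding a vertex
# shader input to attribute index 0 before linking. The program handle and the
# attribute name are hypothetical, and the name is passed as bytes on the
# assumption that t.char_p maps to ctypes c_char_p.
#
#   bind_attrib_location(program, 0, b'a_position')
#   link_program(program)   # the binding takes effect at link time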
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def compile_shader(shader):
'''
Compiles a shader object.
gl.compile_shader compiles the source code strings that have been stored in
the shader object specified by shader.
Args:
shader: the shader object to be compiled.
'''
@accepts()
@returns(t.uint)
@binds(dll)
def create_program():
'''
Creates a program object.
'''
@accepts(t.enum)
@returns(t.uint)
@binds(dll)
def create_shader(type):
'''
Creates a shader object.
gl.create_shader creates an empty shader object and returns a non-zero value
by which it can be referenced. A shader object is used to maintain the
source code strings that define a shader. shaderType indicates the type of
shader to be created. Five types of shader are supported.
Args:
type: the type of shader to be created.
'''
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def delete_program(program):
'''
Deletes a program object.
gl.delete_program frees the memory and invalidates the name associated with
the program object specified by program. This command effectively undoes the
effects of a call to gl.create_program.
Args:
program: the program object to be deleted.
'''
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def delete_shader(shader):
'''
Deletes a shader object.
gl.delete_shader frees the memory and invalidates the name associated with
the shader object specified by shader. This command effectively undoes the
effects of a call to gl.create_shader.
Args:
shader: the shader object to be deleted.
'''
@accepts(t.uint, t.uint)
@returns(t.void)
@binds(dll)
def detach_shader(program, shader):
'''
Detaches a shader object from a program object to which it is attached.
gl.detach_shader detaches the shader object specified by shader from the
program object specified by program. This command can be used to undo the
effect of the command gl.attach_shader.
Args:
program: the program object from which to detach the shader object.
shader: the shader object to be detached.
'''
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def disable_vertex_attrib_array(index):
    pass
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def enable_vertex_attrib_array(index):
'''
Enable or disable a generic vertex attribute array.
gl.enable_vertex_attrib_array and gl.enable_vertex_array_attrib enable the
generic vertex attribute array specified by index.
gl.enable_vertex_attrib_array uses currently bound vertex array object for
the operation, whereas gl.enable_vertex_array_attrib updates state of the
vertex array object with ID vaobj.
Args:
index: the index of the generic vertex attribute to be enabled or
disabled.
'''
@accepts(t.uint, t.uint, t.sizei, POINTER(t.sizei), POINTER(t.int), POINTER(t.enum), t.char_p)
@returns(t.void)
@binds(dll)
def get_active_attrib(program, index, bufsize, length, size, type, name):
'''
Returns information about an active attribute variable for the specified
program object.
gl.get_active_attrib returns information about an active attribute variable
in the program object specified by program. The number of active attributes
can be obtained by calling gl.get_program with the value
gl.ACTIVE_ATTRIBUTES. A value of 0 for index selects the first active
attribute variable. Permissible values for index range from zero to the
number of active attribute variables minus one.
Args:
program: the program object to be queried.
index: the index of the attribute variable to be queried.
bufsize: the maximum number of characters opengl is allowed to write in
the character buffer indicated by name.
length: returns the number of characters actually written by opengl in
the string indicated by name (excluding the null terminator) if a
value other than null is passed.
size: returns the size of the attribute variable.
type: returns the data type of the attribute variable.
name: returns a null terminated string containing the name of the
attribute variable.
'''
@accepts(t.uint, t.uint, t.sizei, POINTER(t.sizei), POINTER(t.int), POINTER(t.enum), t.char_p)
@returns(t.void)
@binds(dll)
def get_active_uniform(program, index, bufsize, length, size, type, name):
'''
Returns information about an active uniform variable for the specified
program object.
gl.get_active_uniform returns information about an active uniform variable
in the program object specified by program. The number of active uniform
variables can be obtained by calling gl.get_program with the value
gl.ACTIVE_UNIFORMS. A value of 0 for index selects the first active uniform
variable. Permissible values for index range from zero to the number of
active uniform variables minus one.
Args:
program: the program object to be queried.
index: the index of the uniform variable to be queried.
bufsize: the maximum number of characters opengl is allowed to write in
the character buffer indicated by name.
length: returns the number of characters actually written by opengl in
the string indicated by name (excluding the null terminator) if a
value other than null is passed.
size: returns the size of the uniform variable.
type: returns the data type of the uniform variable.
name: returns a null terminated string containing the name of the
uniform variable.
'''
@accepts(t.uint, t.sizei, POINTER(t.sizei), POINTER(t.uint))
@returns(t.void)
@binds(dll)
def get_attached_shaders(program, maxcount, count, shaders):
'''
Returns the handles of the shader objects attached to a program object.
gl.get_attached_shaders returns the names of the shader objects attached to
program. The names of shader objects that are attached to program will be
returned in shaders. The actual number of shader names written into shaders
is returned in count. If no shader objects are attached to program, count is
set to 0.
Args:
program: the program object to be queried.
maxcount: the size of the array for storing the returned object names.
count: returns the number of names actually returned in shaders.
shaders: an array that is used to return the names of attached shader
objects.
'''
@accepts(t.uint, t.char_p)
@returns(t.int)
@binds(dll)
def get_attrib_location(program, name):
'''
Returns the location of an attribute variable.
gl.get_attrib_location queries the previously linked program object
specified by program for the attribute variable specified by name and
returns the index of the generic vertex attribute that is bound to that
attribute variable. If name is a matrix attribute variable, the index of the
first column of the matrix is returned. If the named attribute variable is
not an active attribute in the specified program object or if name starts
with the reserved prefix "gl_", a value of -1 is returned.
Args:
program: the program object to be queried.
name: points to a null terminated string containing the name of the
attribute variable whose location is to be queried.
'''
@accepts(t.uint, t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def get_programiv(program, pname, params):
    pass
@accepts(t.uint, t.sizei, POINTER(t.sizei), t.char_p)
@returns(t.void)
@binds(dll)
def get_program_info_log(program, bufsize, length, infolog):
'''
Returns the information log for a program object.
gl.get_program_info_log returns the information log for the specified
program object. The information log for a program object is modified when
the program object is linked or validated. The string that is returned will
be null terminated.
Args:
program: the program object whose information log is to be queried.
bufsize: the size of the character buffer for storing the returned
information log.
length: returns the length of the string returned in infolog (excluding
the null terminator).
infolog: an array of characters that is used to return the information
log.
'''
@accepts(t.uint, t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def get_shaderiv(shader, pname, params):
    pass
@accepts(t.uint, t.sizei, POINTER(t.sizei), t.char_p)
@returns(t.void)
@binds(dll)
def get_shader_info_log(shader, bufsize, length, infolog):
'''
Returns the information log for a shader object.
gl.get_shader_info_log returns the information log for the specified shader
object. The information log for a shader object is modified when the shader
is compiled. The string that is returned will be null terminated.
Args:
shader: the shader object whose information log is to be queried.
bufsize: the size of the character buffer for storing the returned
information log.
length: returns the length of the string returned in infolog (excluding
the null terminator).
infolog: an array of characters that is used to return the information
log.
'''
@accepts(t.uint, t.sizei, POINTER(t.sizei), t.char_p)
@returns(t.void)
@binds(dll)
def get_shader_source(shader, bufsize, length, source):
'''
Returns the source code string from a shader object.
gl.get_shader_source returns the concatenation of the source code strings
from the shader object specified by shader. The source code strings for a
shader object are the result of a previous call to gl.shader_source. The
string returned by the function will be null terminated.
Args:
shader: the shader object to be queried.
bufsize: the size of the character buffer for storing the returned
source code string.
length: returns the length of the string returned in source (excluding
the null terminator).
source: an array of characters that is used to return the source code
string.
'''
@accepts(t.uint, t.char_p)
@returns(t.int)
@binds(dll)
def get_uniform_location(program, name):
'''
Returns the location of a uniform variable.
gl.get_uniform_location returns an integer that represents the location of a
specific uniform variable within a program object. name must be a null
terminated string that contains no white space. name must be an active
uniform variable name in program that is not a structure, an array of
structures, or a subcomponent of a vector or a matrix. This function returns
-1 if name does not correspond to an active uniform variable in program, if
name starts with the reserved prefix "gl_", or if name is associated with an
atomic counter or a named uniform block.
Args:
program: the program object to be queried.
name: points to a null terminated string containing the name of the
uniform variable whose location is to be queried.
'''
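# Illustrative usage sketch (not part of the generated bindings): resolving a
# uniform after linking and setting it through the uniform4f wrapper defined
# later in this module. The program handle and uniform name are hypothetical,
# and the name is passed as bytes on the assumption that t.char_p maps to
# ctypes c_char_p.
#
#   loc = get_uniform_location(program, b'u_color')
#   if loc != -1:
#       uniform4f(loc, 1.0, 0.0, 0.0, 1.0)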
@accepts(t.uint, t.int, POINTER(t.float))
@returns(t.void)
@binds(dll)
def get_uniformfv(program, location, params):
    pass
@accepts(t.uint, t.int, POINTER(t.int))
@returns(t.void)
@binds(dll)
def get_uniformiv(program, location, params):
    pass
@accepts(t.uint, t.enum, POINTER(t.double))
@returns(t.void)
@binds(dll)
def get_vertex_attribdv(index, pname, params):
    pass
@accepts(t.uint, t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def get_vertex_attribfv(index, pname, params):
    pass
@accepts(t.uint, t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def get_vertex_attribiv(index, pname, params):
    pass
@accepts(t.uint, t.enum, t.void)
@returns(t.void)
@binds(dll)
def get_vertex_attrib_pointerv(index, pname, pointer):
'''
return the address of the specified generic vertex attribute pointer.
gl.get_vertex_attrib_pointerv returns pointer information. index is the
generic vertex attribute to be queried, pname is a symbolic constant
indicating the pointer to be returned, and params is a pointer to a location
in which to place the returned data.
Args:
index: the generic vertex attribute parameter to be returned.
pname: the symbolic name of the generic vertex attribute parameter to be
returned.
pointer: returns the pointer value.
'''
@accepts(t.uint)
@returns(t.boolean)
@binds(dll)
def is_program(program):
'''
Determines if a name corresponds to a program object.
gl.is_program returns gl.TRUE if program is the name of a program object
previously created with gl.create_program and not yet deleted with
gl.delete_program. If program is zero or a non-zero value that is not the
name of a program object, or if an error occurs, gl.is_program returns
gl.FALSE.
Args:
program: a potential program object.
'''
@accepts(t.uint)
@returns(t.boolean)
@binds(dll)
def is_shader(shader):
'''
Determines if a name corresponds to a shader object.
gl.is_shader returns gl.TRUE if shader is the name of a shader object
previously created with gl.create_shader and not yet deleted with
gl.delete_shader. If shader is zero or a non-zero value that is not the name
of a shader object, or if an error occurs, gl.is_shader returns gl.FALSE.
Args:
shader: a potential shader object.
'''
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def link_program(program):
'''
Links a program object.
gl.link_program links the program object specified by program. If any shader
objects of type gl.VERTEX_SHADER are attached to program, they will be used
to create an executable that will run on the programmable vertex processor.
If any shader objects of type gl.GEOMETRY_SHADER are attached to program,
they will be used to create an executable that will run on the programmable
geometry processor. If any shader objects of type gl.FRAGMENT_SHADER are
attached to program, they will be used to create an executable that will run
on the programmable fragment processor.
Args:
program: the handle of the program object to be linked.
'''
@accepts(t.uint, t.sizei, POINTER(t.char_p), POINTER(t.int))
@returns(t.void)
@binds(dll)
def shader_source(shader, count, string, length):
'''
Replaces the source code in a shader object.
gl.shader_source sets the source code in shader to the source code in the
array of strings specified by string. Any source code previously stored in
the shader object is completely replaced. The number of strings in the array
is specified by count. If length is None, each string is assumed to be null
terminated.
Args:
shader: the handle of the shader object whose source code is to be
replaced.
count: the number of elements in the string and length arrays.
string: an array of pointers to strings containing the source code to be
loaded into the shader.
length: an array of string lengths.
'''
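# Illustrative usage sketch (not part of the generated bindings): the usual
# create/source/compile/attach/link sequence built from the wrappers in this
# module. How the source-string array is marshalled depends on the ctypes
# aliases in t; the (t.char_p * 1)(...) construction below is an assumption,
# and the GLSL source itself is a placeholder.
#
#   vs = create_shader(VERTEX_SHADER)
#   shader_source(vs, 1, (t.char_p * 1)(b'...vertex GLSL...'), None)
#   compile_shader(vs)
#   program = create_program()
#   attach_shader(program, vs)
#   link_program(program)
#   use_program(program)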
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def use_program(program):
'''
Installs a program object as part of current rendering state.
gl.use_program installs the program object specified by program as part of
current rendering state. One or more executables are created in a program
object by successfully attaching shader objects to it with gl.attach_shader,
successfully compiling the shader objects with gl.compile_shader, and
successfully linking the program object with gl.link_program.
Args:
program: the handle of the program object whose executables are to be
used as part of current rendering state.
'''
@accepts(t.int, t.float)
@returns(t.void)
@binds(dll)
def uniform1f(location, v0):
    pass
@accepts(t.int, t.float, t.float)
@returns(t.void)
@binds(dll)
def uniform2f(location, v0, v1):
    pass
@accepts(t.int, t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def uniform3f(location, v0, v1, v2):
    pass
@accepts(t.int, t.float, t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def uniform4f(location, v0, v1, v2, v3):
    pass
@accepts(t.int, t.int)
@returns(t.void)
@binds(dll)
def uniform1i(location, v0):
    pass
@accepts(t.int, t.int, t.int)
@returns(t.void)
@binds(dll)
def uniform2i(location, v0, v1):
    pass
@accepts(t.int, t.int, t.int, t.int)
@returns(t.void)
@binds(dll)
def uniform3i(location, v0, v1, v2):
    pass
@accepts(t.int, t.int, t.int, t.int, t.int)
@returns(t.void)
@binds(dll)
def uniform4i(location, v0, v1, v2, v3):
    pass
@accepts(t.int, t.sizei, POINTER(t.float))
@returns(t.void)
@binds(dll)
def uniform1fv(location, count, value):
    pass
@accepts(t.int, t.sizei, POINTER(t.float))
@returns(t.void)
@binds(dll)
def uniform2fv(location, count, value):
    pass
@accepts(t.int, t.sizei, POINTER(t.float))
@returns(t.void)
@binds(dll)
def uniform3fv(location, count, value):
    pass
@accepts(t.int, t.sizei, POINTER(t.float))
@returns(t.void)
@binds(dll)
def uniform4fv(location, count, value):
    pass
@accepts(t.int, t.sizei, POINTER(t.int))
@returns(t.void)
@binds(dll)
def uniform1iv(location, count, value):
    pass
@accepts(t.int, t.sizei, POINTER(t.int))
@returns(t.void)
@binds(dll)
def uniform2iv(location, count, value):
    pass
@accepts(t.int, t.sizei, POINTER(t.int))
@returns(t.void)
@binds(dll)
def uniform3iv(location, count, value):
    pass
@accepts(t.int, t.sizei, POINTER(t.int))
@returns(t.void)
@binds(dll)
def uniform4iv(location, count, value):
    pass
@accepts(t.int, t.sizei, t.boolean, POINTER(t.float))
@returns(t.void)
@binds(dll)
def uniform_matrix2fv(location, count, transpose, value):
    pass
@accepts(t.int, t.sizei, t.boolean, POINTER(t.float))
@returns(t.void)
@binds(dll)
def uniform_matrix3fv(location, count, transpose, value):
    pass
@accepts(t.int, t.sizei, t.boolean, POINTER(t.float))
@returns(t.void)
@binds(dll)
def uniform_matrix4fv(location, count, transpose, value):
    pass
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def validate_program(program):
'''
Validates a program object.
gl.validate_program checks to see whether the executables contained in
program can execute given the current OpenGL state. The information
generated by the validation process will be stored in program's information
log. The validation information may consist of an empty string, or it may be
a string containing information about how the current program object
interacts with the rest of current OpenGL state. This provides a way for
OpenGL implementers to convey more information about why the current program
is inefficient, suboptimal, failing to execute, and so on.
Args:
program: the handle of the program object to be validated.
'''
@accepts(t.uint, t.double)
@returns(t.void)
@binds(dll)
def vertex_attrib1d(index, x):
    pass
@accepts(t.uint, POINTER(t.double))
@returns(t.void)
@binds(dll)
def vertex_attrib1dv(index, v):
    pass
@accepts(t.uint, t.float)
@returns(t.void)
@binds(dll)
def vertex_attrib1f(index, x):
    pass
@accepts(t.uint, POINTER(t.float))
@returns(t.void)
@binds(dll)
def vertex_attrib1fv(index, v):
    pass
@accepts(t.uint, t.short)
@returns(t.void)
@binds(dll)
def vertex_attrib1s(index, x):
    pass
@accepts(t.uint, POINTER(t.short))
@returns(t.void)
@binds(dll)
def vertex_attrib1sv(index, v):
    pass
@accepts(t.uint, t.double, t.double)
@returns(t.void)
@binds(dll)
def vertex_attrib2d(index, x, y):
    pass
@accepts(t.uint, POINTER(t.double))
@returns(t.void)
@binds(dll)
def vertex_attrib2dv(index, v):
    pass
@accepts(t.uint, t.float, t.float)
@returns(t.void)
@binds(dll)
def vertex_attrib2f(index, x, y):
    pass
@accepts(t.uint, POINTER(t.float))
@returns(t.void)
@binds(dll)
def vertex_attrib2fv(index, v):
    pass
@accepts(t.uint, t.short, t.short)
@returns(t.void)
@binds(dll)
def vertex_attrib2s(index, x, y):
    pass
@accepts(t.uint, POINTER(t.short))
@returns(t.void)
@binds(dll)
def vertex_attrib2sv(index, v):
    pass
@accepts(t.uint, t.double, t.double, t.double)
@returns(t.void)
@binds(dll)
def vertex_attrib3d(index, x, y, z):
    pass
@accepts(t.uint, POINTER(t.double))
@returns(t.void)
@binds(dll)
def vertex_attrib3dv(index, v):
    pass
@accepts(t.uint, t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def vertex_attrib3f(index, x, y, z):
    pass
@accepts(t.uint, POINTER(t.float))
@returns(t.void)
@binds(dll)
def vertex_attrib3fv(index, v):
    pass
@accepts(t.uint, t.short, t.short, t.short)
@returns(t.void)
@binds(dll)
def vertex_attrib3s(index, x, y, z):
    pass
@accepts(t.uint, POINTER(t.short))
@returns(t.void)
@binds(dll)
def vertex_attrib3sv(index, v):
    pass
@accepts(t.uint, POINTER(t.byte))
@returns(t.void)
@binds(dll)
def vertex_attrib4_nbv(index, v):
    pass
@accepts(t.uint, POINTER(t.int))
@returns(t.void)
@binds(dll)
def vertex_attrib4_niv(index, v):
    pass
@accepts(t.uint, POINTER(t.short))
@returns(t.void)
@binds(dll)
def vertex_attrib4_nsv(index, v):
    pass
@accepts(t.uint, t.ubyte, t.ubyte, t.ubyte, t.ubyte)
@returns(t.void)
@binds(dll)
def vertex_attrib4_nub(index, x, y, z, w):
    pass
@accepts(t.uint, POINTER(t.ubyte))
@returns(t.void)
@binds(dll)
def vertex_attrib4_nubv(index, v):
    pass
@accepts(t.uint, POINTER(t.uint))
@returns(t.void)
@binds(dll)
def vertex_attrib4_nuiv(index, v):
    pass
@accepts(t.uint, POINTER(t.ushort))
@returns(t.void)
@binds(dll)
def vertex_attrib4_nusv(index, v):
    pass
@accepts(t.uint, POINTER(t.byte))
@returns(t.void)
@binds(dll)
def vertex_attrib4bv(index, v):
    pass
@accepts(t.uint, t.double, t.double, t.double, t.double)
@returns(t.void)
@binds(dll)
def vertex_attrib4d(index, x, y, z, w):
    pass
@accepts(t.uint, POINTER(t.double))
@returns(t.void)
@binds(dll)
def vertex_attrib4dv(index, v):
    pass
@accepts(t.uint, t.float, t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def vertex_attrib4f(index, x, y, z, w):
    pass
@accepts(t.uint, POINTER(t.float))
@returns(t.void)
@binds(dll)
def vertex_attrib4fv(index, v):
    pass
@accepts(t.uint, POINTER(t.int))
@returns(t.void)
@binds(dll)
def vertex_attrib4iv(index, v):
    pass
@accepts(t.uint, t.short, t.short, t.short, t.short)
@returns(t.void)
@binds(dll)
def vertex_attrib4s(index, x, y, z, w):
    pass
@accepts(t.uint, POINTER(t.short))
@returns(t.void)
@binds(dll)
def vertex_attrib4sv(index, v):
    pass
@accepts(t.uint, POINTER(t.ubyte))
@returns(t.void)
@binds(dll)
def vertex_attrib4ubv(index, v):
    pass
@accepts(t.uint, POINTER(t.uint))
@returns(t.void)
@binds(dll)
def vertex_attrib4uiv(index, v):
    pass
@accepts(t.uint, POINTER(t.ushort))
@returns(t.void)
@binds(dll)
def vertex_attrib4usv(index, v):
    pass
@accepts(t.uint, t.int, t.enum, t.boolean, t.sizei, t.void)
@returns(t.void)
@binds(dll)
def vertex_attrib_pointer(index, size, type, normalized, stride, pointer):
'''
define an array of generic vertex attribute data.
gl.vertex_attrib_pointer, gl.vertex_attrib_i_pointer and
gl.vertex_attrib_l_pointer specify the location and data format of the array
of generic vertex attributes at index index to use when rendering. size
specifies the number of components per attribute and must be 1, 2, 3, 4, or
gl.BGRA. type specifies the data type of each component, and stride
specifies the byte stride from one attribute to the next, allowing vertices
and attributes to be packed into a single array or stored in separate
arrays.
Args:
index: the index of the generic vertex attribute to be modified.
size: the number of components per generic vertex attribute.
type: the data type of each component in the array.
normalized: for glvertexattribpointer, specifies whether fixed-point
data values should be normalized (gl_true) or converted directly as
fixed-point values (gl_false) when they are accessed.
stride: the byte offset between consecutive generic vertex attributes.
        pointer: an offset of the first component of the first generic vertex
attribute in the array in the data store of the buffer currently
bound to the gl_array_buffer target.
'''
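# Illustrative usage sketch (not part of the generated bindings): describing a
# tightly packed, 3-component float attribute at index 0 of the currently bound
# GL_ARRAY_BUFFER. FLOAT is assumed to come from the module that defines the
# core type enums (it is not declared in this 2.0 module), and the final 0 is
# the byte offset into the buffer's data store.
#
#   enable_vertex_attrib_array(0)
#   vertex_attrib_pointer(0, 3, FLOAT, False, 0, 0)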
BLEND_EQUATION_RGB = 0x8009
VERTEX_ATTRIB_ARRAY_ENABLED = 0x8622
VERTEX_ATTRIB_ARRAY_SIZE = 0x8623
VERTEX_ATTRIB_ARRAY_STRIDE = 0x8624
VERTEX_ATTRIB_ARRAY_TYPE = 0x8625
CURRENT_VERTEX_ATTRIB = 0x8626
VERTEX_PROGRAM_POINT_SIZE = 0x8642
VERTEX_ATTRIB_ARRAY_POINTER = 0x8645
STENCIL_BACK_FUNC = 0x8800
STENCIL_BACK_FAIL = 0x8801
STENCIL_BACK_PASS_DEPTH_FAIL = 0x8802
STENCIL_BACK_PASS_DEPTH_PASS = 0x8803
MAX_DRAW_BUFFERS = 0x8824
DRAW_BUFFER0 = 0x8825
DRAW_BUFFER1 = 0x8826
DRAW_BUFFER2 = 0x8827
DRAW_BUFFER3 = 0x8828
DRAW_BUFFER4 = 0x8829
DRAW_BUFFER5 = 0x882A
DRAW_BUFFER6 = 0x882B
DRAW_BUFFER7 = 0x882C
DRAW_BUFFER8 = 0x882D
DRAW_BUFFER9 = 0x882E
DRAW_BUFFER10 = 0x882F
DRAW_BUFFER11 = 0x8830
DRAW_BUFFER12 = 0x8831
DRAW_BUFFER13 = 0x8832
DRAW_BUFFER14 = 0x8833
DRAW_BUFFER15 = 0x8834
BLEND_EQUATION_ALPHA = 0x883D
MAX_VERTEX_ATTRIBS = 0x8869
VERTEX_ATTRIB_ARRAY_NORMALIZED = 0x886A
MAX_TEXTURE_IMAGE_UNITS = 0x8872
FRAGMENT_SHADER = 0x8B30
VERTEX_SHADER = 0x8B31
MAX_FRAGMENT_UNIFORM_COMPONENTS = 0x8B49
MAX_VERTEX_UNIFORM_COMPONENTS = 0x8B4A
MAX_VARYING_FLOATS = 0x8B4B
MAX_VERTEX_TEXTURE_IMAGE_UNITS = 0x8B4C
MAX_COMBINED_TEXTURE_IMAGE_UNITS = 0x8B4D
SHADER_TYPE = 0x8B4F
FLOAT_VEC2 = 0x8B50
FLOAT_VEC3 = 0x8B51
FLOAT_VEC4 = 0x8B52
INT_VEC2 = 0x8B53
INT_VEC3 = 0x8B54
INT_VEC4 = 0x8B55
BOOL = 0x8B56
BOOL_VEC2 = 0x8B57
BOOL_VEC3 = 0x8B58
BOOL_VEC4 = 0x8B59
FLOAT_MAT2 = 0x8B5A
FLOAT_MAT3 = 0x8B5B
FLOAT_MAT4 = 0x8B5C
SAMPLER_1D = 0x8B5D
SAMPLER_2D = 0x8B5E
SAMPLER_3D = 0x8B5F
SAMPLER_CUBE = 0x8B60
SAMPLER_1D_SHADOW = 0x8B61
SAMPLER_2D_SHADOW = 0x8B62
DELETE_STATUS = 0x8B80
COMPILE_STATUS = 0x8B81
LINK_STATUS = 0x8B82
VALIDATE_STATUS = 0x8B83
INFO_LOG_LENGTH = 0x8B84
ATTACHED_SHADERS = 0x8B85
ACTIVE_UNIFORMS = 0x8B86
ACTIVE_UNIFORM_MAX_LENGTH = 0x8B87
SHADER_SOURCE_LENGTH = 0x8B88
ACTIVE_ATTRIBUTES = 0x8B89
ACTIVE_ATTRIBUTE_MAX_LENGTH = 0x8B8A
FRAGMENT_SHADER_DERIVATIVE_HINT = 0x8B8B
SHADING_LANGUAGE_VERSION = 0x8B8C
CURRENT_PROGRAM = 0x8B8D
POINT_SPRITE_COORD_ORIGIN = 0x8CA0
LOWER_LEFT = 0x8CA1
UPPER_LEFT = 0x8CA2
STENCIL_BACK_REF = 0x8CA3
STENCIL_BACK_VALUE_MASK = 0x8CA4
STENCIL_BACK_WRITEMASK = 0x8CA5
VERTEX_PROGRAM_TWO_SIDE = 0x8643
POINT_SPRITE = 0x8861
COORD_REPLACE = 0x8862
MAX_TEXTURE_COORDS = 0x8871
| 30.675287 | 94 | 0.714379 |
#BEWARE: automatically generated code
#This code was generated by /generate/__main__.py
from opengl.gl.raw.bindings import *
@accepts(t.enum, t.enum)
@returns(t.void)
@binds(dll)
def blend_equation_separate(modergb, modealpha):
'''
set the RGB blend equation and the alpha blend equation separately.
Args:
modergb: the rgb blend equation, how the red, green, and blue components
of the source and destination colors are combined.
modealpha: the alpha blend equation, how the alpha component of the
source and destination colors are combined.
'''
@accepts(t.sizei, POINTER(t.enum))
@returns(t.void)
@binds(dll)
def draw_buffers(n, bufs):
'''
Specifies a list of color buffers to be drawn into.
gl.draw_buffers and gl.named_framebuffer_draw_buffers define an array of
buffers into which outputs from the fragment shader data will be written. If
a fragment shader writes a value to one or more user defined output
variables, then the value of each variable will be written into the buffer
specified at a location within bufs corresponding to the location assigned
to that user defined output. The draw buffer used for user defined outputs
assigned to locations greater than or equal to n is implicitly set to
gl.NONE and any data written to such an output is discarded.
Args:
n: the number of buffers in bufs.
bufs: points to an array of symbolic constants specifying the buffers
into which fragment colors or data values will be written.
'''
@accepts(t.enum, t.enum, t.enum, t.enum)
@returns(t.void)
@binds(dll)
def stencil_op_separate(face, sfail, dpfail, dppass):
'''
set front and/or back stencil test actions.
gl.stencil_op_separate takes three arguments that indicate what happens to
the stored stencil value while stenciling is enabled. If the stencil test
fails, no change is made to the pixel's color or depth buffers, and sfail
specifies what happens to the stencil buffer contents. The following eight
actions are possible.
Args:
face: whether front and/or back stencil state is updated.
sfail: the action to take when the stencil test fails.
dpfail: the stencil action when the stencil test passes, but the depth
test fails.
dppass: the stencil action when both the stencil test and the depth test
pass, or when the stencil test passes and either there is no depth
buffer or depth testing is not enabled.
'''
@accepts(t.enum, t.enum, t.int, t.uint)
@returns(t.void)
@binds(dll)
def stencil_func_separate(face, func, ref, mask):
'''
set front and/or back function and reference value for stencil testing.
Args:
face: whether front and/or back stencil state is updated.
func: the test function.
ref: the reference value for the stencil test.
mask: a mask that is anded with both the reference value and the stored
stencil value when the test is done.
'''
@accepts(t.enum, t.uint)
@returns(t.void)
@binds(dll)
def stencil_mask_separate(face, mask):
'''
control the front and/or back writing of individual bits in the stencil
planes.
    gl.stencil_mask_separate controls the writing of individual bits in the
    stencil planes. The least significant n bits of mask, where n is the number
    of bits in the stencil buffer, specify a mask: where a 1 appears in the
    mask, the corresponding bit in the stencil buffer can be written.
Args:
face: whether the front and/or back stencil writemask is updated.
mask: a bit mask to enable and disable writing of individual bits in the
stencil planes.
'''
@accepts(t.uint, t.uint)
@returns(t.void)
@binds(dll)
def attach_shader(program, shader):
'''
Attaches a shader object to a program object.
Args:
program: the program object to which a shader object will be attached.
shader: the shader object that is to be attached.
'''
@accepts(t.uint, t.uint, t.char_p)
@returns(t.void)
@binds(dll)
def bind_attrib_location(program, index, name):
'''
Associates a generic vertex attribute index with a named attribute variable.
gl.bind_attrib_location is used to associate a user-defined attribute
variable in the program object specified by program with a generic vertex
attribute index. The name of the user-defined attribute variable is passed
as a null terminated string in name. The generic vertex attribute index to
be bound to this variable is specified by index. When program is made part
of current state, values provided via the generic vertex attribute index
will modify the value of the user-defined attribute variable specified by
name.
Args:
program: the handle of the program object in which the association is to
be made.
index: the index of the generic vertex attribute to be bound.
name: a null terminated string containing the name of the vertex shader
attribute variable to which index is to be bound.
'''
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def compile_shader(shader):
'''
Compiles a shader object.
gl.compile_shader compiles the source code strings that have been stored in
the shader object specified by shader.
Args:
shader: the shader object to be compiled.
'''
@accepts()
@returns(t.uint)
@binds(dll)
def create_program():
'''
Creates a program object.
'''
@accepts(t.enum)
@returns(t.uint)
@binds(dll)
def create_shader(type):
'''
Creates a shader object.
gl.create_shader creates an empty shader object and returns a non-zero value
by which it can be referenced. A shader object is used to maintain the
source code strings that define a shader. shaderType indicates the type of
shader to be created. Five types of shader are supported.
Args:
type: the type of shader to be created.
'''
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def delete_program(program):
'''
Deletes a program object.
gl.delete_program frees the memory and invalidates the name associated with
the program object specified by program. This command effectively undoes the
effects of a call to gl.create_program.
Args:
program: the program object to be deleted.
'''
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def delete_shader(shader):
'''
Deletes a shader object.
gl.delete_shader frees the memory and invalidates the name associated with
the shader object specified by shader. This command effectively undoes the
effects of a call to gl.create_shader.
Args:
shader: the shader object to be deleted.
'''
@accepts(t.uint, t.uint)
@returns(t.void)
@binds(dll)
def detach_shader(program, shader):
'''
Detaches a shader object from a program object to which it is attached.
gl.detach_shader detaches the shader object specified by shader from the
program object specified by program. This command can be used to undo the
effect of the command gl.attach_shader.
Args:
program: the program object from which to detach the shader object.
shader: the shader object to be detached.
'''
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def disable_vertex_attrib_array(index):
pass
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def enable_vertex_attrib_array(index):
'''
Enable or disable a generic vertex attribute array.
gl.enable_vertex_attrib_array and gl.enable_vertex_array_attrib enable the
generic vertex attribute array specified by index.
gl.enable_vertex_attrib_array uses currently bound vertex array object for
the operation, whereas gl.enable_vertex_array_attrib updates state of the
vertex array object with ID vaobj.
Args:
index: the index of the generic vertex attribute to be enabled or
disabled.
'''
@accepts(t.uint, t.uint, t.sizei, POINTER(t.sizei), POINTER(t.int), POINTER(t.enum), t.char_p)
@returns(t.void)
@binds(dll)
def get_active_attrib(program, index, bufsize, length, size, type, name):
'''
Returns information about an active attribute variable for the specified
program object.
gl.get_active_attrib returns information about an active attribute variable
in the program object specified by program. The number of active attributes
can be obtained by calling gl.get_program with the value
gl.ACTIVE_ATTRIBUTES. A value of 0 for index selects the first active
attribute variable. Permissible values for index range from zero to the
number of active attribute variables minus one.
Args:
program: the program object to be queried.
index: the index of the attribute variable to be queried.
bufsize: the maximum number of characters opengl is allowed to write in
the character buffer indicated by name.
length: returns the number of characters actually written by opengl in
the string indicated by name (excluding the null terminator) if a
value other than null is passed.
size: returns the size of the attribute variable.
type: returns the data type of the attribute variable.
name: returns a null terminated string containing the name of the
attribute variable.
'''
@accepts(t.uint, t.uint, t.sizei, POINTER(t.sizei), POINTER(t.int), POINTER(t.enum), t.char_p)
@returns(t.void)
@binds(dll)
def get_active_uniform(program, index, bufsize, length, size, type, name):
'''
Returns information about an active uniform variable for the specified
program object.
gl.get_active_uniform returns information about an active uniform variable
in the program object specified by program. The number of active uniform
variables can be obtained by calling gl.get_program with the value
gl.ACTIVE_UNIFORMS. A value of 0 for index selects the first active uniform
variable. Permissible values for index range from zero to the number of
active uniform variables minus one.
Args:
program: the program object to be queried.
index: the index of the uniform variable to be queried.
bufsize: the maximum number of characters opengl is allowed to write in
the character buffer indicated by name.
length: returns the number of characters actually written by opengl in
the string indicated by name (excluding the null terminator) if a
value other than null is passed.
size: returns the size of the uniform variable.
type: returns the data type of the uniform variable.
name: returns a null terminated string containing the name of the
uniform variable.
'''
@accepts(t.uint, t.sizei, POINTER(t.sizei), POINTER(t.uint))
@returns(t.void)
@binds(dll)
def get_attached_shaders(program, maxcount, count, shaders):
'''
Returns the handles of the shader objects attached to a program object.
gl.get_attached_shaders returns the names of the shader objects attached to
program. The names of shader objects that are attached to program will be
returned in shaders. The actual number of shader names written into shaders
is returned in count. If no shader objects are attached to program, count is
set to 0.
Args:
program: the program object to be queried.
maxcount: the size of the array for storing the returned object names.
count: returns the number of names actually returned in shaders.
shaders: an array that is used to return the names of attached shader
objects.
'''
@accepts(t.uint, t.char_p)
@returns(t.int)
@binds(dll)
def get_attrib_location(program, name):
'''
Returns the location of an attribute variable.
gl.get_attrib_location queries the previously linked program object
specified by program for the attribute variable specified by name and
returns the index of the generic vertex attribute that is bound to that
attribute variable. If name is a matrix attribute variable, the index of the
first column of the matrix is returned. If the named attribute variable is
not an active attribute in the specified program object or if name starts
with the reserved prefix "gl_", a value of -1 is returned.
Args:
program: the program object to be queried.
name: points to a null terminated string containing the name of the
attribute variable whose location is to be queried.
'''
@accepts(t.uint, t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def get_programiv(program, pname, params):
pass
@accepts(t.uint, t.sizei, POINTER(t.sizei), t.char_p)
@returns(t.void)
@binds(dll)
def get_program_info_log(program, bufsize, length, infolog):
'''
Returns the information log for a program object.
gl.get_program_info_log returns the information log for the specified
program object. The information log for a program object is modified when
the program object is linked or validated. The string that is returned will
be null terminated.
Args:
program: the program object whose information log is to be queried.
bufsize: the size of the character buffer for storing the returned
information log.
length: returns the length of the string returned in infolog (excluding
the null terminator).
infolog: an array of characters that is used to return the information
log.
'''
@accepts(t.uint, t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def get_shaderiv(shader, pname, params):
pass
@accepts(t.uint, t.sizei, POINTER(t.sizei), t.char_p)
@returns(t.void)
@binds(dll)
def get_shader_info_log(shader, bufsize, length, infolog):
'''
Returns the information log for a shader object.
gl.get_shader_info_log returns the information log for the specified shader
object. The information log for a shader object is modified when the shader
is compiled. The string that is returned will be null terminated.
Args:
shader: the shader object whose information log is to be queried.
bufsize: the size of the character buffer for storing the returned
information log.
length: returns the length of the string returned in infolog (excluding
the null terminator).
infolog: an array of characters that is used to return the information
log.
'''
@accepts(t.uint, t.sizei, POINTER(t.sizei), t.char_p)
@returns(t.void)
@binds(dll)
def get_shader_source(shader, bufsize, length, source):
'''
Returns the source code string from a shader object.
gl.get_shader_source returns the concatenation of the source code strings
from the shader object specified by shader. The source code strings for a
shader object are the result of a previous call to gl.shader_source. The
string returned by the function will be null terminated.
Args:
shader: the shader object to be queried.
bufsize: the size of the character buffer for storing the returned
source code string.
length: returns the length of the string returned in source (excluding
the null terminator).
source: an array of characters that is used to return the source code
string.
'''
@accepts(t.uint, t.char_p)
@returns(t.int)
@binds(dll)
def get_uniform_location(program, name):
'''
Returns the location of a uniform variable.
gl.get_uniform_location returns an integer that represents the location of a
specific uniform variable within a program object. name must be a null
terminated string that contains no white space. name must be an active
uniform variable name in program that is not a structure, an array of
structures, or a subcomponent of a vector or a matrix. This function returns
-1 if name does not correspond to an active uniform variable in program, if
name starts with the reserved prefix "gl_", or if name is associated with an
atomic counter or a named uniform block.
Args:
program: the program object to be queried.
name: points to a null terminated string containing the name of the
uniform variable whose location is to be queried.
'''
@accepts(t.uint, t.int, POINTER(t.float))
@returns(t.void)
@binds(dll)
def get_uniformfv(program, location, params):
pass
@accepts(t.uint, t.int, POINTER(t.int))
@returns(t.void)
@binds(dll)
def get_uniformiv(program, location, params):
pass
@accepts(t.uint, t.enum, POINTER(t.double))
@returns(t.void)
@binds(dll)
def get_vertex_attribdv(index, pname, params):
pass
@accepts(t.uint, t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def get_vertex_attribfv(index, pname, params):
pass
@accepts(t.uint, t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def get_vertex_attribiv(index, pname, params):
pass
@accepts(t.uint, t.enum, t.void)
@returns(t.void)
@binds(dll)
def get_vertex_attrib_pointerv(index, pname, pointer):
'''
return the address of the specified generic vertex attribute pointer.
gl.get_vertex_attrib_pointerv returns pointer information. index is the
generic vertex attribute to be queried, pname is a symbolic constant
indicating the pointer to be returned, and params is a pointer to a location
in which to place the returned data.
Args:
index: the generic vertex attribute parameter to be returned.
pname: the symbolic name of the generic vertex attribute parameter to be
returned.
pointer: returns the pointer value.
'''
@accepts(t.uint)
@returns(t.boolean)
@binds(dll)
def is_program(program):
'''
Determines if a name corresponds to a program object.
gl.is_program returns gl.TRUE if program is the name of a program object
previously created with gl.create_program and not yet deleted with
gl.delete_program. If program is zero or a non-zero value that is not the
name of a program object, or if an error occurs, gl.is_program returns
gl.FALSE.
Args:
program: a potential program object.
'''
@accepts(t.uint)
@returns(t.boolean)
@binds(dll)
def is_shader(shader):
'''
Determines if a name corresponds to a shader object.
gl.is_shader returns gl.TRUE if shader is the name of a shader object
previously created with gl.create_shader and not yet deleted with
gl.delete_shader. If shader is zero or a non-zero value that is not the name
of a shader object, or if an error occurs, gl.is_shader returns gl.FALSE.
Args:
shader: a potential shader object.
'''
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def link_program(program):
'''
Links a program object.
gl.link_program links the program object specified by program. If any shader
objects of type gl.VERTEX_SHADER are attached to program, they will be used
to create an executable that will run on the programmable vertex processor.
If any shader objects of type gl.GEOMETRY_SHADER are attached to program,
they will be used to create an executable that will run on the programmable
geometry processor. If any shader objects of type gl.FRAGMENT_SHADER are
attached to program, they will be used to create an executable that will run
on the programmable fragment processor.
Args:
program: the handle of the program object to be linked.
'''
@accepts(t.uint, t.sizei, POINTER(t.char_p), POINTER(t.int))
@returns(t.void)
@binds(dll)
def shader_source(shader, count, string, length):
'''
Replaces the source code in a shader object.
gl.shader_source sets the source code in shader to the source code in the
array of strings specified by string. Any source code previously stored in
the shader object is completely replaced. The number of strings in the array
is specified by count. If length is None, each string is assumed to be null
terminated.
Args:
shader: the handle of the shader object whose source code is to be
replaced.
count: the number of elements in the string and length arrays.
string: an array of pointers to strings containing the source code to be
loaded into the shader.
length: an array of string lengths.
'''
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def use_program(program):
'''
Installs a program object as part of current rendering state.
gl.use_program installs the program object specified by program as part of
current rendering state. One or more executables are created in a program
object by successfully attaching shader objects to it with gl.attach_shader,
successfully compiling the shader objects with gl.compile_shader, and
successfully linking the program object with gl.link_program.
Args:
program: the handle of the program object whose executables are to be
used as part of current rendering state.
'''
@accepts(t.int, t.float)
@returns(t.void)
@binds(dll)
def uniform1f(location, v0):
pass
@accepts(t.int, t.float, t.float)
@returns(t.void)
@binds(dll)
def uniform2f(location, v0, v1):
pass
@accepts(t.int, t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def uniform3f(location, v0, v1, v2):
pass
@accepts(t.int, t.float, t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def uniform4f(location, v0, v1, v2, v3):
pass
@accepts(t.int, t.int)
@returns(t.void)
@binds(dll)
def uniform1i(location, v0):
pass
@accepts(t.int, t.int, t.int)
@returns(t.void)
@binds(dll)
def uniform2i(location, v0, v1):
pass
@accepts(t.int, t.int, t.int, t.int)
@returns(t.void)
@binds(dll)
def uniform3i(location, v0, v1, v2):
pass
@accepts(t.int, t.int, t.int, t.int, t.int)
@returns(t.void)
@binds(dll)
def uniform4i(location, v0, v1, v2, v3):
pass
@accepts(t.int, t.sizei, POINTER(t.float))
@returns(t.void)
@binds(dll)
def uniform1fv(location, count, value):
pass
@accepts(t.int, t.sizei, POINTER(t.float))
@returns(t.void)
@binds(dll)
def uniform2fv(location, count, value):
pass
@accepts(t.int, t.sizei, POINTER(t.float))
@returns(t.void)
@binds(dll)
def uniform3fv(location, count, value):
pass
@accepts(t.int, t.sizei, POINTER(t.float))
@returns(t.void)
@binds(dll)
def uniform4fv(location, count, value):
pass
@accepts(t.int, t.sizei, POINTER(t.int))
@returns(t.void)
@binds(dll)
def uniform1iv(location, count, value):
pass
@accepts(t.int, t.sizei, POINTER(t.int))
@returns(t.void)
@binds(dll)
def uniform2iv(location, count, value):
pass
@accepts(t.int, t.sizei, POINTER(t.int))
@returns(t.void)
@binds(dll)
def uniform3iv(location, count, value):
pass
@accepts(t.int, t.sizei, POINTER(t.int))
@returns(t.void)
@binds(dll)
def uniform4iv(location, count, value):
pass
@accepts(t.int, t.sizei, t.boolean, POINTER(t.float))
@returns(t.void)
@binds(dll)
def uniform_matrix2fv(location, count, transpose, value):
pass
@accepts(t.int, t.sizei, t.boolean, POINTER(t.float))
@returns(t.void)
@binds(dll)
def uniform_matrix3fv(location, count, transpose, value):
pass
@accepts(t.int, t.sizei, t.boolean, POINTER(t.float))
@returns(t.void)
@binds(dll)
def uniform_matrix4fv(location, count, transpose, value):
pass
@accepts(t.uint)
@returns(t.void)
@binds(dll)
def validate_program(program):
'''
Validates a program object.
gl.validate_program checks to see whether the executables contained in
program can execute given the current OpenGL state. The information
generated by the validation process will be stored in program's information
log. The validation information may consist of an empty string, or it may be
a string containing information about how the current program object
interacts with the rest of current OpenGL state. This provides a way for
OpenGL implementers to convey more information about why the current program
is inefficient, suboptimal, failing to execute, and so on.
Args:
program: the handle of the program object to be validated.
'''
@accepts(t.uint, t.double)
@returns(t.void)
@binds(dll)
def vertex_attrib1d(index, x):
pass
@accepts(t.uint, POINTER(t.double))
@returns(t.void)
@binds(dll)
def vertex_attrib1dv(index, v):
pass
@accepts(t.uint, t.float)
@returns(t.void)
@binds(dll)
def vertex_attrib1f(index, x):
pass
@accepts(t.uint, POINTER(t.float))
@returns(t.void)
@binds(dll)
def vertex_attrib1fv(index, v):
pass
@accepts(t.uint, t.short)
@returns(t.void)
@binds(dll)
def vertex_attrib1s(index, x):
pass
@accepts(t.uint, POINTER(t.short))
@returns(t.void)
@binds(dll)
def vertex_attrib1sv(index, v):
pass
@accepts(t.uint, t.double, t.double)
@returns(t.void)
@binds(dll)
def vertex_attrib2d(index, x, y):
pass
@accepts(t.uint, POINTER(t.double))
@returns(t.void)
@binds(dll)
def vertex_attrib2dv(index, v):
pass
@accepts(t.uint, t.float, t.float)
@returns(t.void)
@binds(dll)
def vertex_attrib2f(index, x, y):
pass
@accepts(t.uint, POINTER(t.float))
@returns(t.void)
@binds(dll)
def vertex_attrib2fv(index, v):
pass
@accepts(t.uint, t.short, t.short)
@returns(t.void)
@binds(dll)
def vertex_attrib2s(index, x, y):
pass
@accepts(t.uint, POINTER(t.short))
@returns(t.void)
@binds(dll)
def vertex_attrib2sv(index, v):
pass
@accepts(t.uint, t.double, t.double, t.double)
@returns(t.void)
@binds(dll)
def vertex_attrib3d(index, x, y, z):
pass
@accepts(t.uint, POINTER(t.double))
@returns(t.void)
@binds(dll)
def vertex_attrib3dv(index, v):
pass
@accepts(t.uint, t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def vertex_attrib3f(index, x, y, z):
pass
@accepts(t.uint, POINTER(t.float))
@returns(t.void)
@binds(dll)
def vertex_attrib3fv(index, v):
pass
@accepts(t.uint, t.short, t.short, t.short)
@returns(t.void)
@binds(dll)
def vertex_attrib3s(index, x, y, z):
pass
@accepts(t.uint, POINTER(t.short))
@returns(t.void)
@binds(dll)
def vertex_attrib3sv(index, v):
pass
@accepts(t.uint, POINTER(t.byte))
@returns(t.void)
@binds(dll)
def vertex_attrib4_nbv(index, v):
pass
@accepts(t.uint, POINTER(t.int))
@returns(t.void)
@binds(dll)
def vertex_attrib4_niv(index, v):
pass
@accepts(t.uint, POINTER(t.short))
@returns(t.void)
@binds(dll)
def vertex_attrib4_nsv(index, v):
pass
@accepts(t.uint, t.ubyte, t.ubyte, t.ubyte, t.ubyte)
@returns(t.void)
@binds(dll)
def vertex_attrib4_nub(index, x, y, z, w):
pass
@accepts(t.uint, POINTER(t.ubyte))
@returns(t.void)
@binds(dll)
def vertex_attrib4_nubv(index, v):
pass
@accepts(t.uint, POINTER(t.uint))
@returns(t.void)
@binds(dll)
def vertex_attrib4_nuiv(index, v):
pass
@accepts(t.uint, POINTER(t.ushort))
@returns(t.void)
@binds(dll)
def vertex_attrib4_nusv(index, v):
pass
@accepts(t.uint, POINTER(t.byte))
@returns(t.void)
@binds(dll)
def vertex_attrib4bv(index, v):
pass
@accepts(t.uint, t.double, t.double, t.double, t.double)
@returns(t.void)
@binds(dll)
def vertex_attrib4d(index, x, y, z, w):
pass
@accepts(t.uint, POINTER(t.double))
@returns(t.void)
@binds(dll)
def vertex_attrib4dv(index, v):
pass
@accepts(t.uint, t.float, t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def vertex_attrib4f(index, x, y, z, w):
pass
@accepts(t.uint, POINTER(t.float))
@returns(t.void)
@binds(dll)
def vertex_attrib4fv(index, v):
pass
@accepts(t.uint, POINTER(t.int))
@returns(t.void)
@binds(dll)
def vertex_attrib4iv(index, v):
pass
@accepts(t.uint, t.short, t.short, t.short, t.short)
@returns(t.void)
@binds(dll)
def vertex_attrib4s(index, x, y, z, w):
pass
@accepts(t.uint, POINTER(t.short))
@returns(t.void)
@binds(dll)
def vertex_attrib4sv(index, v):
pass
@accepts(t.uint, POINTER(t.ubyte))
@returns(t.void)
@binds(dll)
def vertex_attrib4ubv(index, v):
pass
@accepts(t.uint, POINTER(t.uint))
@returns(t.void)
@binds(dll)
def vertex_attrib4uiv(index, v):
pass
@accepts(t.uint, POINTER(t.ushort))
@returns(t.void)
@binds(dll)
def vertex_attrib4usv(index, v):
pass
@accepts(t.uint, t.int, t.enum, t.boolean, t.sizei, t.void)
@returns(t.void)
@binds(dll)
def vertex_attrib_pointer(index, size, type, normalized, stride, pointer):
'''
define an array of generic vertex attribute data.
gl.vertex_attrib_pointer, gl.vertex_attrib_i_pointer and
gl.vertex_attrib_l_pointer specify the location and data format of the array
of generic vertex attributes at index index to use when rendering. size
specifies the number of components per attribute and must be 1, 2, 3, 4, or
gl.BGRA. type specifies the data type of each component, and stride
specifies the byte stride from one attribute to the next, allowing vertices
and attributes to be packed into a single array or stored in separate
arrays.
Args:
index: the index of the generic vertex attribute to be modified.
size: the number of components per generic vertex attribute.
type: the data type of each component in the array.
normalized: for glvertexattribpointer, specifies whether fixed-point
data values should be normalized (gl_true) or converted directly as
fixed-point values (gl_false) when they are accessed.
stride: the byte offset between consecutive generic vertex attributes.
        pointer: an offset of the first component of the first generic vertex
attribute in the array in the data store of the buffer currently
bound to the gl_array_buffer target.
'''
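# Editor's usage sketch (hypothetical): an interleaved position+colour layout, assuming a
# buffer with six floats per vertex is already bound to the ARRAY_BUFFER target and that
# FLOAT (GL_FLOAT) and an enable_vertex_attrib_array wrapper exist elsewhere in this module:
#   stride = 6 * 4                                                   # bytes per vertex
#   vertex_attrib_pointer(0, 3, FLOAT, False, stride, c_void_p(0))   # position at offset 0
#   vertex_attrib_pointer(1, 3, FLOAT, False, stride, c_void_p(12))  # colour at offset 12
#   enable_vertex_attrib_array(0)
#   enable_vertex_attrib_array(1)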
BLEND_EQUATION_RGB = 0x8009
VERTEX_ATTRIB_ARRAY_ENABLED = 0x8622
VERTEX_ATTRIB_ARRAY_SIZE = 0x8623
VERTEX_ATTRIB_ARRAY_STRIDE = 0x8624
VERTEX_ATTRIB_ARRAY_TYPE = 0x8625
CURRENT_VERTEX_ATTRIB = 0x8626
VERTEX_PROGRAM_POINT_SIZE = 0x8642
VERTEX_ATTRIB_ARRAY_POINTER = 0x8645
STENCIL_BACK_FUNC = 0x8800
STENCIL_BACK_FAIL = 0x8801
STENCIL_BACK_PASS_DEPTH_FAIL = 0x8802
STENCIL_BACK_PASS_DEPTH_PASS = 0x8803
MAX_DRAW_BUFFERS = 0x8824
DRAW_BUFFER0 = 0x8825
DRAW_BUFFER1 = 0x8826
DRAW_BUFFER2 = 0x8827
DRAW_BUFFER3 = 0x8828
DRAW_BUFFER4 = 0x8829
DRAW_BUFFER5 = 0x882A
DRAW_BUFFER6 = 0x882B
DRAW_BUFFER7 = 0x882C
DRAW_BUFFER8 = 0x882D
DRAW_BUFFER9 = 0x882E
DRAW_BUFFER10 = 0x882F
DRAW_BUFFER11 = 0x8830
DRAW_BUFFER12 = 0x8831
DRAW_BUFFER13 = 0x8832
DRAW_BUFFER14 = 0x8833
DRAW_BUFFER15 = 0x8834
BLEND_EQUATION_ALPHA = 0x883D
MAX_VERTEX_ATTRIBS = 0x8869
VERTEX_ATTRIB_ARRAY_NORMALIZED = 0x886A
MAX_TEXTURE_IMAGE_UNITS = 0x8872
FRAGMENT_SHADER = 0x8B30
VERTEX_SHADER = 0x8B31
MAX_FRAGMENT_UNIFORM_COMPONENTS = 0x8B49
MAX_VERTEX_UNIFORM_COMPONENTS = 0x8B4A
MAX_VARYING_FLOATS = 0x8B4B
MAX_VERTEX_TEXTURE_IMAGE_UNITS = 0x8B4C
MAX_COMBINED_TEXTURE_IMAGE_UNITS = 0x8B4D
SHADER_TYPE = 0x8B4F
FLOAT_VEC2 = 0x8B50
FLOAT_VEC3 = 0x8B51
FLOAT_VEC4 = 0x8B52
INT_VEC2 = 0x8B53
INT_VEC3 = 0x8B54
INT_VEC4 = 0x8B55
BOOL = 0x8B56
BOOL_VEC2 = 0x8B57
BOOL_VEC3 = 0x8B58
BOOL_VEC4 = 0x8B59
FLOAT_MAT2 = 0x8B5A
FLOAT_MAT3 = 0x8B5B
FLOAT_MAT4 = 0x8B5C
SAMPLER_1D = 0x8B5D
SAMPLER_2D = 0x8B5E
SAMPLER_3D = 0x8B5F
SAMPLER_CUBE = 0x8B60
SAMPLER_1D_SHADOW = 0x8B61
SAMPLER_2D_SHADOW = 0x8B62
DELETE_STATUS = 0x8B80
COMPILE_STATUS = 0x8B81
LINK_STATUS = 0x8B82
VALIDATE_STATUS = 0x8B83
INFO_LOG_LENGTH = 0x8B84
ATTACHED_SHADERS = 0x8B85
ACTIVE_UNIFORMS = 0x8B86
ACTIVE_UNIFORM_MAX_LENGTH = 0x8B87
SHADER_SOURCE_LENGTH = 0x8B88
ACTIVE_ATTRIBUTES = 0x8B89
ACTIVE_ATTRIBUTE_MAX_LENGTH = 0x8B8A
FRAGMENT_SHADER_DERIVATIVE_HINT = 0x8B8B
SHADING_LANGUAGE_VERSION = 0x8B8C
CURRENT_PROGRAM = 0x8B8D
POINT_SPRITE_COORD_ORIGIN = 0x8CA0
LOWER_LEFT = 0x8CA1
UPPER_LEFT = 0x8CA2
STENCIL_BACK_REF = 0x8CA3
STENCIL_BACK_VALUE_MASK = 0x8CA4
STENCIL_BACK_WRITEMASK = 0x8CA5
VERTEX_PROGRAM_TWO_SIDE = 0x8643
POINT_SPRITE = 0x8861
COORD_REPLACE = 0x8862
MAX_TEXTURE_COORDS = 0x8871 | 1,535 | 0 | 1,386 |
46197a584db2c1a4a57c3ce00e14574ba08eaec0 | 1,353 | py | Python | presearch_trrosetta/prepare/crawling.py | jobc90/Protein-Resarch | 0b3d9366cc66fdc50e791991c323de1ae7840a61 | [
"MIT"
] | null | null | null | presearch_trrosetta/prepare/crawling.py | jobc90/Protein-Resarch | 0b3d9366cc66fdc50e791991c323de1ae7840a61 | [
"MIT"
] | null | null | null | presearch_trrosetta/prepare/crawling.py | jobc90/Protein-Resarch | 0b3d9366cc66fdc50e791991c323de1ae7840a61 | [
"MIT"
] | 2 | 2021-06-29T00:06:50.000Z | 2021-06-29T04:21:49.000Z | from bs4 import BeautifulSoup
from urllib import request
from urllib.error import HTTPError
import tqdm
import os
import argparse
if __name__ == '__main__':
main() | 30.066667 | 86 | 0.617886 | from bs4 import BeautifulSoup
from urllib import request
from urllib.error import HTTPError
import tqdm
import os
import argparse
def get_download(url, fname):
try:
request.urlretrieve(url, fname)
except HTTPError as e:
print('error')
return
def get_casp_data(casps, download_path="./casp_data"):
for casp in casps:
os.makedirs(f'{download_path}', exist_ok=True)
html = request.urlopen(f'https://predictioncenter.org/{casp}/targetlist.cgi')
bsObject = BeautifulSoup(html, "html.parser")
target_fasta = []
for link in bsObject.find_all('a'):
if "www.rcsb.org" in link.get('href'):
# print(link.get(name))
target_fasta.append(link.get_text())
for name in tqdm.tqdm(set(target_fasta)):
name = name.upper()
download_link = f'https://files.rcsb.org/download/{name}.cif'
get_download(download_link, f'{download_path}/{name}.cif')
def main(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('--download_path', default='./cif')
args = parser.parse_args()
# todo : set the casp version.
casps = ['casp12', 'casp13', 'casp14']
get_casp_data(casps, args.download_path)
if __name__ == '__main__':
main() | 1,096 | 0 | 75 |
b413c46d5bf2d1d42f0aa6e600f5711c0f78b565 | 1,269 | py | Python | app/src/main/python/make_first_page.py | 108360224/watch_video | bfbcd0fbe617eceb974d8c1e9c976f47ad7b0814 | [
"MIT"
] | null | null | null | app/src/main/python/make_first_page.py | 108360224/watch_video | bfbcd0fbe617eceb974d8c1e9c976f47ad7b0814 | [
"MIT"
] | null | null | null | app/src/main/python/make_first_page.py | 108360224/watch_video | bfbcd0fbe617eceb974d8c1e9c976f47ad7b0814 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat May 2 11:26:18 2020
@author: max
"""
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import requests | 32.538462 | 117 | 0.62569 | # -*- coding: utf-8 -*-
"""
Created on Sat May 2 11:26:18 2020
@author: max
"""
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import requests
def make_first_page(src,text):
def cv2ImgAddText(img, text, left, top, textColor=(0, 0, 0), textSize=10):
        if (isinstance(img, np.ndarray)):  # check whether the input is an OpenCV image (numpy array)
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
fontText = ImageFont.truetype("font/simsun.ttc", textSize, encoding="utf-8")
draw.text((left, top), text, textColor, font=fontText)
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
try:
response = requests.get(src,timeout=3)
nparr = np.frombuffer(response.content, np.uint8)
im = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
except:
im=np.zeros((90,120,3), np.uint8)
im=cv2.resize(im,(90,120))
im=cv2.copyMakeBorder(src=im,left=0,right=0,top=0,bottom=25,borderType=cv2.BORDER_CONSTANT,value=[255, 255, 255])
    # wrap the caption onto lines of at most 12 characters; short captions are kept as-is
    if len(text)>24:
        tex=text[:12]+'\n'+text[12:24]+'\n'+text[24:]
    elif len(text)>12:
        tex=text[:12]+'\n'+text[12:]
    else:
        tex=text
im=cv2ImgAddText(im,tex,2,121)
#cv2.imwrite("CV.jpg", im)
return im | 1,092 | 0 | 22 |
80326bb606fdfb5c3b5b1a0edb1e62da0bed7444 | 2,413 | py | Python | tests/config_test.py | alex/asciinema | ff23896174c07719d3b2ace6320a193934a0ac71 | [
"MIT"
] | 1 | 2015-11-08T13:00:51.000Z | 2015-11-08T13:00:51.000Z | tests/config_test.py | alex/asciinema | ff23896174c07719d3b2ace6320a193934a0ac71 | [
"MIT"
] | null | null | null | tests/config_test.py | alex/asciinema | ff23896174c07719d3b2ace6320a193934a0ac71 | [
"MIT"
] | null | null | null | from nose.tools import assert_equal
import os
import tempfile
import re
from asciinema.config import Config
| 32.608108 | 76 | 0.645669 | from nose.tools import assert_equal
import os
import tempfile
import re
from asciinema.config import Config
def create_config(content=None, overrides={}):
dir = tempfile.mkdtemp()
path = dir + '/config'
if content:
with open(path, 'w') as f:
f.write(content)
return Config(path, overrides)
class TestConfig(object):
def test_api_url_when_no_file_and_no_override_set(self):
config = create_config()
assert_equal('https://asciinema.org', config.api_url)
def test_api_url_when_no_url_set_and_no_override_set(self):
config = create_config('')
assert_equal('https://asciinema.org', config.api_url)
def test_api_url_when_url_set_and_no_override_set(self):
config = create_config("[api]\nurl = http://the/url")
assert_equal('http://the/url', config.api_url)
def test_api_url_when_url_set_and_override_set(self):
config = create_config("[api]\nurl = http://the/url", {
'ASCIINEMA_API_URL': 'http://the/url2' })
assert_equal('http://the/url2', config.api_url)
def test_api_token_when_no_file(self):
config = create_config()
assert re.match('^\w{8}-\w{4}-\w{4}-\w{4}-\w{12}', config.api_token)
assert os.path.isfile(config.path)
def test_api_token_when_no_dir(self):
config = create_config()
dir = os.path.dirname(config.path)
os.rmdir(dir)
assert re.match('^\w{8}-\w{4}-\w{4}-\w{4}-\w{12}', config.api_token)
assert os.path.isfile(config.path)
def test_api_token_when_no_api_token_set(self):
config = create_config('')
assert re.match('^\w{8}-\w{4}-\w{4}-\w{4}-\w{12}', config.api_token)
def test_api_token_when_api_token_set(self):
token = 'foo-bar-baz'
config = create_config("[api]\ntoken = %s" % token)
assert re.match(token, config.api_token)
def test_api_token_when_api_token_set_as_user_token(self):
token = 'foo-bar-baz'
config = create_config("[user]\ntoken = %s" % token)
assert re.match(token, config.api_token)
def test_api_token_when_api_token_set_and_user_token_set(self):
user_token = 'foo'
api_token = 'bar'
config = create_config("[user]\ntoken = %s\n[api]\ntoken = %s" %
(user_token, api_token))
assert re.match(api_token, config.api_token)
| 1,981 | 4 | 316 |
fcd925ed6692d34e5f41292cc768682829c1cd50 | 4,707 | py | Python | decoder/utils.py | Am473ur/HexQBot | 0c9605ec972ff43ce626a257bc087bf614379d6d | [
"Apache-2.0"
] | 20 | 2020-07-13T17:18:41.000Z | 2022-03-02T01:21:58.000Z | decoder/utils.py | Am473ur/HexQBot | 0c9605ec972ff43ce626a257bc087bf614379d6d | [
"Apache-2.0"
] | 1 | 2020-07-14T15:26:02.000Z | 2020-07-17T15:07:01.000Z | decoder/utils.py | Am473ur/HexQBot | 0c9605ec972ff43ce626a257bc087bf614379d6d | [
"Apache-2.0"
] | 2 | 2020-11-17T13:10:13.000Z | 2020-11-17T13:30:47.000Z | import requests
from base64 import b32encode, b32decode, b64encode, b64decode, b85encode, b85decode
from base58 import b58decode, b58encode
import base91
import hashlib
import json
def ensure_str(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to `str`.
For Python 3:
- `str` -> `str`
- `bytes or bytearray` -> decoded to `str`
"""
if not isinstance(s, (str, bytes, bytearray)):
raise TypeError("not expecting type '%s'" % type(s))
if isinstance(s, (bytes, bytearray)):
s = s.decode(encoding, errors)
return s
if __name__ == '__main__':
test()
| 37.959677 | 102 | 0.577438 | import requests
from base64 import b32encode, b32decode, b64encode, b64decode, b85encode, b85decode
from base58 import b58decode, b58encode
import base91
import hashlib
import json
def ensure_str(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to `str`.
For Python 3:
- `str` -> `str`
- `bytes or bytearray` -> decoded to `str`
"""
if not isinstance(s, (str, bytes, bytearray)):
raise TypeError("not expecting type '%s'" % type(s))
if isinstance(s, (bytes, bytearray)):
s = s.decode(encoding, errors)
return s
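# Editor's note - example behaviour of ensure_str:
#   ensure_str("abc") -> "abc"; ensure_str(b"abc") -> "abc"; ensure_str(3) raises TypeError.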
def check_ascii(s):
return all((c < 128 and c > 32) for c in s)
def int_to_ascii(s):
s_bytes = bytes.fromhex(('0' if len(hex(int(s))[2:])%2 else '') + hex(int(s))[2:])
try:
return [True, s_bytes.decode(), "Integer to String"]
except:
return [False, "Integer to String failed"]
def bin_to_ascii(s):
s_num = int(s, 2)
s_bytes = bytes.fromhex(('0' if len(hex(s_num)[2:])%2 else '') + hex(s_num)[2:])
if check_ascii(s_bytes):
return [True, s_bytes.decode(), "Binary to String"]
return [True, str(s_num), "Binary to Integer"]
def hashdb(s):
hashs = {32: "MD5", 40: "SHA1", 64: "SHA256", 96: "SHA384", 128: "SHA512"}
URL = "http://www.ttmd5.com/do.php?c=Decode&m=getMD5&md5={}".format(s)
try:
res = requests.post(URL)
res = json.loads(res.text)
except:
return [False, "{} decode failed".format(hashs[len(s)])]
if (res['flag'] == 1) and ("***" not in res['plain']):
hash_type = res['type'] if res['type'] != '' else 'md5'
return [True, res['plain'], hash_type.upper()+" decode"]
else:
return [False, "{} decode failed".format(hashs[len(s)])]
def base16_decode(s):
s_bytes = bytes.fromhex(('0' if len(s)%2 else '') + s)
try:
return [True, s_bytes.decode(), "Hex to String"]
except:
return [False, "Hex decode failed"]
def base32_decode(s):
try:
return [True, b32decode(s.encode()).decode(), "Base32 decode"]
except:
return [False, "Base32 decode failed"]
def base58_decode(s):
try:
return [True, b58decode(s.encode()).decode(), "Base58 decode"]
except:
return [False, "Base58 decode failed"]
def base64_decode(s):
try:
return [True, b64decode(s.encode()).decode(), "Base64 decode"]
except:
return [False, "Base64 decode failed"]
def base85_decode(s):
try:
return [True, b85decode(s.encode()).decode(), "Base85 decode"]
except:
return [False, "Base85 decode failed"]
def base91_decode(s):
try:
return [True, bytes(base91.decode(s)).decode(), "Base91 decode"]
except:
return [False, "Base91 decode failed"]
def test():
message = ensure_str("Hi~ i'm Hex...")
MD5 = hashlib.md5(b"123456").hexdigest()
md5_res = hashdb(MD5)
if md5_res[0] and md5_res[1] == "123456": print("\033[32m[+]\033[0mRequest hashdb successfully!")
else: print("\033[31m[!]\033[0mError: Cannot request hashdb!")
test_b16 = base16_decode(message.encode().hex())
if test_b16[0] and test_b16[1] == message: print("\033[32m[+]\033[0mSuccess:", test_b16[-1])
else: print("\033[31m[!]\033[0mError:", test_b16[-1])
test_b32 = base32_decode(b32encode(message.encode()).decode())
if test_b32[0] and test_b32[1] == message: print("\033[32m[+]\033[0mSuccess:", test_b32[-1])
else: print("\033[31m[!]\033[0mError:", test_b32[-1])
test_b58 = base58_decode(b58encode(message.encode()).decode())
if test_b58[0] and test_b58[1] == message: print("\033[32m[+]\033[0mSuccess:", test_b58[-1])
else: print("\033[31m[!]\033[0mError:", test_b58[-1])
test_b64 = base64_decode(b64encode(message.encode()).decode())
if test_b64[0] and test_b64[1] == message: print("\033[32m[+]\033[0mSuccess:", test_b64[-1])
else: print("\033[31m[!]\033[0mError:", test_b64[-1])
test_b85 = base85_decode(b85encode(message.encode()).decode())
if test_b85[0] and test_b85[1] == message: print("\033[32m[+]\033[0mSuccess:", test_b85[-1])
else: print("\033[31m[!]\033[0mError:", test_b85[-1])
test_b91 = base91_decode(base91.encode(message.encode()))
if test_b91[0] and test_b91[1] == message: print("\033[32m[+]\033[0mSuccess:", test_b91[-1])
else: print("\033[31m[!]\033[0mError:", test_b91[-1])
if __name__ == '__main__':
test()
| 3,839 | 0 | 253 |
51318ed4899cccb49c4022e5edbbaa06ce91410f | 6,879 | py | Python | model_neu/princeton/mod_1.py | lelange/cu-ssp | 9f1a7abf79a2fb6ef2ae0f37de79469c2dc3488f | [
"MIT"
] | null | null | null | model_neu/princeton/mod_1.py | lelange/cu-ssp | 9f1a7abf79a2fb6ef2ae0f37de79469c2dc3488f | [
"MIT"
] | null | null | null | model_neu/princeton/mod_1.py | lelange/cu-ssp | 9f1a7abf79a2fb6ef2ae0f37de79469c2dc3488f | [
"MIT"
] | null | null | null | """
Cascaded Convolution Model
- Pranav Shrestha (ps2958)
- Jeffrey Wan (jw3468)
"""
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.preprocessing import text, sequence
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.models import *
from keras.layers import *
from sklearn.model_selection import train_test_split, KFold
from keras.metrics import categorical_accuracy
from keras import backend as K
from keras.regularizers import l1, l2
import tensorflow as tf
import keras
from keras.callbacks import EarlyStopping ,ModelCheckpoint
from utils import *
### Data Retrieval
cb513filename = '../data/data_princeton/cb513.npy'
cb6133filteredfilename = '../data/data_princeton/cb6133filtered.npy'
maxlen_seq = r = 700 # protein residues padded to 700
f = 57 # number of features for each residue
#load train
train_df, X_aug_train = load_augmented_data(cb6133filteredfilename ,maxlen_seq)
train_input_seqs, train_target_seqs = train_df[['input', 'expected']][(train_df.len <= maxlen_seq)].values.T
#load test
test_df, X_aug_test = load_augmented_data(cb513filename,maxlen_seq)
test_input_seqs, test_target_seqs = test_df[['input','expected']][(test_df.len <= maxlen_seq)].values.T
#tokenizer
# Converting the inputs to trigrams
train_input_grams = seq2ngrams(train_input_seqs)
# Initializing and defining the tokenizer encoders and decoders based on the train set
tokenizer_encoder = Tokenizer()
tokenizer_encoder.fit_on_texts(train_input_grams)
tokenizer_decoder = Tokenizer(char_level = True)
tokenizer_decoder.fit_on_texts(train_target_seqs)
# Using the tokenizer to encode and decode the sequences for use in training
#train inputs
train_input_data = tokenizer_encoder.texts_to_sequences(train_input_grams)
X_train = sequence.pad_sequences(train_input_data, maxlen = maxlen_seq, padding = 'post')
#train targets
train_target_data = tokenizer_decoder.texts_to_sequences(train_target_seqs)
train_target_data = sequence.pad_sequences(train_target_data, maxlen = maxlen_seq, padding = 'post')
y_train = to_categorical(train_target_data)
#test inputs
test_input_grams = seq2ngrams(test_input_seqs)
test_input_data = tokenizer_encoder.texts_to_sequences(test_input_grams)
X_test = sequence.pad_sequences(test_input_data, maxlen = maxlen_seq, padding = 'post')
#test targets
test_target_data = tokenizer_decoder.texts_to_sequences(test_target_seqs)
test_target_data = sequence.pad_sequences(test_target_data, maxlen = maxlen_seq, padding = 'post')
y_test = to_categorical(test_target_data)
# Computing the number of words and number of tags
n_words = len(tokenizer_encoder.word_index) + 1
n_tags = len(tokenizer_decoder.word_index) + 1
#### validation data
n_samples = len(train_df)
np.random.seed(0)
validation_idx = np.random.choice(np.arange(n_samples), size=300, replace=False)
training_idx = np.array(list(set(np.arange(n_samples))-set(validation_idx)))
X_val = X_train[validation_idx]
X_train = X_train[training_idx]
y_val = y_train[validation_idx]
y_train = y_train[training_idx]
X_aug_val = X_aug_train[validation_idx]
X_aug_train = X_aug_train[training_idx]
#### end validation
p = {'activation1':[relu, softmax],
'activation2':[relu, softmax],
'optimizer': ['Nadam', "RMSprop"],
'losses': ['categorical_crossentropy', keras.losses.binary_crossentropy],
'first_hidden_layer': [10, 8, 6],
'second_hidden_layer': [2, 4, 6],
'batch_size': [64, 128, 10000],
'epochs': [50, 75]}
def train(X_train, y_train, X_val=None, y_val=None):
"""
Main Training function with the following properties:
Optimizer - Nadam
Loss function - Categorical Crossentropy
    Batch Size - 128 (any more will exceed Colab GPU RAM)
    Epochs - 75
"""
model = CNN_BIGRU()
model.compile(
optimizer="Nadam",
loss="categorical_crossentropy",
metrics=["accuracy", accuracy])
if X_val is not None and y_val is not None:
earlyStopping = EarlyStopping(monitor='val_accuracy', patience=10, verbose=1, mode='max')
checkpointer = ModelCheckpoint(filepath=load_file, monitor='val_accuracy', verbose=1, save_best_only=True,
mode='max')
# Training the model on the training data and validating using the validation set
history = model.fit(X_train, y_train, validation_data=(X_val, y_val),
epochs=75, batch_size=128, callbacks=[checkpointer, earlyStopping], verbose=1, shuffle=True)
else:
history = model.fit(X_train, y_train,
batch_size=128, epochs=75)
return history, model
""" Build model """
load_file = "./model/mod_1-CB513-"+datetime.now().strftime("%Y_%m_%d-%H_%M")+".h5"
history, model = train([X_train, X_aug_train], y_train, X_val=[X_val, X_aug_val], y_val=y_val)
model.load_weights(load_file)
print("####evaluate:")
score = model.evaluate([X_test,X_aug_test], y_test, verbose=2, batch_size=1)
print(score)
print ('test loss:', score[0])
print ('test accuracy:', score[2]) | 34.918782 | 120 | 0.725687 | """
Cascaded Convolution Model
- Pranav Shrestha (ps2958)
- Jeffrey Wan (jw3468)
"""
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.preprocessing import text, sequence
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.models import *
from keras.layers import *
from sklearn.model_selection import train_test_split, KFold
from keras.metrics import categorical_accuracy
from keras import backend as K
from keras.regularizers import l1, l2
import tensorflow as tf
import keras
from keras.callbacks import EarlyStopping ,ModelCheckpoint
from utils import *
### Data Retrieval
cb513filename = '../data/data_princeton/cb513.npy'
cb6133filteredfilename = '../data/data_princeton/cb6133filtered.npy'
maxlen_seq = r = 700 # protein residues padded to 700
f = 57 # number of features for each residue
#load train
train_df, X_aug_train = load_augmented_data(cb6133filteredfilename ,maxlen_seq)
train_input_seqs, train_target_seqs = train_df[['input', 'expected']][(train_df.len <= maxlen_seq)].values.T
#load test
test_df, X_aug_test = load_augmented_data(cb513filename,maxlen_seq)
test_input_seqs, test_target_seqs = test_df[['input','expected']][(test_df.len <= maxlen_seq)].values.T
#tokenizer
# Converting the inputs to trigrams
train_input_grams = seq2ngrams(train_input_seqs)
# Initializing and defining the tokenizer encoders and decoders based on the train set
tokenizer_encoder = Tokenizer()
tokenizer_encoder.fit_on_texts(train_input_grams)
tokenizer_decoder = Tokenizer(char_level = True)
tokenizer_decoder.fit_on_texts(train_target_seqs)
# Using the tokenizer to encode and decode the sequences for use in training
#train inputs
train_input_data = tokenizer_encoder.texts_to_sequences(train_input_grams)
X_train = sequence.pad_sequences(train_input_data, maxlen = maxlen_seq, padding = 'post')
#train targets
train_target_data = tokenizer_decoder.texts_to_sequences(train_target_seqs)
train_target_data = sequence.pad_sequences(train_target_data, maxlen = maxlen_seq, padding = 'post')
y_train = to_categorical(train_target_data)
#test inputs
test_input_grams = seq2ngrams(test_input_seqs)
test_input_data = tokenizer_encoder.texts_to_sequences(test_input_grams)
X_test = sequence.pad_sequences(test_input_data, maxlen = maxlen_seq, padding = 'post')
#test targets
test_target_data = tokenizer_decoder.texts_to_sequences(test_target_seqs)
test_target_data = sequence.pad_sequences(test_target_data, maxlen = maxlen_seq, padding = 'post')
y_test = to_categorical(test_target_data)
# Computing the number of words and number of tags
n_words = len(tokenizer_encoder.word_index) + 1
n_tags = len(tokenizer_decoder.word_index) + 1
#### validation data
n_samples = len(train_df)
np.random.seed(0)
validation_idx = np.random.choice(np.arange(n_samples), size=300, replace=False)
training_idx = np.array(list(set(np.arange(n_samples))-set(validation_idx)))
X_val = X_train[validation_idx]
X_train = X_train[training_idx]
y_val = y_train[validation_idx]
y_train = y_train[training_idx]
X_aug_val = X_aug_train[validation_idx]
X_aug_train = X_aug_train[training_idx]
#### end validation
p = {'activation1':[relu, softmax],
'activation2':[relu, softmax],
'optimizer': ['Nadam', "RMSprop"],
'losses': ['categorical_crossentropy', keras.losses.binary_crossentropy],
'first_hidden_layer': [10, 8, 6],
'second_hidden_layer': [2, 4, 6],
'batch_size': [64, 128, 10000],
'epochs': [50, 75]}
def train(X_train, y_train, X_val=None, y_val=None):
"""
Main Training function with the following properties:
Optimizer - Nadam
Loss function - Categorical Crossentropy
    Batch Size - 128 (any more will exceed Colab GPU RAM)
    Epochs - 75
"""
model = CNN_BIGRU()
model.compile(
optimizer="Nadam",
loss="categorical_crossentropy",
metrics=["accuracy", accuracy])
if X_val is not None and y_val is not None:
earlyStopping = EarlyStopping(monitor='val_accuracy', patience=10, verbose=1, mode='max')
checkpointer = ModelCheckpoint(filepath=load_file, monitor='val_accuracy', verbose=1, save_best_only=True,
mode='max')
# Training the model on the training data and validating using the validation set
history = model.fit(X_train, y_train, validation_data=(X_val, y_val),
epochs=75, batch_size=128, callbacks=[checkpointer, earlyStopping], verbose=1, shuffle=True)
else:
history = model.fit(X_train, y_train,
batch_size=128, epochs=75)
return history, model
""" Build model """
def conv_block(x, activation=True, batch_norm=True, drop_out=True, res=True):
cnn = Conv1D(64, 11, padding="same")(x)
if activation: cnn = TimeDistributed(Activation("relu"))(cnn)
if batch_norm: cnn = TimeDistributed(BatchNormalization())(cnn)
if drop_out: cnn = TimeDistributed(Dropout(0.5))(cnn)
if res: cnn = Concatenate(axis=-1)([x, cnn])
return cnn
def super_conv_block(x):
c3 = Conv1D(32, 1, padding="same")(x)
c3 = TimeDistributed(Activation("relu"))(c3)
c3 = TimeDistributed(BatchNormalization())(c3)
c7 = Conv1D(64, 3, padding="same")(x)
c7 = TimeDistributed(Activation("relu"))(c7)
c7 = TimeDistributed(BatchNormalization())(c7)
c11 = Conv1D(128, 5, padding="same")(x)
c11 = TimeDistributed(Activation("relu"))(c11)
c11 = TimeDistributed(BatchNormalization())(c11)
x = Concatenate(axis=-1)([x, c3, c7, c11])
x = TimeDistributed(Dropout(0.5))(x)
return x
def CNN_BIGRU():
input = Input(shape=(maxlen_seq,))
embed_out = Embedding(input_dim=n_words, output_dim=128, input_length=maxlen_seq)(input)
profile_input = Input(shape=(maxlen_seq, 22))
x = concatenate([embed_out, profile_input]) # 5600, 700, 150
x = super_conv_block(x)
x = conv_block(x)
x = super_conv_block(x)
x = conv_block(x)
x = super_conv_block(x)
x = conv_block(x)
x = Bidirectional(CuDNNGRU(units=256, return_sequences=True, recurrent_regularizer=l2(0.2)))(x)
x = TimeDistributed(Dropout(0.5))(x)
x = TimeDistributed(Dense(256, activation="relu"))(x)
x = TimeDistributed(Dropout(0.5))(x)
y = TimeDistributed(Dense(n_tags, activation="softmax"))(x)
model = Model([input, profile_input], y)
model.summary()
return model
load_file = "./model/mod_1-CB513-"+datetime.now().strftime("%Y_%m_%d-%H_%M")+".h5"
history, model = train([X_train, X_aug_train], y_train, X_val=[X_val, X_aug_val], y_val=y_val)
model.load_weights(load_file)
print("####evaluate:")
score = model.evaluate([X_test,X_aug_test], y_test, verbose=2, batch_size=1)
print(score)
print ('test loss:', score[0])
print ('test accuracy:', score[2]) | 1,690 | 0 | 69 |
67426b9d856287309ee4d0aa254d80958bd28adc | 689 | py | Python | stage1/Criteria.py | zyl1205/mywork | 04b02b5f72dde17f094b169459385ca8635ecb95 | [
"MIT"
] | null | null | null | stage1/Criteria.py | zyl1205/mywork | 04b02b5f72dde17f094b169459385ca8635ecb95 | [
"MIT"
] | null | null | null | stage1/Criteria.py | zyl1205/mywork | 04b02b5f72dde17f094b169459385ca8635ecb95 | [
"MIT"
] | null | null | null | #
#author :Sachin Mehta
#Description : This repository contains source code for semantically segmenting WSIs; however, it could be easily
# adapted for other domains such as natural image segmentation
#File Description: This file implements the Cross entropy loss for 2D data.
#==============================================================================
import torch
import torch.nn as nn
import torch.nn.functional as F | 34.45 | 117 | 0.624093 | #
#author :Sachin Mehta
#Description : This repository contains source code for semantically segmenting WSIs; however, it could be easily
# adapted for other domains such as natural image segmentation
#File Description: This file implements the Cross entropy loss for 2D data.
#==============================================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
class CrossEntropyLoss2d(nn.Module):
def __init__(self, weight):
super().__init__()
        self.loss = nn.NLLLoss2d(weight)  # pass the class weights through to the loss
def forward(self, outputs, targets):
return self.loss(F.log_softmax(outputs), targets) | 142 | 15 | 77 |
aba6561375f6e3d6efb4747ad67f9608dec2e9bc | 2,419 | py | Python | aardvark/aardvark_connection.py | dalyIsaac/PIM_Mini_Tests | 6929d2ab2e580333fb8a5487a752d9961da91978 | [
"Unlicense"
] | null | null | null | aardvark/aardvark_connection.py | dalyIsaac/PIM_Mini_Tests | 6929d2ab2e580333fb8a5487a752d9961da91978 | [
"Unlicense"
] | null | null | null | aardvark/aardvark_connection.py | dalyIsaac/PIM_Mini_Tests | 6929d2ab2e580333fb8a5487a752d9961da91978 | [
"Unlicense"
] | null | null | null | """
Defines the abstract class Aardvark Connection, which is used by child classes
to test the I2C and SPI connections.
"""
from abc import ABCMeta, abstractmethod
import unittest
from aardvark_py import aa_find_devices_ext, aa_open, aa_close, AA_PORT_NOT_FREE
class AardvarkConnection(unittest.TestCase):
"""
Abstract class which is used by child classes to test the I2C and SPI connections.
"""
__metaclass__ = ABCMeta
@staticmethod
@abstractmethod
def get_port_number():
"""Returns the port number"""
pass
def test_01_port_ready(self):
"""Tests that PORT_NUMBER is connected and available"""
num, ports, unique_ids = aa_find_devices_ext(16, 16)
self.assertGreater(num, 0) # check that devices have been returned
if num > 0:
# dictionary of form = port : (unique_id, in_use_status)
devices = {}
for i in range(num):
port, in_use_status = AardvarkConnection.get_status(ports[i])
devices[port] = unique_ids, in_use_status
# checks that the port is detected
self.assertEqual(self.port_number in devices.keys(), True)
# checks that it's available
self.assertEqual(devices[self.port_number][1], False)
@staticmethod
def get_status(port):
"""Returns the status of the port and the port number"""
if port & AA_PORT_NOT_FREE:
port = port & ~AA_PORT_NOT_FREE
return port, True
return port, False
def test_02_open_close(self):
"""Tests that the port can be successfully opened and closed"""
handle = aa_open(self.port_number)
self.assertGreater(handle, 0) # check that the port is open
self.configure()
_, status = AardvarkConnection.get_status(self.port_number)
self.assertEqual(status, False)
num_closed = aa_close(handle)
self.assertEqual(num_closed, 1)
@abstractmethod
def configure(self):
"""
Configures the following attributes:
- handle_config
- pullup_resistors
- target_power
- bitrate
- bus_timeout
"""
pass
| 32.253333 | 86 | 0.641174 | """
Defines the abstract class Aardvark Connection, which is used by child classes
to test the I2C and SPI connections.
"""
from abc import ABCMeta, abstractmethod
import unittest
from aardvark_py import aa_find_devices_ext, aa_open, aa_close, AA_PORT_NOT_FREE
class AardvarkConnection(unittest.TestCase):
"""
Abstract class which is used by child classes to test the I2C and SPI connections.
"""
__metaclass__ = ABCMeta
def __init__(self, methodName='runTest'):
self.port_number = AardvarkConnection.get_port_number()
self.handle = None
unittest.TestCase.__init__(self, methodName)
@staticmethod
@abstractmethod
def get_port_number():
"""Returns the port number"""
pass
def test_01_port_ready(self):
"""Tests that PORT_NUMBER is connected and available"""
num, ports, unique_ids = aa_find_devices_ext(16, 16)
self.assertGreater(num, 0) # check that devices have been returned
if num > 0:
# dictionary of form = port : (unique_id, in_use_status)
devices = {}
for i in range(num):
port, in_use_status = AardvarkConnection.get_status(ports[i])
devices[port] = unique_ids, in_use_status
# checks that the port is detected
self.assertEqual(self.port_number in devices.keys(), True)
# checks that it's available
self.assertEqual(devices[self.port_number][1], False)
@staticmethod
def get_status(port):
"""Returns the status of the port and the port number"""
if port & AA_PORT_NOT_FREE:
port = port & ~AA_PORT_NOT_FREE
return port, True
return port, False
def test_02_open_close(self):
"""Tests that the port can be successfully opened and closed"""
handle = aa_open(self.port_number)
self.assertGreater(handle, 0) # check that the port is open
self.configure()
_, status = AardvarkConnection.get_status(self.port_number)
self.assertEqual(status, False)
num_closed = aa_close(handle)
self.assertEqual(num_closed, 1)
@abstractmethod
def configure(self):
"""
Configures the following attributes:
- handle_config
- pullup_resistors
- target_power
- bitrate
- bus_timeout
"""
pass
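# Editor's sketch (not part of the original file): a concrete subclass only needs to supply
# the port number and the configuration step, e.g. for a hypothetical I2C test case:
#   class I2CConnection(AardvarkConnection):
#       """Connection tests for the Aardvark adapter on port 0 (I2C)."""
#       @staticmethod
#       def get_port_number():
#           return 0
#       def configure(self):
#           pass  # set handle_config, pullup_resistors, target_power, bitrate, bus_timeout here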
| 164 | 0 | 27 |
2489033ff8cd99e1f60ac2c043d6d3a6b725a1e8 | 2,117 | py | Python | Nut-defect-detection/dataset.py | GT-AcerZhang/Dive-into-Computer-Vision-in-PaddlePaddle2.0 | a0ee2058996bd2cac4e46bb4c0d93520251173fd | [
"Apache-2.0"
] | 5 | 2020-12-06T12:48:29.000Z | 2021-02-27T16:45:50.000Z | Nut-defect-detection/dataset.py | GT-AcerZhang/Dive-into-Computer-Vision-in-PaddlePaddle2.0 | a0ee2058996bd2cac4e46bb4c0d93520251173fd | [
"Apache-2.0"
] | null | null | null | Nut-defect-detection/dataset.py | GT-AcerZhang/Dive-into-Computer-Vision-in-PaddlePaddle2.0 | a0ee2058996bd2cac4e46bb4c0d93520251173fd | [
"Apache-2.0"
] | 2 | 2021-02-22T06:36:54.000Z | 2021-03-05T09:32:03.000Z | #!/usr/bin/env python
# _*_coding:utf-8 _*_
#@Time :2020/12/8 13:52
#@Author :Wenbo
#@FileName: dataset.py.py
import numpy as np
from PIL import Image
from paddle.io import Dataset
import paddle.vision.transforms as T
import paddle as pd
class MyDataset(Dataset):
"""
    Step 1: inherit from the paddle.io.Dataset class
"""
def __init__(self, txt, transform=None):
"""
        Step 2: implement the constructor; define how the data is read and how the training and test sets are split
"""
super(MyDataset, self).__init__()
imgs = []
f = open(txt, 'r')
for line in f:
line = line.strip('\n')
line = line.rstrip('\n')
words = line.split()
imgs.append((words[0], int(words[1])))
self.imgs = imgs
self.transform = transform
# self.loader = loader
# if __name__ == '__main__':
# transform = T.Compose([
# T.RandomResizedCrop([448,448]),
# T.Transpose(),
# T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
# ])
#
# train_data=MyDataset(txt=r'C:\Users\11982\Desktop\paddle\train.txt', transform=transform)
#
# for i in range(len(train_data)):
# sample = train_data[i]
# print(sample[0], sample[1].shape)
| 32.075758 | 100 | 0.529523 | #!/usr/bin/env python
# _*_coding:utf-8 _*_
#@Time :2020/12/8 13:52
#@Author :Wenbo
#@FileName: dataset.py.py
import numpy as np
from PIL import Image
from paddle.io import Dataset
import paddle.vision.transforms as T
import paddle as pd
class MyDataset(Dataset):
"""
    Step 1: inherit from the paddle.io.Dataset class
"""
def __init__(self, txt, transform=None):
"""
        Step 2: implement the constructor; define how the data is read and how the training and test sets are split
"""
super(MyDataset, self).__init__()
imgs = []
f = open(txt, 'r')
for line in f:
line = line.strip('\n')
line = line.rstrip('\n')
words = line.split()
imgs.append((words[0], int(words[1])))
self.imgs = imgs
self.transform = transform
# self.loader = loader
    def __getitem__(self, index):  # this method is required: it reads each element's content by index
fn, label = self.imgs[index]
        # fn is the image path; fn and label come from imgs[index], i.e. word[0] and word[1] of each line
img = Image.open(fn)
img = img.convert("RGB")
img = np.array(img).astype('float32')
img *= 0.007843
label = np.array([label]).astype(dtype='int64')
        # the image is read by its path
if self.transform is not None:
img = self.transform(img)
        # the data label is converted to a Tensor
return img, label
    # whatever is returned here is what each batch yields when we loop over the data during training
    # ********************************** # use __len__() to expose the dataset size needed when the dataset is consumed **********************
def __len__(self):
        # this method is also required: it returns the dataset length (the number of images), which is distinct from the loader length
return len(self.imgs)
# if __name__ == '__main__':
# transform = T.Compose([
# T.RandomResizedCrop([448,448]),
# T.Transpose(),
# T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
# ])
#
# train_data=MyDataset(txt=r'C:\Users\11982\Desktop\paddle\train.txt', transform=transform)
#
# for i in range(len(train_data)):
# sample = train_data[i]
# print(sample[0], sample[1].shape)
| 1,047 | 0 | 55 |
603c6f5c42f519d2b48f8c534d7356c54de50fb9 | 3,816 | py | Python | avatar2/targets/gdb_target.py | m3lixir-nyu/avatar2 | fde07d1efa4b5ae6f3be5f19ae508ff19ed840d0 | [
"Apache-2.0"
] | 16 | 2020-12-14T21:31:25.000Z | 2022-01-26T03:21:40.000Z | avatar2/targets/gdb_target.py | m3lixir-nyu/avatar2 | fde07d1efa4b5ae6f3be5f19ae508ff19ed840d0 | [
"Apache-2.0"
] | 3 | 2021-07-27T19:36:05.000Z | 2021-12-31T02:20:53.000Z | avatar2/targets/gdb_target.py | m3lixir-nyu/avatar2 | fde07d1efa4b5ae6f3be5f19ae508ff19ed840d0 | [
"Apache-2.0"
] | 8 | 2020-12-30T13:55:20.000Z | 2022-01-17T03:20:36.000Z | from avatar2.targets import Target, TargetStates
from avatar2.protocols.gdb import GDBProtocol
from .target import action_valid_decorator_factory, synchronize_state
from ..watchmen import watch
| 39.75 | 85 | 0.587264 | from avatar2.targets import Target, TargetStates
from avatar2.protocols.gdb import GDBProtocol
from .target import action_valid_decorator_factory, synchronize_state
from ..watchmen import watch
class GDBTarget(Target):
def __init__(self, avatar,
gdb_executable=None, gdb_additional_args=None,
gdb_ip='127.0.0.1', gdb_port=3333,
gdb_serial_device='/dev/ttyACM0',
gdb_serial_baud_rate=38400,
gdb_serial_parity='none',
gdb_verbose_mi=False,
serial=False,
enable_init_files=False,
local_binary=None,
arguments=None,
**kwargs
):
super(GDBTarget, self).__init__(avatar, **kwargs)
self.gdb_executable = (gdb_executable if gdb_executable is not None
else self._arch.get_gdb_executable())
self.gdb_additional_args = gdb_additional_args if gdb_additional_args else []
self.gdb_ip = gdb_ip
self.gdb_port = gdb_port
self.gdb_serial_device = gdb_serial_device
self.gdb_serial_baud_rate = gdb_serial_baud_rate
self.gdb_serial_parity = gdb_serial_parity
self._serial = serial
self._local_binary = local_binary
self._arguments = arguments
self._enable_init_files = enable_init_files
self._verbose_gdbmi = gdb_verbose_mi
def init(self):
gdb = GDBProtocol(gdb_executable=self.gdb_executable,
arch=self._arch,
additional_args=self.gdb_additional_args,
avatar=self.avatar, origin=self,
enable_init_files=self._enable_init_files,
binary=self._local_binary,
local_arguments=self._arguments,
verbose=self._verbose_gdbmi)
# If we are debugging a program locally,
# we do not need to establish any connections
if not self._local_binary:
if not self._serial:
if gdb.remote_connect(ip=self.gdb_ip, port=self.gdb_port):
self.log.info("Connected to Target")
else:
self.log.warning("Connecting failed")
else:
if gdb.remote_connect_serial(device=self.gdb_serial_device,
baud_rate=self.gdb_serial_baud_rate,
parity=self.gdb_serial_parity):
self.log.info("Connected to Target")
else:
self.log.warning("Connecting failed")
else:
self.update_state(TargetStates.INITIALIZED)
self.protocols.set_all(gdb)
if self._local_binary:
self.wait(state=TargetStates.INITIALIZED)
else:
self.wait()
@watch('TargetCont')
@action_valid_decorator_factory(TargetStates.INITIALIZED, 'execution')
@synchronize_state(TargetStates.RUNNING)
def run(self):
return self.protocols.execution.run()
def cont(self, blocking=True):
if self.state != TargetStates.INITIALIZED:
super(GDBTarget, self).cont(blocking=blocking)
else:
self.run()
@action_valid_decorator_factory(TargetStates.INITIALIZED, 'execution')
def disable_aslr(self):
self.protocols.execution.set_gdb_variable('disable-randomization',
'on')
@action_valid_decorator_factory(TargetStates.INITIALIZED, 'execution')
def enable_aslr(self):
self.protocols.execution.set_gdb_variable('disable-randomization',
'off')
| 3,134 | 459 | 23 |
0000fa8f2d70592b5ba91e1ed71c42ac79a16509 | 67 | py | Python | starfiles/deneme.py | harunlakodla/Flutter_Python-flutter_python | 9dfa020fd73bff3bcf965476060ca441349ba96b | [
"Apache-2.0"
] | 10 | 2020-02-02T21:47:34.000Z | 2022-02-05T23:55:15.000Z | starfiles/deneme.py | harunlakodla/Flutter_Python-flutter_python | 9dfa020fd73bff3bcf965476060ca441349ba96b | [
"Apache-2.0"
] | 1 | 2021-11-02T10:43:48.000Z | 2021-11-02T10:43:48.000Z | starfiles/deneme.py | harunlakodla/Flutter_Python-flutter_python | 9dfa020fd73bff3bcf965476060ca441349ba96b | [
"Apache-2.0"
] | null | null | null | print("merhaba")
print("merhaba")
print("merhaba")
print("merhaba") | 16.75 | 16 | 0.716418 | print("merhaba")
print("merhaba")
print("merhaba")
print("merhaba") | 0 | 0 | 0 |
57f17179953aaaea6b6d3ab5a3fb3b7c60c6592d | 5,849 | py | Python | pulsar/system/MakeSystem.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | pulsar/system/MakeSystem.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | pulsar/system/MakeSystem.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | import re
import math
from copy import deepcopy
import pulsar as psr
from . import ApplyBasisSet
def make_system(SomeString):
"""This function turns a string into a system object, which it then returns
Special thanks to Lori A. Burns for the original version of this function
=================================
Rules For Structuring Your String
=================================
* An entire line may be commented out by prepending a '#' character
* To specify the units of your system:
* Add the line: 'units X' or 'units=X'
* 'X' may be:
* 'bohr', 'au', or 'a.u.' for atomic units
          * 'ang' or 'angstrom' for Angstroms
* Unit cells of crystals may be used as input. This is done by providing
the fractional coordinates of your atoms and the cell's dimensions
* sides are specified by 'sides a b c'
* Units can be specified by units keyword
* angles are specified by 'angles alpha beta gamma'
* Units are in degrees
"""
#For the below comments, "any number" includes 0
comment = re.compile(r'^\s*#')#Comment line
blank = re.compile(r'^\s*$')#Blank aside from white-space
bohr = re.compile(r'^\s*units?[\s=]+(bohr|au|a.u.)\s*$', re.IGNORECASE)#a.u.
ang = re.compile(r'^\s*units?[\s=]+(ang|angstrom)\s*$', re.IGNORECASE)#Ang
#Ghosts and atoms?
atom = re.compile(r'^(?:(?P<gh1>@)|(?P<gh2>Gh\())?(?P<label>(?P<symbol>[A-Z]{1,3})(?:(_\w+)|(\d+))?)(?(gh2)\))(?:@(?P<mass>\d+\.\d+))?$', re.IGNORECASE)
cgmp = re.compile(r'^\s*(-?\d+)\s+(\d+)\s*$')#Charge/Mult
    frag = re.compile(r'^\s*--\s*$')#Fragment separator
#Matches something equals a number
variable = re.compile(r'^\s*(\w+)\s*=\s*(-?\d+\.\d+|-?\d+\.|-?\.\d+|-?\d+|tda)\s*$', re.IGNORECASE)
ghost = re.compile(r'@(.*)|Gh\((.*)\)', re.IGNORECASE)#Matches ghosts
    #Matches a line that starts with 'sides' and has three numbers
UCsides = re.compile(r'^\s*sides\s*(\d+|\d+\.\d+)\s*(\d+|\d+\.\d+)\s*(\d+|\d+\.\d+)\s*$',re.IGNORECASE)
    #Matches a line that starts with 'angles' and has three numbers
UCangles = re.compile(r'^\s*angles\s*(\d+|\d+\.\d+)\s*(\d+|\d+\.\d+)\s*(\d+|\d+\.\d+)\s*$',re.IGNORECASE)
Systems=[0] #Atoms per fragment
Zs=[] #Atomic number of each atom
ToAU=1/0.52917721067
Charge=[0] #Charges, 0-th element is full system, i-th (i>0) is i-th frag
Mult=[1] #Multiplicities, same as charges
Sides=[]
Angles=[]
Carts=[]
lines = re.split('\n', SomeString)
for line in lines:
if comment.match(line) or blank.match(line) or ang.match(line):
continue
elif bohr.match(line):
ToAU=1.0
elif cgmp.match(line):
Charge[NFrags()] = int(cgmp.match(line).group(1))
Mult[NFrags()] = int(cgmp.match(line).group(2))
# handle fragment markers and default fragment cgmp
elif frag.match(line):
Systems.append(0)
Charge.append(0)
Mult.append(1)
DaAtoms[len(DaAtoms)]=[]
elif UCsides.match(line):
for i in range(1,4):
Sides.append(float(UCsides.match(line).group(i)))
elif UCangles.match(line):
for i in range(1,4):
Angles.append(float(UCangles.match(line).group(i)))
# handle atoms
elif atom.match(line.split()[0].strip()):
entries = re.split(r'\s+|\s*,\s*', line.strip())
atomm = atom.match(line.split()[0].strip().upper())
atomLabel = atomm.group('label')
atomSym = atomm.group('symbol')
# We don't know whether the @C or Gh(C) notation matched. Do a quick check.
ghostAtom = False if (atomm.group('gh1') is None and atomm.group('gh2') is None) else True
# handle cartesians
if len(entries) == 4:
for i in range(1,4):
Carts.append(float(entries[i]))
Zs.append(psr.atomic_z_from_symbol(atomSym))
Systems[NFrags()]+=1
else:
raise PulsarException('make_system: Unidentifiable line in geometry specification: %s' % (line))
DaSpace=psr.Space()
Periodic=(len(Sides)==3 and len(Angles)==3)
NewSides=[ToAU*i for i in Sides]
if Periodic:
DaSpace=psr.Space(Angles,NewSides)
ToAU=1.0
molu=psr.AtomSetUniverse()
for i in range(0,len(Zs)):
TempCarts=[ToAU*Carts[3*i+j] for j in range(0,3)]
molu.insert(psr.create_atom(TempCarts,Zs[i]))
DaSys=psr.System(molu,True)
if Periodic:
Newu=psr.system.Frac2Cart(molu,DaSpace)
UC=psr.CarveUC(
psr.MakeSuperCell(Newu,[3,3,3],DaSpace.LatticeSides),
DaSpace.LatticeSides)
molu=psr.CleanUC(UC,DaSpace.LatticeSides)
DaSys=psr.System(molu,True)
DaSys.space=DaSpace
DaSys.charge=Charge[0]
DaSys.multiplicity=Mult[0]
return DaSys
def make_wf(bs,sysstring):
"""This is a convenience function for making a default input
wavefunction.
params:
bs : string giving the name of the basis set
sysstring: string to pass to make_system
example:
wfn = make_wf("aug-cc-pvdz", <triple_quote>
H 0.0 0.0 0.0
H 0.0 0.0 0.89
        ''')
"""
temp_sys = make_system(sysstring)
temp_sys = ApplyBasisSet.apply_single_basis("PRIMARY",bs,temp_sys)
try: #Try to put a fitting basis on it
sys = psr.apply_single_basis("FITTING",bs+"-jkfit",temp_sys)
temp_sys = sys
except:
pass
wf = psr.Wavefunction()
wf.system = temp_sys
return wf
| 37.980519 | 156 | 0.569328 | import re
import math
from copy import deepcopy
import pulsar as psr
from . import ApplyBasisSet
def make_system(SomeString):
"""This function turns a string into a system object, which it then returns
Special thanks to Lori A. Burns for the original version of this function
=================================
Rules For Structuring Your String
=================================
* An entire line may be commented out by prepending a '#' character
* To specify the units of your system:
* Add the line: 'units X' or 'units=X'
* 'X' may be:
* 'bohr', 'au', or 'a.u.' for atomic units
          * 'ang' or 'angstrom' for Angstroms
* Unit cells of crystals may be used as input. This is done by providing
the fractional coordinates of your atoms and the cell's dimensions
* sides are specified by 'sides a b c'
* Units can be specified by units keyword
* angles are specified by 'angles alpha beta gamma'
* Units are in degrees
"""
#For the below comments, "any number" includes 0
comment = re.compile(r'^\s*#')#Comment line
blank = re.compile(r'^\s*$')#Blank aside from white-space
bohr = re.compile(r'^\s*units?[\s=]+(bohr|au|a.u.)\s*$', re.IGNORECASE)#a.u.
ang = re.compile(r'^\s*units?[\s=]+(ang|angstrom)\s*$', re.IGNORECASE)#Ang
#Ghosts and atoms?
atom = re.compile(r'^(?:(?P<gh1>@)|(?P<gh2>Gh\())?(?P<label>(?P<symbol>[A-Z]{1,3})(?:(_\w+)|(\d+))?)(?(gh2)\))(?:@(?P<mass>\d+\.\d+))?$', re.IGNORECASE)
cgmp = re.compile(r'^\s*(-?\d+)\s+(\d+)\s*$')#Charge/Mult
    frag = re.compile(r'^\s*--\s*$')#Fragment separator
#Matches something equals a number
variable = re.compile(r'^\s*(\w+)\s*=\s*(-?\d+\.\d+|-?\d+\.|-?\.\d+|-?\d+|tda)\s*$', re.IGNORECASE)
ghost = re.compile(r'@(.*)|Gh\((.*)\)', re.IGNORECASE)#Matches ghosts
    #Matches a line that starts with 'sides' and has three numbers
UCsides = re.compile(r'^\s*sides\s*(\d+|\d+\.\d+)\s*(\d+|\d+\.\d+)\s*(\d+|\d+\.\d+)\s*$',re.IGNORECASE)
    #Matches a line that starts with 'angles' and has three numbers
UCangles = re.compile(r'^\s*angles\s*(\d+|\d+\.\d+)\s*(\d+|\d+\.\d+)\s*(\d+|\d+\.\d+)\s*$',re.IGNORECASE)
Systems=[0] #Atoms per fragment
Zs=[] #Atomic number of each atom
ToAU=1/0.52917721067
Charge=[0] #Charges, 0-th element is full system, i-th (i>0) is i-th frag
Mult=[1] #Multiplicities, same as charges
Sides=[]
Angles=[]
Carts=[]
def NFrags():
return len(Systems)-1
lines = re.split('\n', SomeString)
for line in lines:
if comment.match(line) or blank.match(line) or ang.match(line):
continue
elif bohr.match(line):
ToAU=1.0
elif cgmp.match(line):
Charge[NFrags()] = int(cgmp.match(line).group(1))
Mult[NFrags()] = int(cgmp.match(line).group(2))
# handle fragment markers and default fragment cgmp
elif frag.match(line):
Systems.append(0)
Charge.append(0)
Mult.append(1)
DaAtoms[len(DaAtoms)]=[]
elif UCsides.match(line):
for i in range(1,4):
Sides.append(float(UCsides.match(line).group(i)))
elif UCangles.match(line):
for i in range(1,4):
Angles.append(float(UCangles.match(line).group(i)))
# handle atoms
elif atom.match(line.split()[0].strip()):
entries = re.split(r'\s+|\s*,\s*', line.strip())
atomm = atom.match(line.split()[0].strip().upper())
atomLabel = atomm.group('label')
atomSym = atomm.group('symbol')
# We don't know whether the @C or Gh(C) notation matched. Do a quick check.
ghostAtom = False if (atomm.group('gh1') is None and atomm.group('gh2') is None) else True
# handle cartesians
if len(entries) == 4:
for i in range(1,4):
Carts.append(float(entries[i]))
Zs.append(psr.atomic_z_from_symbol(atomSym))
Systems[NFrags()]+=1
else:
raise PulsarException('make_system: Unidentifiable line in geometry specification: %s' % (line))
DaSpace=psr.Space()
Periodic=(len(Sides)==3 and len(Angles)==3)
NewSides=[ToAU*i for i in Sides]
if Periodic:
DaSpace=psr.Space(Angles,NewSides)
ToAU=1.0
molu=psr.AtomSetUniverse()
for i in range(0,len(Zs)):
TempCarts=[ToAU*Carts[3*i+j] for j in range(0,3)]
molu.insert(psr.create_atom(TempCarts,Zs[i]))
DaSys=psr.System(molu,True)
if Periodic:
Newu=psr.system.Frac2Cart(molu,DaSpace)
UC=psr.CarveUC(
psr.MakeSuperCell(Newu,[3,3,3],DaSpace.LatticeSides),
DaSpace.LatticeSides)
molu=psr.CleanUC(UC,DaSpace.LatticeSides)
DaSys=psr.System(molu,True)
DaSys.space=DaSpace
DaSys.charge=Charge[0]
DaSys.multiplicity=Mult[0]
return DaSys
def make_wf(bs,sysstring):
"""This is a convenience function for making a default input
wavefunction.
params:
bs : string giving the name of the basis set
sysstring: string to pass to make_system
example:
wfn = make_wf("aug-cc-pvdz", <triple_quote>
H 0.0 0.0 0.0
H 0.0 0.0 0.89
        ''')
"""
temp_sys = make_system(sysstring)
temp_sys = ApplyBasisSet.apply_single_basis("PRIMARY",bs,temp_sys)
try: #Try to put a fitting basis on it
sys = psr.apply_single_basis("FITTING",bs+"-jkfit",temp_sys)
temp_sys = sys
except:
pass
wf = psr.Wavefunction()
wf.system = temp_sys
return wf
| 22 | 0 | 31 |
c0a33a51c188efb2dd00413f01239574867164e0 | 1,336 | py | Python | individual1.py | MaksimSimanskiy/4.2 | 3fb83a11211383411333994dd19073e0073e9145 | [
"MIT"
] | null | null | null | individual1.py | MaksimSimanskiy/4.2 | 3fb83a11211383411333994dd19073e0073e9145 | [
"MIT"
] | null | null | null | individual1.py | MaksimSimanskiy/4.2 | 3fb83a11211383411333994dd19073e0073e9145 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The field first is an integer, the integer part of the number; the field second is a positive
integer, the fractional part of the number. Implement a multiply() method - multiplication by an
arbitrary integer of type int. The method must work correctly for all valid values of first and
second.
"""
import math
if __name__ == '__main__':
t1 = Real(12, 5)
t2 = Real(6, 5)
t2.read()
t1.display()
print(t1 * 5)
| 27.833333 | 87 | 0.592814 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The field first is an integer, the integer part of the number; the field second is a positive
integer, the fractional part of the number. Implement a multiply() method - multiplication by an
arbitrary integer of type int. The method must work correctly for all valid values of first and
second.
"""
import math
class Real:
def __init__(self, first, second):
self.first = first
self.second = second
if (self.first < 0) or (self.second < 0):
raise ValueError()
def read(self):
self.first = int(input("Введите целую часть числа "))
self.second = int(input("Введите дробную часть числа "))
def __str__(self):
return f"{self.first}.{self.second}"
def __repr__(self):
return self.__str__()
def display(self):
print(f"Число с плавающей точкой {self.first}.{self.second}")
def __mul__(self, other): # *
length = int(math.log10(self.second)) + 1
second = (self.second * other) % (10 ** length)
fractal = (self.second * other) // (10 ** length)
first = self.first * other + fractal
return Real(first, second)
if __name__ == '__main__':
t1 = Real(12, 5)
t2 = Real(6, 5)
t2.read()
t1.display()
print(t1 * 5)
| 742 | -10 | 199 |
62875e32dfa94934b88145781d02debe2392ae0e | 5,345 | py | Python | app.py | abhishekshree/FlaskLearnApp | eb201b3f414c482dffe860397ffb64a5e2b4b826 | [
"MIT"
] | null | null | null | app.py | abhishekshree/FlaskLearnApp | eb201b3f414c482dffe860397ffb64a5e2b4b826 | [
"MIT"
] | 2 | 2020-05-09T19:37:11.000Z | 2020-05-09T19:37:11.000Z | app.py | abhishekshree/FlaskLearnApp | eb201b3f414c482dffe860397ffb64a5e2b4b826 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, redirect, url_for, request, make_response
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField
from wtforms.validators import InputRequired , Email, Length
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from datetime import datetime
app = Flask( __name__, static_folder='static' )
app.config.from_pyfile('config.py')
Bootstrap(app)
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
@login_manager.user_loader
@app.route('/login', methods=['GET', 'POST'])
@app.route('/signup', methods=['GET', 'POST'])
@app.route('/')
@app.route('/all')
@app.route('/about')
@app.route('/post/<int:post_id>')
@app.route('/add')
@login_required
@app.route('/addpost', methods=['GET', 'POST'])
@login_required
@app.route('/feedback')
@app.route('/addFeed', methods = ['GET', 'POST'])
@app.route('/admin')
@login_required
@app.route('/delete/<int:post_id>')
@login_required
@app.errorhandler(404)
@app.route('/logout')
@login_required
@app.route("/sitemap.xml")
@app.route("/robots.txt")
if __name__ == "__main__":
app.run() | 26.073171 | 108 | 0.731712 | from flask import Flask, render_template, redirect, url_for, request, make_response
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField
from wtforms.validators import InputRequired , Email, Length
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from datetime import datetime
app = Flask( __name__, static_folder='static' )
app.config.from_pyfile('config.py')
Bootstrap(app)
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(50))
subtitle = db.Column(db.String(200))
author = db.Column(db.String(50))
date_posted = db.Column(db.DateTime)
content = db.Column(db.Text)
class Comment(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
email = db.Column(db.String(200))
subject = db.Column(db.String(200))
message = db.Column(db.Text)
date = db.Column(db.DateTime)
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(15), unique=True)
email = db.Column(db.String(100), unique=True)
password = db.Column(db.String(80))
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class LoginForm(FlaskForm):
username = StringField('Username', validators=[InputRequired(), Length(min=4 , max=15)])
password = PasswordField('Password', validators=[InputRequired(), Length(min=8, max=80)])
remember = BooleanField('Remember Me')
class RegisterForm(FlaskForm):
email = StringField('Email', validators=[InputRequired(), Email(message='Invalid Email'), Length(max=100)])
username = StringField('Username', validators=[InputRequired(), Length(min=4 , max=15)])
password = PasswordField('Password', validators=[InputRequired(), Length(min=8, max=80)])
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user:
if check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
return redirect(url_for('admin'))
return render_template('error.html')
return render_template('login.html', form=form)
@app.route('/signup', methods=['GET', 'POST'])
def signup():
form = RegisterForm()
if form.validate_on_submit():
hashed_pass = generate_password_hash(form.password.data, method='sha256')
new_user = User(username=form.username.data, email=form.email.data, password=hashed_pass)
db.session.add(new_user)
db.session.commit()
return "New user registered!"
return render_template('signup.html', form=form)
@app.route('/')
def index():
posts = Post.query.order_by(Post.date_posted.desc()).limit(5)
return render_template('index.html', posts=posts)
@app.route('/all')
def all():
po = Post.query.order_by(Post.date_posted.desc()).all()
return render_template('all.html', p=po)
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/post/<int:post_id>')
def post(post_id):
post = Post.query.filter_by(id=post_id).one()
date_posted = post.date_posted.strftime('%B %d, %Y')
return render_template('post.html', post=post, date_posted=date_posted)
@app.route('/add')
@login_required
def add():
return render_template('add.html')
@app.route('/addpost', methods=['GET', 'POST'])
@login_required
def addpost():
title = request.form['title']
subtitle = request.form['subtitle']
author = request.form['author']
content = request.form['content']
p = Post(title=title, subtitle=subtitle, author=author, content=content, date_posted=datetime.now())
db.session.add(p)
db.session.commit()
return redirect(url_for('index'))
@app.route('/feedback')
def feed():
feedbacks = Comment.query.all()
return render_template('feedback.html', feedbacks=feedbacks)
@app.route('/addFeed', methods = ['GET', 'POST'])
def addFeed():
name = request.form['name']
email = request.form['email']
sub = request.form['subject']
message = request.form['message']
d = Comment(name=name, email=email, subject=sub, message=message, date=datetime.now())
db.session.add(d)
db.session.commit()
return redirect(url_for('index'))
@app.route('/admin')
@login_required
def admin():
posts = Post.query.all()
return render_template('admin.html', posts=posts)
@app.route('/delete/<int:post_id>')
@login_required
def delete(post_id):
r = Post.query.get_or_404(post_id)
db.session.delete(r)
db.session.commit()
return redirect(url_for('admin'))
@app.errorhandler(404)
def page_not_found(e):
return render_template('error.html'), 404
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('index'))
@app.route("/sitemap.xml")
def sitemap_xml():
response= make_response(render_template("sitemap.xml"))
response.headers['Content-Type'] = 'application/xml'
return response
@app.route("/robots.txt")
def robots_txt():
return render_template("robots.txt")
if __name__ == "__main__":
app.run() | 2,298 | 1,154 | 489 |
fdb576221b2d5230c3aac5e421189c05dcfd9cb2 | 3,484 | py | Python | odoo/doc/_extensions/github_link.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | 1 | 2019-12-29T11:53:56.000Z | 2019-12-29T11:53:56.000Z | odoo/doc/_extensions/github_link.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | null | null | null | odoo/doc/_extensions/github_link.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | null | null | null | import inspect
import importlib
import os.path
from urlparse import urlunsplit
"""
* adds github_link(mode) context variable: provides URL (in relevant mode) of
current document on github
* if sphinx.ext.linkcode is enabled, automatically generates github linkcode
links (by setting config.linkcode_resolve)
Settings
========
* ``github_user``, username/organisation under which the project lives
* ``github_project``, name of the project on github
* (optional) ``version``, github branch to link to (default: master)
Notes
=====
* provided ``linkcode_resolve`` only supports Python domain
* generates https github links
* explicitly imports ``openerp``, so useless for anyone else
"""
def add_doc_link(app, pagename, templatename, context, doctree):
""" Add github_link function linking to the current page on github """
if not (app.config.github_user and app.config.github_project):
return
# FIXME: find other way to recover current document's source suffix
# in Sphinx 1.3 it's possible to have multiple source suffixes and that
# may be useful in the future
source_suffix = app.config.source_suffix
source_suffix = source_suffix if isinstance(source_suffix, basestring) else source_suffix[0]
# can't use functools.partial because 3rd positional is line not mode
context['github_link'] = lambda mode='edit': make_github_link(
app, 'doc/%s%s' % (pagename, source_suffix), mode=mode)
| 33.180952 | 96 | 0.652411 | import inspect
import importlib
import os.path
from urlparse import urlunsplit
"""
* adds github_link(mode) context variable: provides URL (in relevant mode) of
current document on github
* if sphinx.ext.linkcode is enabled, automatically generates github linkcode
links (by setting config.linkcode_resolve)
Settings
========
* ``github_user``, username/organisation under which the project lives
* ``github_project``, name of the project on github
* (optional) ``version``, github branch to link to (default: master)
Notes
=====
* provided ``linkcode_resolve`` only supports Python domain
* generates https github links
* explicitly imports ``openerp``, so useless for anyone else
"""
def setup(app):
app.add_config_value('github_user', None, 'env')
app.add_config_value('github_project', None, 'env')
app.connect('html-page-context', add_doc_link)
def linkcode_resolve(domain, info):
""" Resolves provided object to corresponding github URL
"""
# TODO: js?
if domain != 'py':
return None
if not (app.config.github_user and app.config.github_project):
return None
module, fullname = info['module'], info['fullname']
# TODO: attributes/properties don't have modules, maybe try to look
# them up based on their cached host object?
if not module:
return None
obj = importlib.import_module(module)
for item in fullname.split('.'):
obj = getattr(obj, item, None)
if obj is None:
return None
# get original from decorated methods
try: obj = getattr(obj, '_orig')
except AttributeError: pass
try:
obj_source_path = inspect.getsourcefile(obj)
_, line = inspect.getsourcelines(obj)
except (TypeError, IOError):
# obj doesn't have a module, or something
return None
import openerp
# FIXME: make finding project root project-independent
project_root = os.path.join(os.path.dirname(openerp.__file__), '..')
return make_github_link(
app,
os.path.relpath(obj_source_path, project_root),
line)
app.config.linkcode_resolve = linkcode_resolve
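# A minimal configuration sketch for the settings registered above
# (github_user, github_project), as they might appear in a Sphinx project's
# conf.py; the user/project values below are placeholders, not taken from this
# repository:
#
#     extensions = ['github_link', 'sphinx.ext.linkcode']
#     github_user = 'some-user-or-org'
#     github_project = 'some-project'
#     version = 'master'   # optional; branch that generated links point at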
def make_github_link(app, path, line=None, mode="blob"):
config = app.config
urlpath = "/{user}/{project}/{mode}/{branch}/{path}".format(
user=config.github_user,
project=config.github_project,
branch=config.version or 'master',
path=path,
mode=mode,
)
return urlunsplit((
'https',
'github.com',
urlpath,
'',
'' if line is None else 'L%d' % line
))
def add_doc_link(app, pagename, templatename, context, doctree):
""" Add github_link function linking to the current page on github """
if not (app.config.github_user and app.config.github_project):
return
# FIXME: find other way to recover current document's source suffix
# in Sphinx 1.3 it's possible to have multiple source suffixes and that
# may be useful in the future
source_suffix = app.config.source_suffix
source_suffix = source_suffix if isinstance(source_suffix, basestring) else source_suffix[0]
# can't use functools.partial because 3rd positional is line not mode
context['github_link'] = lambda mode='edit': make_github_link(
app, 'doc/%s%s' % (pagename, source_suffix), mode=mode)
| 1,992 | 0 | 46 |
064db3c7022248cee48f4a39a959f0cf7a675d53 | 1,197 | py | Python | examples/annotation_with_sed3.py | vlukes/io3d | 34d048b7f737a5e56610879f6ab103128e8f0750 | [
"MIT"
] | 8 | 2016-09-26T01:35:15.000Z | 2022-02-23T04:05:23.000Z | examples/annotation_with_sed3.py | vlukes/io3d | 34d048b7f737a5e56610879f6ab103128e8f0750 | [
"MIT"
] | 4 | 2016-05-18T11:04:56.000Z | 2018-10-24T11:03:03.000Z | examples/annotation_with_sed3.py | vlukes/io3d | 34d048b7f737a5e56610879f6ab103128e8f0750 | [
"MIT"
] | 6 | 2017-03-24T20:43:21.000Z | 2021-08-23T06:05:34.000Z | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Module is used for visualization of segmentation stored in pkl, dcm and other files.
"""
from loguru import logger
import io3d
import sed3
import numpy as np
pth = io3d.datasets.join_path("medical", "orig", "3Dircadb1.1", "PATIENT_DICOM", get_root=True)
datap = io3d.read(pth)
# pth = io3d.datasets.join_path("medical", "orig", "3Dircadb1.1", "LABELLED_DICOM" , get_root=True)
pth = io3d.datasets.join_path("medical", "orig", "3Dircadb1.1", "MASKS_DICOM", "liver", get_root=True)
datap_labeled = io3d.read(pth)
ed = sed3.sed3(datap["data3d"], contour=datap_labeled["data3d"])
ed.show()
ed.seeds
nz = np.nonzero(ed.seeds == 1)
print(np.unique(nz[0]))
nz = np.nonzero(ed.seeds == 2)
print(np.unique(nz[0]))
nz = np.nonzero(ed.seeds == 3)
print(np.unique(nz[0]))
pth = io3d.datasets.join_path("medical", "orig", "3Dircadb1.1", "MASKS_DICOM", "liver", get_root=True)
datap = io3d.read(pth)
ed = sed3.sed3(datap["data3d"])
ed.show()
nz_liver = np.nonzero(datap["data3d"])
print(np.unique(nz_liver[0]))
print(f"first slide with the liver: {np.min(np.unique(nz_liver[0]))}")
print(f"last slide with the liver: {np.max(np.unique(nz_liver[0]))}")
| 27.204545 | 102 | 0.695071 | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Module is used for visualization of segmentation stored in pkl, dcm and other files.
"""
from loguru import logger
import io3d
import sed3
import numpy as np
pth = io3d.datasets.join_path("medical", "orig", "3Dircadb1.1", "PATIENT_DICOM", get_root=True)
datap = io3d.read(pth)
# pth = io3d.datasets.join_path("medical", "orig", "3Dircadb1.1", "LABELLED_DICOM" , get_root=True)
pth = io3d.datasets.join_path("medical", "orig", "3Dircadb1.1", "MASKS_DICOM", "liver", get_root=True)
datap_labeled = io3d.read(pth)
ed = sed3.sed3(datap["data3d"], contour=datap_labeled["data3d"])
ed.show()
ed.seeds
nz = np.nonzero(ed.seeds == 1)
print(np.unique(nz[0]))
nz = np.nonzero(ed.seeds == 2)
print(np.unique(nz[0]))
nz = np.nonzero(ed.seeds == 3)
print(np.unique(nz[0]))
pth = io3d.datasets.join_path("medical", "orig", "3Dircadb1.1", "MASKS_DICOM", "liver", get_root=True)
datap = io3d.read(pth)
ed = sed3.sed3(datap["data3d"])
ed.show()
nz_liver = np.nonzero(datap["data3d"])
print(np.unique(nz_liver[0]))
print(f"first slide with the liver: {np.min(np.unique(nz_liver[0]))}")
print(f"last slide with the liver: {np.max(np.unique(nz_liver[0]))}")
| 0 | 0 | 0 |
e9c9f3a6a08c24056c1db55707573ce350484ce4 | 8,323 | py | Python | tasks.py | lagmoellertim/ImageShare-DesktopApp | 348ad4a07c790ecb218042e59acf51421e093434 | [
"MIT"
] | 8 | 2018-03-25T22:02:24.000Z | 2021-04-26T22:07:02.000Z | tasks.py | lagmoellertim/ImageShare-DesktopApp | 348ad4a07c790ecb218042e59acf51421e093434 | [
"MIT"
] | null | null | null | tasks.py | lagmoellertim/ImageShare-DesktopApp | 348ad4a07c790ecb218042e59acf51421e093434 | [
"MIT"
] | 1 | 2018-05-24T05:15:43.000Z | 2018-05-24T05:15:43.000Z | from threading import Thread
import gui
from tools import clipboard, qrcode, generate_token
import os
import tempfile
import uuid
import time
import shutil
import tkinter as tk
from tkinter.filedialog import asksaveasfilename
from tkinter.messagebox import askyesno
class Gallery(Thread):
"""
The Gallery class starts a new window with the image gallery.
"""
def __init__(self, image_queue, upload_path, active_gallery_windows):
"""
The gallery class is going to be initialized.
:param image_queue: A queue where all incoming pictures are put in
:param upload_path: The path where the images are going to be stored
:param active_gallery_windows: A list of all currently active gallery windows
"""
Thread.__init__(self)
self.stop = False
self.image_queue = image_queue
self.upload_path = upload_path
self.active_gallery_windows = active_gallery_windows
def task(self):
"""
This task is going to be executed in the main UI thread. It handles the browser/window setup for the gallery
as well as the image to clipboard javascript binding. After that, it starts its own thread and runs in a loop.
:return:
"""
self.browser = gui.GUI.add_browser(
instance=self,
callback_func_name='on_window_close',
url="file:///resources/html/gallery.html",
window_title="ImageShare Gallery"
)
gui.GUI.set_javascript_bindings(
self.browser,
"image_to_clipboard",
clipboard.Clipboard.image_to_clipboard
)
self.start()
def clear_images(self):
"""
When this method is called, all previously displayed images from the gallery disappear. This happens, for
example, after a new session has been started.
:return:
"""
gui.GUI.execute_javascript_func(
self.browser,
"clear"
)
def run(self):
"""
This method creates a thread which loops continuously. Every time a new image enters the image queue,
it will be displayed in the gallery window using a JavaScript function.
:return:
"""
time.sleep(.5)
while not self.stop:
if not self.image_queue.empty():
item = self.image_queue.get()
gui.GUI.execute_javascript_func(
self.browser,
"add_image",
item
)
self.image_queue.delete_queue()
def on_window_close(self):
"""
This callback is triggered when the window is going to be closed. It stops the main loop and removes the
window from the list of currently active windows.
:return:
"""
self.stop = True
self.active_gallery_windows.remove(self)
class QRCode(Thread):
"""
The QRCode class starts a new window with the qr code.
"""
def __init__(self, ip, port, token_obj, active_qr_code_windows):
"""
The QR code class is going to be initialized.
:param ip: The IP address the server is listening on. It is used to generate the QR code.
:param port: The port the server is listening on. It is used to generate the QR code.
:param token_obj: The object which contains the current auth token. It is used to generate the QR code.
:param active_qr_code_windows: A list of all currently active qr code windows
"""
Thread.__init__(self)
self.active_qr_code_windows = active_qr_code_windows
self.ip = ip
self.port = port
self.token_obj = token_obj
def task(self):
"""
This task is going to be executed in the main UI thread. It handles the browser/window setup for the qr code
window. After that, it starts its own thread and runs in a loop.
:return:
"""
self.browser = gui.GUI.add_browser(
url="file:///resources/html/qr_code.html",
callback_func_name='on_window_close',
window_title="ImageShare QR Connect"
)
self.start()
def generate_msg(self):
"""
This method generates the QR code message based on the ip, the port and the current token.
:return:
"""
return "http://{}:{}/?key={}".format(
self.ip,
self.port,
self.token_obj.obj)
def set_qr_code(self):
"""
This method generates the qr code message, generates the qr code and injects it into the active window via
a javascript function.
:return:
"""
path = os.path.join(tempfile.gettempdir(), "{}.png".format(uuid.uuid4())).replace("\\", "/")
qrcode.generate_qr_code(self.generate_msg(), path)
gui.GUI.execute_javascript_func(
self.browser,
"set_qr_code",
path
)
def run(self):
"""
This method starts a thread which will then asynchronously add the qr code to the window.
:return:
"""
self.set_qr_code()
def on_window_close(self):
"""
This callback gets executed when the qr code window is closed. It removes the window from the list of
currently active ones.
:return:
"""
self.active_qr_code_windows.remove(self)
class NewSession(Thread):
"""
When a new session should start, this class handles the whole procedure.
"""
def __init__(self, token_obj, upload_path, active_gallery_windows, active_qr_code_windows, image_queue):
"""
This method initializes all the pieces that are needed to start a new session.
:param token_obj: The object which contains the current auth token.
:param upload_path: The path where the images are going to be stored
:param active_gallery_windows: A list of all currently active gallery windows
:param active_qr_code_windows: A list of all currently active qr code windows
:param image_queue: A queue where all incoming pictures are put in
"""
Thread.__init__(self)
self.token_obj = token_obj
self.upload_path = upload_path
self.active_gallery_windows = active_gallery_windows
self.active_qr_code_windows = active_qr_code_windows
self.image_queue = image_queue
self.new_session = False
def start_new_session(self):
"""
When this method gets called, the mainloop of the thread opens a dialog to start a new session.
:return:
"""
self.new_session = True
def run(self):
"""
This method starts a thread which contains a continuous loop. It handles the creation of new sessions, the
generation of new tokens, saving the images to a new path, cleaning the default upload folder, cleaning
the gallery and clearing the queues. To avoid the loss of data, multiple confirmations are needed to start
a new session.
:return:
"""
root = tk.Tk()
root.withdraw()
while True:
if self.new_session:
if askyesno("New Session", "Are you sure you want to start a new session?"):
abort = False if os.listdir(self.upload_path) != [] else True
path = ""
while not abort:
path = asksaveasfilename()
if not path:
abort = askyesno("Cancel", "Are you sure you don't want to save the image files?")
else:
break
for window in self.active_gallery_windows:
window.clear_images()
self.token_obj.setValue(generate_token.generate_token())
for window in self.active_qr_code_windows:
window.set_qr_code()
self.image_queue.clear()
if not abort:
shutil.copytree(self.upload_path, path)
shutil.rmtree(self.upload_path)
os.mkdir(self.upload_path)
self.new_session = False
| 34.251029 | 118 | 0.606392 | from threading import Thread
import gui
from tools import clipboard, qrcode, generate_token
import os
import tempfile
import uuid
import time
import shutil
import tkinter as tk
from tkinter.filedialog import asksaveasfilename
from tkinter.messagebox import askyesno
class Gallery(Thread):
"""
The Gallery class starts a new window with the image gallery.
"""
def __init__(self, image_queue, upload_path, active_gallery_windows):
"""
The gallery class is going to be initialized.
:param image_queue: A queue where all incoming pictures are put in
:param upload_path: The path where the images are going to be stored
:param active_gallery_windows: A list of all currently active gallery windows
"""
Thread.__init__(self)
self.stop = False
self.image_queue = image_queue
self.upload_path = upload_path
self.active_gallery_windows = active_gallery_windows
def task(self):
"""
This task is going to be executed in the main UI thread. It handles the browser/window setup for the gallery
as well as the image to clipboard javascript binding. After that, it starts its own thread and runs in a loop.
:return:
"""
self.browser = gui.GUI.add_browser(
instance=self,
callback_func_name='on_window_close',
url="file:///resources/html/gallery.html",
window_title="ImageShare Gallery"
)
gui.GUI.set_javascript_bindings(
self.browser,
"image_to_clipboard",
clipboard.Clipboard.image_to_clipboard
)
self.start()
def clear_images(self):
"""
When this method is called, all previously displayed images from the gallery disappear. This happens, for
example, after a new session has been started.
:return:
"""
gui.GUI.execute_javascript_func(
self.browser,
"clear"
)
def run(self):
"""
This method creates a thread which loops continuously. Every time a new image enters the image queue,
it will be displayed in the gallery window using a JavaScript function.
:return:
"""
time.sleep(.5)
while not self.stop:
if not self.image_queue.empty():
item = self.image_queue.get()
gui.GUI.execute_javascript_func(
self.browser,
"add_image",
item
)
self.image_queue.delete_queue()
def on_window_close(self):
"""
This callback is triggered when the window is going to be closed. It stops the main loop and removes the
window from the list of currently active windows.
:return:
"""
self.stop = True
self.active_gallery_windows.remove(self)
class QRCode(Thread):
"""
The QRCode class starts a new window with the qr code.
"""
def __init__(self, ip, port, token_obj, active_qr_code_windows):
"""
The QR code class is going to be initialized.
:param ip: The IP address the server is listening on. It is used to generate the QR code.
:param port: The port the server is listening on. It is used to generate the QR code.
:param token_obj: The object which contains the current auth token. It is used to generate the QR code.
:param active_qr_code_windows: A list of all currently active qr code windows
"""
Thread.__init__(self)
self.active_qr_code_windows = active_qr_code_windows
self.ip = ip
self.port = port
self.token_obj = token_obj
def task(self):
"""
This task is going to be executed in the main UI thread. It handles the browser/window setup for the qr code
window. After that, it starts its own thread and runs in a loop.
:return:
"""
self.browser = gui.GUI.add_browser(
url="file:///resources/html/qr_code.html",
callback_func_name='on_window_close',
window_title="ImageShare QR Connect"
)
self.start()
def generate_msg(self):
"""
This method generates the QR code message based on the ip, the port and the current token.
:return:
"""
return "http://{}:{}/?key={}".format(
self.ip,
self.port,
self.token_obj.obj)
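# Illustrative values (made up): with ip="192.168.0.10", port=8080 and a current
# token of "abc123", this returns "http://192.168.0.10:8080/?key=abc123", which is
# the string encoded into the QR code by set_qr_code() below.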
def set_qr_code(self):
"""
This method generates the qr code message, generates the qr code and injects it into the active window via
a javascript function.
:return:
"""
path = os.path.join(tempfile.gettempdir(), "{}.png".format(uuid.uuid4())).replace("\\", "/")
qrcode.generate_qr_code(self.generate_msg(), path)
gui.GUI.execute_javascript_func(
self.browser,
"set_qr_code",
path
)
def run(self):
"""
This method starts a thread which will then asynchronously add the qr code to the window.
:return:
"""
self.set_qr_code()
def on_window_close(self):
"""
This callback gets executed when the qr code window is closed. It removes the window from the list of
currently active ones.
:return:
"""
self.active_qr_code_windows.remove(self)
class NewSession(Thread):
"""
When a new session should start, this class handles the whole procedure.
"""
def __init__(self, token_obj, upload_path, active_gallery_windows, active_qr_code_windows, image_queue):
"""
This method initializes all the pieces that are needed to start a new session.
:param token_obj: The object which contains the current auth token.
:param upload_path: The path where the images are going to be stored
:param active_gallery_windows: A list of all currently active gallery windows
:param active_qr_code_windows: A list of all currently active qr code windows
:param image_queue: A queue where all incoming pictures are put in
"""
Thread.__init__(self)
self.token_obj = token_obj
self.upload_path = upload_path
self.active_gallery_windows = active_gallery_windows
self.active_qr_code_windows = active_qr_code_windows
self.image_queue = image_queue
self.new_session = False
def start_new_session(self):
"""
When this method gets called, the mainloop of the thread opens a dialog to start a new session.
:return:
"""
self.new_session = True
def run(self):
"""
This method starts a thread which contains a continuous loop. It handles the creation of new sessions, the
generation of new tokens, saving the images to a new path, cleaning the default upload folder, cleaning
the gallery and clearing the queues. To avoid the loss of data, multiple confirmations are needed to start
a new session.
:return:
"""
root = tk.Tk()
root.withdraw()
while True:
if self.new_session:
if askyesno("New Session", "Are you sure you want to start a new session?"):
abort = False if os.listdir(self.upload_path) != [] else True
path = ""
while not abort:
path = asksaveasfilename()
if not path:
abort = askyesno("Cancel", "Are you sure you don't want to save the image files?")
else:
break
for window in self.active_gallery_windows:
window.clear_images()
self.token_obj.setValue(generate_token.generate_token())
for window in self.active_qr_code_windows:
window.set_qr_code()
self.image_queue.clear()
if not abort:
shutil.copytree(self.upload_path, path)
shutil.rmtree(self.upload_path)
os.mkdir(self.upload_path)
self.new_session = False
| 0 | 0 | 0 |
e98e9ebf2c5f8b253553d891592684f474c1e66c | 691 | py | Python | nodedata/migrations/0006_peer.py | bartromgens/bitcoinnodestats | 04f9f6b4e3c7d9dd236476c558357ea9353aa022 | [
"MIT"
] | 17 | 2016-05-12T20:49:10.000Z | 2020-04-07T07:28:50.000Z | nodedata/migrations/0006_peer.py | bartromgens/bitcoinnodestats | 04f9f6b4e3c7d9dd236476c558357ea9353aa022 | [
"MIT"
] | 7 | 2016-05-13T15:09:15.000Z | 2021-06-10T19:09:06.000Z | nodedata/migrations/0006_peer.py | bartromgens/bitcoinnodestats | 04f9f6b4e3c7d9dd236476c558357ea9353aa022 | [
"MIT"
] | 5 | 2016-05-13T10:11:49.000Z | 2020-04-10T22:32:38.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-12 00:31
from __future__ import unicode_literals
from django.db import migrations, models
import jsonfield.fields
| 27.64 | 114 | 0.616498 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-12 00:31
from __future__ import unicode_literals
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('nodedata', '0005_rawnodedata_networkinfo_json'),
]
operations = [
migrations.CreateModel(
name='Peer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('peer_json', jsonfield.fields.JSONField()),
('datetime_created', models.DateTimeField(auto_now_add=True)),
],
),
]
| 0 | 489 | 23 |
d7ad1cdd8d8d266811acb2b8890656f37cb77f04 | 9,250 | py | Python | utils/pghelp.py | transfaeries/forest | e92ffcebf1b3adfebb8006b215e292973b7ca39d | [
"MIT"
] | null | null | null | utils/pghelp.py | transfaeries/forest | e92ffcebf1b3adfebb8006b215e292973b7ca39d | [
"MIT"
] | 1 | 2022-03-09T10:02:31.000Z | 2022-03-09T10:02:31.000Z | utils/pghelp.py | transfaeries/forest | e92ffcebf1b3adfebb8006b215e292973b7ca39d | [
"MIT"
] | null | null | null | #!/usr/bin/python3.9
# Copyright (c) 2021 MobileCoin Inc.
# Copyright (c) 2021 The Forest Team
import asyncio
import copy
import logging
import os
from contextlib import asynccontextmanager
from typing import Any, AsyncGenerator, Callable, Optional, Union
try:
import asyncpg
DUMMY = False
except ImportError:
from dummy_asyncpg import asyncpg
DUMMY = True
Loop = Optional[asyncio.events.AbstractEventLoop]
AUTOCREATE = "true" in os.getenv("AUTOCREATE_TABLES", "false").lower()
MAX_RESP_LOG_LEN = int(os.getenv("MAX_RESP_LOG_LEN", "256"))
LOG_LEVEL_DEBUG = bool(os.getenv("DEBUG", None))
pools: list[asyncpg.Pool] = []
class PGInterface:
"""Implements an abstraction for both sync and async PG requests:
- provided a map of method names to SQL query strings
- an optional database URI ( defaults to "")
- and an optional event loop"""
def __init__(
self, query_strings: PGExpressions, database: str = "", loop: Loop = None
) -> None:
"""Accepts a PGExpressions argument containing postgresql expressions, a database string, and an optional event loop."""
self.loop = loop or asyncio.get_event_loop()
self.database: Union[str, dict] = copy.deepcopy(
database
) # either a db uri or canned resps
self.queries = query_strings
self.table = self.queries.table
self.MAX_RESP_LOG_LEN = MAX_RESP_LOG_LEN
# self.loop.create_task(self.connect_pg())
self.pool = None
if isinstance(database, dict):
self.invocations: list[dict] = []
self.logger = get_logger(
f'{self.table}{"_fake" if not self.pool else ""}_interface'
)
def finish_init(self) -> None:
"""Optionally triggers creating tables and checks existence."""
if not self.pool:
self.logger.warning("RUNNING IN FAKE MODE")
if self.pool and self.table and not self.sync_exists():
if AUTOCREATE:
self.sync_create_table()
self.logger.warning(f"building table {self.table}")
else:
self.logger.warning(
f"not autocreating! table: {self.table} does not exist!"
)
for k in self.queries:
if AUTOCREATE and "create" in k and "index" in k:
self.logger.info(f"creating index via {k}")
self.__getattribute__(f"sync_{k}")()
async def execute(
self,
qstring: str,
*args: str,
) -> Optional[list[asyncpg.Record]]:
"""Invoke the asyncpg connection's `execute` given a provided query string and set of arguments"""
timeout: int = 180
if not self.pool and not isinstance(self.database, dict):
await self.connect_pg()
if self.pool:
async with self.pool.acquire() as connection:
# try:
# except asyncpg.TooManyConnectionsError:
# await connection.execute(
# """SELECT pg_terminate_backend(pg_stat_activity.pid)
# FROM pg_stat_activity
# WHERE pg_stat_activity.datname = 'postgres'
# AND pid <> pg_backend_pid();"""
# )
# return self.execute(qstring, *args, timeout=timeout)
# _execute takes query, args, limit, timeout
result = await connection._execute(
qstring, args, 0, timeout, return_status=True
)
# list[asyncpg.Record], str, bool
return result[0]
return None
def sync_execute(self, qstring: str, *args: Any) -> asyncpg.Record:
"""Synchronous wrapper for `self.execute`"""
ret = self.loop.run_until_complete(self.execute(qstring, *args))
return ret
def truncate(self, thing: str) -> str:
"""Logging helper. Truncates and formats."""
if len(thing) > self.MAX_RESP_LOG_LEN:
return (
f"{thing[:self.MAX_RESP_LOG_LEN]}..."
"[{len(thing)-self.MAX_RESP_LOG_LEN} omitted]"
)
return thing
def __getattribute__(self, key: str) -> Callable[..., asyncpg.Record]:
"""Implicitly define methods on this class for every statement in self.query_strings.
If method is prefaced with "sync_": wrap as a synchronous function call.
If statement in self.query_strings looks like an f-string, treat it
as such by evaling before passing to `executer`."""
try:
return object.__getattribute__(self, key)
except AttributeError:
pass
if key.startswith(
"sync_"
): # sync_ prefix implicitly wraps query as synchronous
qstring = key.replace("sync_", "")
executer = self.sync_execute
else:
executer = self.execute
qstring = key
try:
statement = self.queries.get_query(qstring)
except KeyError as e:
raise ValueError(f"No statement of name {qstring} or {key} found!") from e
if not self.pool and isinstance(self.database, dict):
canned_response = self.database.get(qstring, [[None]]).pop(0)
if qstring in self.database and not self.database.get(qstring, []):
self.database.pop(qstring)
return return_canned
if "$1" in statement or "{" in statement and "}" in statement:
def executer_with_args(*args: Any) -> Any:
"""Closure over 'statement' in local state for application to arguments.
Allows deferred execution of f-strs, allowing PGExpressions to operate on `args`."""
rebuilt_statement = eval(f'f"{statement}"') # pylint: disable=eval-used
if (
rebuilt_statement != statement
and "args" in statement
and "$1" not in statement
):
args = ()
resp = executer(rebuilt_statement, *args)
short_strresp = self.truncate(f"{resp}")
short_args = self.truncate(str(args))
self.logger.debug(
f"{rebuilt_statement} {short_args} -> {short_strresp}"
)
return resp
return executer_with_args
def executer_without_args() -> Any:
"""Closure over local state for executer without arguments."""
return executer(statement)
return executer_without_args
| 37.601626 | 128 | 0.585297 | #!/usr/bin/python3.9
# Copyright (c) 2021 MobileCoin Inc.
# Copyright (c) 2021 The Forest Team
import asyncio
import copy
import logging
import os
from contextlib import asynccontextmanager
from typing import Any, AsyncGenerator, Callable, Optional, Union
try:
import asyncpg
DUMMY = False
except ImportError:
from dummy_asyncpg import asyncpg
DUMMY = True
Loop = Optional[asyncio.events.AbstractEventLoop]
AUTOCREATE = "true" in os.getenv("AUTOCREATE_TABLES", "false").lower()
MAX_RESP_LOG_LEN = int(os.getenv("MAX_RESP_LOG_LEN", "256"))
LOG_LEVEL_DEBUG = bool(os.getenv("DEBUG", None))
def get_logger(name: str) -> logging.Logger:
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG if LOG_LEVEL_DEBUG else logging.INFO)
if not logger.hasHandlers():
sh = logging.StreamHandler()
sh.setFormatter(
logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
)
logger.addHandler(sh)
return logger
pools: list[asyncpg.Pool] = []
async def close_pools() -> None:
for pool in pools:
try:
await pool.close()
except (asyncpg.PostgresError, asyncpg.InternalClientError) as e:
logging.error(e)
class SimpleInterface:
def __init__(self, database: str) -> None:
self.database = database
self.pool: Optional[asyncpg.Pool] = None
@asynccontextmanager
async def get_connection(self) -> AsyncGenerator:
if not self.pool:
self.pool = await asyncpg.create_pool(self.database)
pools.append(self.pool)
async with self.pool.acquire() as conn:
yield conn
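# A minimal usage sketch for SimpleInterface, assuming a reachable PostgreSQL
# server; the DSN and query below are placeholders.
async def _example_simple_interface(dsn: str = "postgresql://user:pass@localhost/db") -> list:
    iface = SimpleInterface(dsn)
    async with iface.get_connection() as conn:
        # conn is an ordinary asyncpg connection checked out of the shared pool
        return await conn.fetch("SELECT 1 AS one")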
class PGExpressions(dict):
def __init__(self, table: str = "", **kwargs: str) -> None:
self.table = table
self.logger = get_logger(f"{self.table}_expressions")
super().__init__(**kwargs)
if "exists" not in self:
self[
"exists"
] = f"SELECT * FROM pg_tables WHERE tablename = '{self.table}';"
if "create_table" not in self:
self.logger.warning(f"'create_table' not defined for {self.table}")
def get_query(self, key: str) -> str:
self.logger.debug(f"self.get invoked for {key}")
return dict.__getitem__(self, key).replace("{self.table}", self.table)
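# A sketch of how a query bundle might be declared; the table name and SQL below
# are invented for illustration. The "{self.table}" placeholder is what
# get_query() substitutes with the table name.
def _example_user_expressions() -> PGExpressions:
    return PGExpressions(
        table="users",
        create_table="CREATE TABLE IF NOT EXISTS {self.table} (id TEXT PRIMARY KEY, balance BIGINT);",
        get_user="SELECT * FROM {self.table} WHERE id=$1;",
    )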
class PGInterface:
"""Implements an abstraction for both sync and async PG requests:
- provided a map of method names to SQL query strings
- an optional database URI ( defaults to "")
- and an optional event loop"""
def __init__(
self, query_strings: PGExpressions, database: str = "", loop: Loop = None
) -> None:
"""Accepts a PGExpressions argument containing postgresql expressions, a database string, and an optional event loop."""
self.loop = loop or asyncio.get_event_loop()
self.database: Union[str, dict] = copy.deepcopy(
database
) # either a db uri or canned resps
self.queries = query_strings
self.table = self.queries.table
self.MAX_RESP_LOG_LEN = MAX_RESP_LOG_LEN
# self.loop.create_task(self.connect_pg())
self.pool = None
if isinstance(database, dict):
self.invocations: list[dict] = []
self.logger = get_logger(
f'{self.table}{"_fake" if not self.pool else ""}_interface'
)
def finish_init(self) -> None:
"""Optionally triggers creating tables and checks existence."""
if not self.pool:
self.logger.warning("RUNNING IN FAKE MODE")
if self.pool and self.table and not self.sync_exists():
if AUTOCREATE:
self.sync_create_table()
self.logger.warning(f"building table {self.table}")
else:
self.logger.warning(
f"not autocreating! table: {self.table} does not exist!"
)
for k in self.queries:
if AUTOCREATE and "create" in k and "index" in k:
self.logger.info(f"creating index via {k}")
self.__getattribute__(f"sync_{k}")()
async def connect_pg(self) -> None:
self.pool = await asyncpg.create_pool(self.database)
pools.append(self.pool)
async def execute(
self,
qstring: str,
*args: str,
) -> Optional[list[asyncpg.Record]]:
"""Invoke the asyncpg connection's `execute` given a provided query string and set of arguments"""
timeout: int = 180
if not self.pool and not isinstance(self.database, dict):
await self.connect_pg()
if self.pool:
async with self.pool.acquire() as connection:
# try:
# except asyncpg.TooManyConnectionsError:
# await connection.execute(
# """SELECT pg_terminate_backend(pg_stat_activity.pid)
# FROM pg_stat_activity
# WHERE pg_stat_activity.datname = 'postgres'
# AND pid <> pg_backend_pid();"""
# )
# return self.execute(qstring, *args, timeout=timeout)
# _execute takes query, args, limit, timeout
result = await connection._execute(
qstring, args, 0, timeout, return_status=True
)
# list[asyncpg.Record], str, bool
return result[0]
return None
def sync_execute(self, qstring: str, *args: Any) -> asyncpg.Record:
"""Synchronous wrapper for `self.execute`"""
ret = self.loop.run_until_complete(self.execute(qstring, *args))
return ret
def sync_close(self) -> Any:
self.logger.info(f"closing connection: {self.pool}")
if self.pool:
ret = self.loop.run_until_complete(self.pool.close())
return ret
return None
def truncate(self, thing: str) -> str:
"""Logging helper. Truncates and formats."""
if len(thing) > self.MAX_RESP_LOG_LEN:
return (
f"{thing[:self.MAX_RESP_LOG_LEN]}..."
"[{len(thing)-self.MAX_RESP_LOG_LEN} omitted]"
)
return thing
def __getattribute__(self, key: str) -> Callable[..., asyncpg.Record]:
"""Implicitly define methods on this class for every statement in self.query_strings.
If method is prefaced with "sync_": wrap as a synchronous function call.
If statement in self.query_strings looks like an f-string, treat it
as such by evaling before passing to `executer`."""
try:
return object.__getattribute__(self, key)
except AttributeError:
pass
if key.startswith(
"sync_"
): # sync_ prefix implicitly wraps query as synchronous
qstring = key.replace("sync_", "")
executer = self.sync_execute
else:
executer = self.execute
qstring = key
try:
statement = self.queries.get_query(qstring)
except KeyError as e:
raise ValueError(f"No statement of name {qstring} or {key} found!") from e
if not self.pool and isinstance(self.database, dict):
canned_response = self.database.get(qstring, [[None]]).pop(0)
if qstring in self.database and not self.database.get(qstring, []):
self.database.pop(qstring)
def return_canned(*args: Any, **kwargs: Any) -> Any:
self.invocations.append({qstring: (args, kwargs)})
if callable(canned_response):
resp = canned_response(*args, **kwargs)
else:
resp = canned_response
short_strresp = self.truncate(f"{resp}")
self.logger.info(
f"returning `{short_strresp}` for expression: "
f"`{qstring}` eval'd with `{args}` & `{kwargs}`"
)
return resp
return return_canned
if "$1" in statement or "{" in statement and "}" in statement:
def executer_with_args(*args: Any) -> Any:
"""Closure over 'statement' in local state for application to arguments.
Allows deferred execution of f-strs, allowing PGExpressions to operate on `args`."""
rebuilt_statement = eval(f'f"{statement}"') # pylint: disable=eval-used
if (
rebuilt_statement != statement
and "args" in statement
and "$1" not in statement
):
args = ()
resp = executer(rebuilt_statement, *args)
short_strresp = self.truncate(f"{resp}")
short_args = self.truncate(str(args))
self.logger.debug(
f"{rebuilt_statement} {short_args} -> {short_strresp}"
)
return resp
return executer_with_args
def executer_without_args() -> Any:
"""Closure over local state for executer without arguments."""
return executer(statement)
return executer_without_args
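# Usage sketch for the implicit methods created by __getattribute__ above; the
# expressions and canned rows are invented. Passing a dict instead of a DSN puts
# the interface into its fake mode, so nothing touches PostgreSQL.
def _example_fake_interface_call() -> Any:
    queries = PGExpressions(table="users", get_user="SELECT * FROM {self.table} WHERE id=$1;")
    fake_db = {"get_user": [[("alice", 100)]]}  # canned response for the first call
    iface = PGInterface(queries, fake_db)
    return iface.sync_get_user("alice")  # resolves to the canned row above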
| 2,316 | 84 | 234 |
5b9e759af14b63b1adeea004fac45e99e0507638 | 6,679 | py | Python | src/modules/php/visitors/resolvers.py | Mause/PHP-Parsers | 9fac9827fa34a48e1d514520bb7b8be7c0fd2156 | [
"MIT"
] | 11 | 2020-06-27T12:46:32.000Z | 2022-02-20T00:08:50.000Z | src/modules/php/visitors/resolvers.py | Mause/PHP-Parsers | 9fac9827fa34a48e1d514520bb7b8be7c0fd2156 | [
"MIT"
] | null | null | null | src/modules/php/visitors/resolvers.py | Mause/PHP-Parsers | 9fac9827fa34a48e1d514520bb7b8be7c0fd2156 | [
"MIT"
] | 5 | 2020-06-28T21:42:36.000Z | 2022-02-20T00:11:43.000Z | """Visitors that make certain mutations to the AST"""
import sys
import os
from src.modules.php import syntax_tree
from src.modules.php.base import Visitor
from src.compiler.php import phpast
class DependencyResolver(Visitor):
"""Expands the tree by augmenting the ASTs for files added in it using
Include and Require tags
Should precede any visitors that depend on it.
"""
def evaluate_require(self, expr):
"""
Takes the 'expr' block of a require/include call and
reduces it to the actual string produced from that expression
"""
if isinstance(expr, str):
return expr
elif isinstance(expr, phpast.BinaryOp):
if expr.op == ".":
return self.evaluate_require(expr.left) + self.evaluate_require(expr.right)
elif isinstance(expr, phpast.Constant):
const_value = self.constants.get(expr.name)
if not isinstance(const_value, str):
return "[PATH]"
if const_value is None:
const_value = "[PATH]"
self.expr_fails.append((expr, expr.lineno, self.current_file_tree.file_path ))
return const_value
else:
self.expr_fails.append((expr, expr.lineno, self.current_file_tree.file_path ))
return "[PATH]"
| 39.288235 | 116 | 0.634077 | """Visitors that make certain mutations to the AST"""
import sys
import os
from src.modules.php import syntax_tree
from src.modules.php.base import Visitor
from src.compiler.php import phpast
class CircularImport(phpast.Node):
fields = ["file_name"]
def __init__(self, child_path, looping_tree):
self.looping_tree = looping_tree
self.file_name = os.path.basename(child_path)
class DependencyResolver(Visitor):
"""Expands the tree by augmenting the ASTs for files added in it using
Include and Require tags
Should precede any visitors that depend on it.
"""
def __init__(self, debug=False):
self.namespace_stack = []
self.constants = {}
self.expr_fails = []
self.not_found = []
self.debug = debug
self.current_file_tree = None
def visit(self, current_node):
if isinstance(current_node, phpast.Include) or isinstance(current_node, phpast.Require):
# Get the last file visited by getting the last SyntaxTree element
# of the namespace_stack.
# This is required to properly form the absolute path for the
# file in the Include/Require
current_tree_stack = [tree for tree in self.namespace_stack if isinstance(tree, syntax_tree.SyntaxTree)]
self.current_file_tree = current_tree_stack[-1]
# Get the Syntax tree for the node (Include/Require) and augment it
# to the node by using previous file included as base path
file_to_build = self.evaluate_require(current_node.expr)
dependency_path = os.path.join(self.current_file_tree.file_location, file_to_build)
dependency_path = os.path.normpath(dependency_path)
# Counter Circular Imports
for file_tree in current_tree_stack:
if dependency_path == file_tree.file_path:
ci_node = CircularImport(child_path=dependency_path, looping_tree=file_tree)
current_node.body = ci_node
break
else:
self.follow_dependency(current_node, dependency_path)
elif isinstance(current_node, phpast.FunctionCall):
if current_node.name == "define":
constant_name = current_node.params[0].node
try:
constant_value = current_node.params[1].node
self.constants[constant_name] = constant_value
except:
pass
def register_with(self, traverser):
# Add a reference to Traverser's namespace stack so that we can
# access it from this visitor
self.namespace_stack = traverser.namespace_stack
def follow_dependency(self, node, dependency_path):
if not os.path.isfile(dependency_path):
dp_file = os.path.basename(dependency_path)
if self.debug:
print("Require/Include Error")
print(f"File {dp_file} does not exist")
print(f"Line: {node.lineno}")
self.not_found.append((dependency_path, node.lineno,
self.current_file_tree.file_path))
else:
file_handle = open(dependency_path, "r")
sub_ast = syntax_tree.SyntaxTree(file_handle)
node.body = sub_ast
def evaluate_require(self, expr):
"""
Takes the 'expr' block of a require/include call and
reduces it to the actual string produced from that expression
"""
if isinstance(expr, str):
return expr
elif isinstance(expr, phpast.BinaryOp):
if expr.op == ".":
return self.evaluate_require(expr.left) + self.evaluate_require(expr.right)
elif isinstance(expr, phpast.Constant):
const_value = self.constants.get(expr.name)
if not isinstance(const_value, str):
return "[PATH]"
if const_value is None:
const_value = "[PATH]"
self.expr_fails.append((expr, expr.lineno, self.current_file_tree.file_path ))
return const_value
else:
self.expr_fails.append((expr, expr.lineno, self.current_file_tree.file_path ))
return "[PATH]"
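# Worked example (hypothetical PHP input, described in comments only): for a
# statement like `require(TEMPLATE_DIR . "header.php");`, evaluate_require() walks
# the BinaryOp(".") node, resolves TEMPLATE_DIR through self.constants (filled in by
# the `define()` handling in visit()) and concatenates the pieces, e.g.
# "templates/" + "header.php" -> "templates/header.php". An undefined or non-string
# constant falls back to the "[PATH]" placeholder, and any other expression type is
# recorded in self.expr_fails before "[PATH]" is returned.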
class ResourceDependencyResolver(DependencyResolver):
def __init__(self, resource_tree_root, debug=False):
self.rt_root = resource_tree_root
self.namespace_stack = []
self.not_found = []
self.expr_fails = []
self.constants = {}
self.debug = debug
def follow_dependency(self, node, dependency_path, debug=False):
try:
sub_ast = self.rt_root.trees[dependency_path]
node.body = sub_ast
except:
if self.debug:
print(f"File not found at {dependency_path}")
self.not_found.append((dependency_path, node.lineno, self.current_file_tree.file_path))
def visit(self, current_node):
file_stack = [tree for tree in self.namespace_stack if \
isinstance(tree, syntax_tree.SyntaxTree)]
if file_stack:
last_file = file_stack[-1]
super().visit(current_node) # Connects the Included ASTs
if type(current_node) in (phpast.Include, phpast.Require):
if isinstance(current_node.body, syntax_tree.SyntaxTree):
self.rt_root.dep_table[last_file.file_path].append(current_node.body)
class TablesBuilder(Visitor):
def __init__(self, rt_root):
self.rt_root = rt_root
self.namespace_stack = []
def register_with(self, traverser):
self.namespace_stack = traverser.namespace_stack
def visit(self, current_node):
file_stack = [tree for tree in self.namespace_stack if \
isinstance(tree, syntax_tree.SyntaxTree)]
if not file_stack: return # Don't go further if we are not in a file yet
last_file = file_stack[-1]
if isinstance(current_node, phpast.Function):
self.rt_root.function_table[last_file.file_path][current_node.name] = current_node
elif isinstance(current_node, phpast.Method):
last_ns_node = self.namespace_stack[-1]
if isinstance(last_ns_node, phpast.Class):
self.rt_root.method_table[last_file.file_path][current_node.name] = (current_node, last_ns_node)
elif type(current_node) in (phpast.Include, phpast.Require):
if isinstance(current_node.body, syntax_tree.SyntaxTree):
self.rt_root.dep_table[last_file.file_path].append(current_node.body)
| 4,888 | 106 | 337 |
e3187245c38ccf41c17c623c9a0298ed26314124 | 547 | py | Python | decider.py | matez0/decide-raw | 1ad4aeb80539b2713eae62466d6672dd0d7711d6 | [
"MIT"
] | null | null | null | decider.py | matez0/decide-raw | 1ad4aeb80539b2713eae62466d6672dd0d7711d6 | [
"MIT"
] | null | null | null | decider.py | matez0/decide-raw | 1ad4aeb80539b2713eae62466d6672dd0d7711d6 | [
"MIT"
] | null | null | null | # coding: utf-8
#
# Copyright (c) 2019 Zoltán Máté
# All Rights Reserved.
#
# Author: Zoltán Máté <mate.zoltan0@gmail.com>
#
| 24.863636 | 81 | 0.674589 | # coding: utf-8
#
# Copyright (c) 2019 Zoltán Máté
# All Rights Reserved.
#
# Author: Zoltán Máté <mate.zoltan0@gmail.com>
#
def score(decider, values):
future_values = list(values)
orig_value = future_values.pop(0)
past_values = []
while future_values:
past_values.append(future_values.pop(0))
if decider.decide(orig_value, past_values):
return past_values[-1] - orig_value
return 0
def fitness(decider, values):
return sum((score(decider, values[orig:]) for orig in range(0, len(values))))
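# Illustrative sketch: a minimal decider exposing the decide(orig_value, past_values)
# interface that score() and fitness() above expect. The strategy and numbers are
# invented for the example.
class _ThresholdDecider:
    """Decide to exit once the newest value is at least `margin` above the entry value."""
    def __init__(self, margin):
        self.margin = margin
    def decide(self, orig_value, past_values):
        # past_values[-1] is the most recent value seen since the entry point
        return past_values[-1] - orig_value >= self.margin
# For example, score(_ThresholdDecider(2), [10, 11, 13, 9]) == 3 (exit at 13), and
# fitness(_ThresholdDecider(2), [10, 11, 13, 9]) == 3 + 2 + 0 + 0 == 5.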
| 375 | 0 | 46 |
40502073ddc75e5ccf9c6199875480768d1155af | 1,367 | py | Python | wezer_mail/utils.py | Abdur-rahmaanJ/wezer-mail | 6070ee45ae872961f10553a5946048cb74155ea3 | [
"MIT"
] | null | null | null | wezer_mail/utils.py | Abdur-rahmaanJ/wezer-mail | 6070ee45ae872961f10553a5946048cb74155ea3 | [
"MIT"
] | null | null | null | wezer_mail/utils.py | Abdur-rahmaanJ/wezer-mail | 6070ee45ae872961f10553a5946048cb74155ea3 | [
"MIT"
] | null | null | null | # send mail function
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
import time
from jinja2 import Environment, FileSystemLoader
import configparser | 26.803922 | 61 | 0.678127 | # send mail function
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
import time
from jinja2 import Environment, FileSystemLoader
import configparser
def send_mail(to_, subject_, body_):
config = configparser.ConfigParser()
config.read('settings.cfg')
MYMAIL = config['mail']['user']
MYPASS = config['mail']['password']
fromaddr = MYMAIL
toaddr = to_
thesub = subject_
thebody = body_
thepassword = MYPASS
domsmtp = 'smtp.gmail.com'
smtpport = 587  # needs an integer, not a string
msg = MIMEMultipart('alt text here')
msg.set_charset('utf8')
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = Header(thesub,'utf8')
_attach = MIMEText(thebody.encode('utf8'),'html','UTF-8')
msg.attach(_attach)
server = smtplib.SMTP(domsmtp, smtpport)
server.starttls()
server.login(fromaddr, thepassword)
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
server.quit()
print('mail sent')
def todays_date():
return time.strftime("%d/%m/%Y")
def template(filename_):
file_loader = FileSystemLoader('templates')
env = Environment(loader=file_loader)
template = env.get_template(filename_)
#output = template.render()
return template | 1,062 | 0 | 73 |
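# Usage sketch for the mail helpers above; addresses and credentials are placeholders.
# send_mail() reads a settings.cfg with a [mail] section, e.g.:
#
#     [mail]
#     user = sender@example.com
#     password = an-app-specific-password
#
# and can then be combined with template(), which loads Jinja templates from ./templates:
#
#     body = template("welcome.html").render(name="Ada")
#     send_mail("someone@example.com", "Welcome", body)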
83203dfab91326ca83bc4bf21da8b4b84b3751a4 | 3,007 | py | Python | apps/locations/models.py | rapidsms/rapidsms-legacy | 43c2ecd41fd1541a2538326edee3d9e816d84529 | [
"BSD-3-Clause"
] | null | null | null | apps/locations/models.py | rapidsms/rapidsms-legacy | 43c2ecd41fd1541a2538326edee3d9e816d84529 | [
"BSD-3-Clause"
] | null | null | null | apps/locations/models.py | rapidsms/rapidsms-legacy | 43c2ecd41fd1541a2538326edee3d9e816d84529 | [
"BSD-3-Clause"
] | 1 | 2019-11-02T19:35:54.000Z | 2019-11-02T19:35:54.000Z | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from django.db import models
from rapidsms.webui.managers import *
class Location(models.Model):
"""A Location is technically a geographical point (lat+long), but is often
used to represent a large area such as a city or state. It is recursive
via the _parent_ field, which can be used to create a hierarchy (Country
-> State -> County -> City) in combination with the _type_ field."""
objects = RecursiveManager()
type = models.ForeignKey(LocationType, related_name="locations", blank=True, null=True)
name = models.CharField(max_length=100, help_text="Name of location")
code = models.CharField(max_length=30, unique=True)
parent = models.ForeignKey("Location", related_name="children", null=True, blank=True,
help_text="The parent of this Location. Although it is not enforced, it " +\
"is expected that the parent will be of a different LocationType")
latitude = models.DecimalField(max_digits=8, decimal_places=6, blank=True, null=True, help_text="The physical latitude of this location")
longitude = models.DecimalField(max_digits=8, decimal_places=6, blank=True, null=True, help_text="The physical longitude of this location")
# TODO: how can we port the Location.contacts and Location.one_contact
# methods, now that the locations app has been split from reporters?
# even if they can import one another, they can't know if they're
# both running at parse time, and can't monkey-patch later.
def ancestors(self, include_self=False):
"""Returns all of the parent locations of this location,
optionally including itself in the output. This is
very inefficient, so consider caching the output."""
locs = [self] if include_self else []
loc = self
# keep on iterating
# until we return
while True:
locs.append(loc)
loc = loc.parent
# are we at the top?
if loc is None:
return locs
def descendants(self, include_self=False):
"""Returns all of the locations which are descended from this location,
optionally including itself in the output. This is very inefficient
(it recurses once for EACH), so consider caching the output."""
locs = [self] if include_self else []
for loc in self.children.all():
locs.extend(loc.descendants(True))
return locs
| 37.5875 | 143 | 0.645494 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from django.db import models
from rapidsms.webui.managers import *
class LocationType(models.Model):
name = models.CharField(max_length=100)
class Meta:
verbose_name = "Type"
def __unicode__(self):
return self.name
class Location(models.Model):
"""A Location is technically a geographical point (lat+long), but is often
used to represent a large area such as a city or state. It is recursive
via the _parent_ field, which can be used to create a hierarchy (Country
-> State -> County -> City) in combination with the _type_ field."""
objects = RecursiveManager()
type = models.ForeignKey(LocationType, related_name="locations", blank=True, null=True)
name = models.CharField(max_length=100, help_text="Name of location")
code = models.CharField(max_length=30, unique=True)
parent = models.ForeignKey("Location", related_name="children", null=True, blank=True,
help_text="The parent of this Location. Although it is not enforced, it " +\
"is expected that the parent will be of a different LocationType")
latitude = models.DecimalField(max_digits=8, decimal_places=6, blank=True, null=True, help_text="The physical latitude of this location")
longitude = models.DecimalField(max_digits=8, decimal_places=6, blank=True, null=True, help_text="The physical longitude of this location")
def __unicode__(self):
return self.name
# TODO: how can we port the Location.contacts and Location.one_contact
# methods, now that the locations app has been split from reporters?
# even if they can import one another, they can't know if they're
# both running at parse time, and can't monkey-patch later.
def one_contact(self, role, display=False):
return "Mr. Fixme"
def contacts(self, role=None):
return Location.objects.get(pk=2)
def ancestors(self, include_self=False):
"""Returns all of the parent locations of this location,
optionally including itself in the output. This is
very inefficient, so consider caching the output."""
locs = [self] if include_self else []
loc = self
# keep on iterating
# until we return
while True:
locs.append(loc)
loc = loc.parent
# are we at the top?
if loc is None:
return locs
def descendants(self, include_self=False):
"""Returns all of the locations which are descended from this location,
optionally including itself in the output. This is very inefficient
(it recurses once for EACH), so consider caching the output."""
locs = [self] if include_self else []
for loc in self.children.all():
locs.extend(loc.descendants(True))
return locs
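# A usage sketch for ancestors()/descendants(); the names and codes are invented and
# nothing below runs at import time.
def _example_hierarchy():
    """Build a tiny Country -> State -> City chain and walk it both ways."""
    country = Location.objects.create(name="Example Country", code="XC")
    state = Location.objects.create(name="Example State", code="XC-ST", parent=country)
    city = Location.objects.create(name="Example City", code="XC-ST-CT", parent=state)
    # ancestors() climbs the parent chain; descendants() recurses over children.
    return city.ancestors(include_self=True), country.descendants(include_self=True)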
| 152 | 143 | 107 |
4dee88957fb6c3f5eeefee590940ef8910725319 | 73 | py | Python | fisherman/overlap/all.py | BorjaRequena/Quantum-Fisherman | 57e38bfeb7b184bc60d200030a5a673e5f06fb62 | [
"Apache-2.0"
] | 6 | 2021-05-05T13:59:17.000Z | 2021-12-11T06:06:30.000Z | fisherman/overlap/all.py | BorjaRequena/Quantum-Fisherman | 57e38bfeb7b184bc60d200030a5a673e5f06fb62 | [
"Apache-2.0"
] | null | null | null | fisherman/overlap/all.py | BorjaRequena/Quantum-Fisherman | 57e38bfeb7b184bc60d200030a5a673e5f06fb62 | [
"Apache-2.0"
] | null | null | null | from .randomized import *
from .swap import *
from .comp_uncomp import *
| 18.25 | 26 | 0.753425 | from .randomized import *
from .swap import *
from .comp_uncomp import *
| 0 | 0 | 0 |
0eae708db50a092a1f792e3bbbd1ae39f97467ef | 6,730 | py | Python | kit_django/restAPICore/views.py | safakoner/kit | aec36a70137febfb5f3e3a9205ea58879736eea4 | [
"MIT"
] | 6 | 2020-06-29T20:36:15.000Z | 2021-09-08T23:34:01.000Z | kit_django/restAPICore/views.py | safakoner/kit | aec36a70137febfb5f3e3a9205ea58879736eea4 | [
"MIT"
] | 9 | 2021-03-30T13:46:29.000Z | 2022-03-12T00:38:27.000Z | kit_django/restAPICore/views.py | safakoner/kit | aec36a70137febfb5f3e3a9205ea58879736eea4 | [
"MIT"
] | 1 | 2020-07-20T18:40:24.000Z | 2020-07-20T18:40:24.000Z | #
# ----------------------------------------------------------------------------------------------------
# DESCRIPTION
# ----------------------------------------------------------------------------------------------------
#
# ----------------------------------------------------------------------------------------------------
# IMPORTS
# ----------------------------------------------------------------------------------------------------
from django.http import Http404
from rest_framework import status
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.views import APIView
from userAccount.authentications import UserAccountAuthentication
from userAccount.permissions import UserAccountSuperUserPermission
#
# ----------------------------------------------------------------------------------------------------
# CODE
# ----------------------------------------------------------------------------------------------------
#
## @brief [ REST FRAMEWORK API VIEW CLASS ] - REST framework simple collection API view class.
#
## @brief [ REST FRAMEWORK API VIEW CLASS ] - REST framework simple detail API view class. | 37.597765 | 120 | 0.502823 | #
# ----------------------------------------------------------------------------------------------------
# DESCRIPTION
# ----------------------------------------------------------------------------------------------------
#
# ----------------------------------------------------------------------------------------------------
# IMPORTS
# ----------------------------------------------------------------------------------------------------
from django.http import Http404
from rest_framework import status
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.views import APIView
from userAccount.authentications import UserAccountAuthentication
from userAccount.permissions import UserAccountSuperUserPermission
#
# ----------------------------------------------------------------------------------------------------
# CODE
# ----------------------------------------------------------------------------------------------------
#
## @brief [ REST FRAMEWORK API VIEW CLASS ] - REST framework simple collection API view class.
class SimpleCollectionAPIView(APIView):
## [ tuple ] - Authentication classes.
authentication_classes = (UserAccountAuthentication,)
## [ tuple ] - Permission classes.
permission_classes = (UserAccountSuperUserPermission,)
## [ django.db.models.Model ] - Model class object.
MODEL_CLASS = None
## [ rest_framework.serializers.ModelSerializer ] - REST framework model serializer class object.
MODEL_CLASS_SERIALIZER = None
#
# ------------------------------------------------------------------------------------------------
# PUBLIC METHODS
# ------------------------------------------------------------------------------------------------
#
## @brief Get collection.
#
# @param request [ rest_framework.request.Request | None | in ] - Request.
# @param format [ str | None | in ] - Format.
#
# @exception N/A
#
# @return rest_framework.response.Response - Response.
def get(self, request, format=None):
_objects = None
if hasattr(self.MODEL_CLASS, 'is_active'):
_objects = self.MODEL_CLASS.objects.filter(is_active=True)
else:
_objects = self.MODEL_CLASS.objects.filter()
paginator = LimitOffsetPagination()
result = paginator.paginate_queryset(_objects, request)
serializer = self.MODEL_CLASS_SERIALIZER(result,
many=True,
context={'request': request}
)
return Response(serializer.data, status=status.HTTP_200_OK)
#
## @brief Post.
#
# @param request [ rest_framework.request.Request | None | in ] - Request.
# @param format [ str | None | in ] - Format.
#
# @exception N/A
#
# @return rest_framework.response.Response - Response.
def post(self, request, format=None):
serializer = self.MODEL_CLASS_SERIALIZER(data=request.data, context={'request': request})
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
#
## @brief [ REST FRAMEWORK API VIEW CLASS ] - REST framework simple detail API view class.
class SimpleDetailAPIView(APIView):
## [ tuple ] - Authentication classes.
authentication_classes = (UserAccountAuthentication,)
## [ tuple ] - Permission classes.
permission_classes = (UserAccountSuperUserPermission,)
## [ django.db.models.Model ] - Model class object.
MODEL_CLASS = None
## [ rest_framework.serializers.ModelSerializer ] - REST framework model serializer class object.
MODEL_CLASS_SERIALIZER = None
#
# ------------------------------------------------------------------------------------------------
# PUBLIC METHODS
# ------------------------------------------------------------------------------------------------
#
## @brief Get object with given PK.
#
# @param pk [ int | None | in ] - Primary key.
#
# @exception django.http.Http404 - If object with given PK doesn't exist.
#
# @return django.db.models.Model - Model instance.
def get_object(self, pk):
try:
return self.MODEL_CLASS.objects.get(pk=pk)
except self.MODEL_CLASS.DoesNotExist:
raise Http404
#
## @brief Get detail.
#
# @param request [ rest_framework.request.Request | None | in ] - Request.
# @param pk [ int | None | in ] - Primary key.
# @param format [ str | None | in ] - Format.
#
# @exception N/A
#
# @return rest_framework.response.Response - Response.
def get(self, request, pk, format=None):
_object = self.get_object(pk)
serializer = self.MODEL_CLASS_SERIALIZER(_object, context={'request': request})
return Response(serializer.data)
#
## @brief Patch detail.
#
# @param request [ rest_framework.request.Request | None | in ] - Request.
# @param pk [ int | None | in ] - Primary key.
# @param format [ str | None | in ] - Format.
#
# @exception N/A
#
# @return rest_framework.response.Response - Response.
def patch(self, request, pk, format=None):
_object = self.get_object(pk)
serializer = self.MODEL_CLASS_SERIALIZER(_object, data=request.data, context={'request':request}, partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
#
## @brief Delete detail.
#
# @param request [ rest_framework.request.Request | None | in ] - Request.
# @param pk [ int | None | in ] - Primary key.
# @param format [ str | None | in ] - Format.
#
# @exception N/A
#
# @return rest_framework.response.Response - Response.
def delete(self, request, pk, format=None):
_object = self.get_object(pk)
_object.delete()
return Response({'id': pk}, status=status.HTTP_200_OK) | 1,901 | 3,522 | 44 |
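# Editor's note: the block below is an illustrative usage sketch added for clarity; it is
# NOT part of the original kit_django package. It shows the intended way to use the two
# generic view classes above: a concrete endpoint only points MODEL_CLASS and
# MODEL_CLASS_SERIALIZER at a Django model and its REST framework serializer. The
# "project" app, "Project" model and "ProjectSerializer" below are hypothetical names
# invented for this example.
from project.models import Project
from project.serializers import ProjectSerializer
## @brief [ REST FRAMEWORK API VIEW CLASS ] - Example collection endpoint for Project.
class ProjectCollectionAPIView(SimpleCollectionAPIView):
    ## [ django.db.models.Model ] - Model class object.
    MODEL_CLASS = Project
    ## [ rest_framework.serializers.ModelSerializer ] - REST framework model serializer class object.
    MODEL_CLASS_SERIALIZER = ProjectSerializer
## @brief [ REST FRAMEWORK API VIEW CLASS ] - Example detail endpoint for Project.
class ProjectDetailAPIView(SimpleDetailAPIView):
    ## [ django.db.models.Model ] - Model class object.
    MODEL_CLASS = Project
    ## [ rest_framework.serializers.ModelSerializer ] - REST framework model serializer class object.
    MODEL_CLASS_SERIALIZER = ProjectSerializer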
18670e302470b64e0aff41fcdc84db343061d262 | 11,285 | py | Python | model-optimizer/extensions/front/tf/CTCLossReplacement.py | Andruxin52rus/openvino | d824e371fe7dffb90e6d3d58e4e34adecfce4606 | [
"Apache-2.0"
] | 2 | 2021-02-26T15:46:19.000Z | 2021-05-16T20:48:13.000Z | model-optimizer/extensions/front/tf/CTCLossReplacement.py | Andruxin52rus/openvino | d824e371fe7dffb90e6d3d58e4e34adecfce4606 | [
"Apache-2.0"
] | 30 | 2020-11-13T11:44:07.000Z | 2022-02-21T13:03:16.000Z | model-optimizer/extensions/front/tf/CTCLossReplacement.py | mmakridi/openvino | 769bb7709597c14debdaa356dd60c5a78bdfa97e | [
"Apache-2.0"
] | 1 | 2020-12-18T15:47:45.000Z | 2020-12-18T15:47:45.000Z | """
Copyright (C) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import logging as log
from extensions.ops.Cast import Cast
from extensions.ops.ctc_greedy_decoder import CTCGreedyDecoderOp
from extensions.ops.ctc_loss import CTCLoss
from extensions.ops.elementwise import Equal
from extensions.ops.parameter import Parameter
from extensions.ops.ReduceOps import ReduceSum
from extensions.ops.select import Select
from extensions.ops.transpose import Transpose
from mo.front.common.partial_infer.utils import int64_array
from mo.front.common.replacement import FrontReplacementSubgraph
from mo.front.tf.graph_utils import create_op_with_const_inputs
from mo.graph.graph import Graph, rename_nodes
from mo.middle.passes.convert_data_type import data_type_str_to_np
from mo.ops.broadcast import Broadcast
from mo.ops.shape import Shape
from mo.ops.squeeze import Squeeze
from mo.utils.error import Error
class CTCLossReplacement(FrontReplacementSubgraph):
"""
The CTCLoss appears along with CTCGreedyDecoder operation in particular. Since the TensorFlow* CTCGreedyDecoder
outputs sparse tensor format, the OpenVINO CTCGreedyDecoder has a different format and the CTCLoss is also affected
in terms of different format for its inputs. So the corresponding sub-graph with CTCGreedyDecoding and CTCLoss
must be transformed properly.
Also, the transformation changes the input sequence length format into a mask format. For example, 1D tensor of
    sequence lengths equal to [4 2] is coded as a 2D tensor [[1 1 1 1 0], [1 1 0 0 0]] with a time dimension
    equal to 5.
"""
enabled = True
| 60.347594 | 127 | 0.652193 | """
Copyright (C) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import logging as log
from extensions.ops.Cast import Cast
from extensions.ops.ctc_greedy_decoder import CTCGreedyDecoderOp
from extensions.ops.ctc_loss import CTCLoss
from extensions.ops.elementwise import Equal
from extensions.ops.parameter import Parameter
from extensions.ops.ReduceOps import ReduceSum
from extensions.ops.select import Select
from extensions.ops.transpose import Transpose
from mo.front.common.partial_infer.utils import int64_array
from mo.front.common.replacement import FrontReplacementSubgraph
from mo.front.tf.graph_utils import create_op_with_const_inputs
from mo.graph.graph import Graph, rename_nodes
from mo.middle.passes.convert_data_type import data_type_str_to_np
from mo.ops.broadcast import Broadcast
from mo.ops.shape import Shape
from mo.ops.squeeze import Squeeze
from mo.utils.error import Error
class CTCLossReplacement(FrontReplacementSubgraph):
"""
The CTCLoss appears along with CTCGreedyDecoder operation in particular. Since the TensorFlow* CTCGreedyDecoder
outputs sparse tensor format, the OpenVINO CTCGreedyDecoder has a different format and the CTCLoss is also affected
in terms of different format for its inputs. So the corresponding sub-graph with CTCGreedyDecoding and CTCLoss
must be transformed properly.
Also, the transformation changes the input sequence length format into a mask format. For example, 1D tensor of
    sequence lengths equal to [4 2] is coded as a 2D tensor [[1 1 1 1 0], [1 1 0 0 0]] with a time dimension
    equal to 5.
"""
enabled = True
def run_before(self):
from extensions.front.tf.CTCGreedyDecoderReplacement import CTCGreedyDecoderReplacement
return [CTCGreedyDecoderReplacement]
def pattern(self):
return dict(
nodes=[
('seq_len', dict(op='Parameter')),
('transpose', dict(op='Transpose')),
('ctc_greedy_decoder', dict(op='CTCGreedyDecoder')),
('cast', dict(op='Cast')),
('sparse_to_dense', dict(op='SparseToDense')),
('const', dict(op='Const')),
('ctc_loss', dict(op='CTCLoss')),
],
edges=[
('seq_len', 'ctc_greedy_decoder', {'out': 0, 'in': 1}),
('seq_len', 'ctc_loss', {'out': 0, 'in': 3}),
('transpose', 'ctc_greedy_decoder', {'out': 0, 'in': 0}),
('transpose', 'ctc_loss', {'out': 0, 'in': 0}),
('ctc_greedy_decoder', 'sparse_to_dense', {'out': 0, 'in': 0}),
('ctc_greedy_decoder', 'sparse_to_dense', {'out': 2, 'in': 1}),
('ctc_greedy_decoder', 'sparse_to_dense', {'out': 1, 'in': 2}),
('const', 'sparse_to_dense', {'out': 0, 'in': 3}),
('ctc_greedy_decoder', 'cast', {'out': 1, 'in': 0}),
('ctc_greedy_decoder', 'ctc_loss', {'out': 0, 'in': 1}),
('cast', 'ctc_loss', {'out': 0, 'in': 2})
])
def replace_sub_graph(self, graph: Graph, match: dict):
seq_len_tf = match['seq_len']
transpose_tf = match['transpose']
ctc_greedy_decoder_tf = match['ctc_greedy_decoder']
cast_tf = match['cast']
ctc_loss_tf = match['ctc_loss']
sparse_to_dense_tf = match['sparse_to_dense']
output_sparse_to_dense_name = sparse_to_dense_tf.soft_get('name', sparse_to_dense_tf.id)
output_ctc_loss_name = ctc_loss_tf.soft_get('name', ctc_loss_tf.id)
ctc_greedy_decoder_tf_name = ctc_greedy_decoder_tf.soft_get('name', ctc_greedy_decoder_tf.id)
log.debug('Found CTCLossFrontReplacer pattern after {} with name {}'.format(ctc_greedy_decoder_tf.op,
ctc_greedy_decoder_tf.name))
# create sequence mask node, sub-graph for transforming into sequence length and connect with consumers
seq_len_tf_shape = seq_len_tf.soft_get('shape', None)
if seq_len_tf_shape is None or len(seq_len_tf_shape) != 2:
raise Error('The sequence length that is the second input to the CTCGreedyDecoder node "{}"'
' must be specified in a mask format.'.format(ctc_greedy_decoder_tf_name))
log.error('The format of input sequence length has been changed to a mask format', extra={'is_warning': True})
seq_len_tf_type = seq_len_tf.soft_get('data_type', None)
seq_len_tf_name = seq_len_tf.soft_get('name', seq_len_tf.id)
seq_mask_placeholder = Parameter(graph, {'name': seq_len_tf_name, 'shape': seq_len_tf_shape,
'data_type': seq_len_tf_type}).create_node()
reduce_to_seq_len_node = create_op_with_const_inputs(graph, ReduceSum, {1: np.array(1, dtype=np.int32)},
{'name': seq_len_tf_name + '/ReduceToSeqLen',
'keep_dims': False})
reduce_to_seq_len_node.in_port(0).connect(seq_mask_placeholder.out_port(0))
seq_len_tf.out_port(0).get_connection().set_source(reduce_to_seq_len_node.out_port(0))
cast_fp_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)
casted_seq_mask_node = Cast(graph, {'name': seq_len_tf_name + '/CastToFP32', 'dst_type': cast_fp_type}).create_node()
casted_seq_mask_node.in_port(0).connect(seq_mask_placeholder.out_port(0))
permuted_casted_seq_mask = create_op_with_const_inputs(graph, Transpose, {1: int64_array([1, 0])},
{'name': seq_len_tf_name + '/Permute'})
permuted_casted_seq_mask.in_port(0).connect(casted_seq_mask_node.out_port(0))
rename_nodes([(seq_len_tf, seq_len_tf_name + '/AbandonedName'), (seq_mask_placeholder, seq_len_tf_name)])
# create CTCGreedyDecoder node and set mask node
ctc_merge_repeated_i = ctc_greedy_decoder_tf.soft_get('ctc_merge_repeated', ctc_greedy_decoder_tf.id)
ctc_greedy_decoder = CTCGreedyDecoderOp(graph, {'name': output_sparse_to_dense_name,
'ctc_merge_repeated': ctc_merge_repeated_i}).create_node()
ctc_greedy_decoder.in_port(1).connect(permuted_casted_seq_mask.out_port(0))
rename_nodes([(sparse_to_dense_tf, output_sparse_to_dense_name + '/AbandonedName'),
(ctc_greedy_decoder, output_sparse_to_dense_name)])
# create CTCLoss node and set attributes
assert ctc_loss_tf.has_valid('preprocess_collapse_repeated'), \
'The CTCLoss node "{}" misses "preprocess_collapse_repeated" attribute'.format(output_ctc_loss_name)
assert ctc_loss_tf.has_valid('ctc_merge_repeated'), \
'The CTCLoss node "{}" misses "ctc_merge_repeated" attribute'.format(output_ctc_loss_name)
assert ctc_loss_tf.has_valid('unique'), \
'The CTCLoss node "{}" misses "unique" attribute'.format(output_ctc_loss_name)
preprocess_collapse_repeated = ctc_loss_tf.preprocess_collapse_repeated
ctc_merge_repeated = ctc_loss_tf.ctc_merge_repeated
unique = ctc_loss_tf.unique
ctc_loss = CTCLoss(graph, {'name': output_ctc_loss_name,
'preprocess_collapse_repeated': preprocess_collapse_repeated,
'ctc_merge_repeated': ctc_merge_repeated,
'unique': unique}).create_node()
rename_nodes([(ctc_loss_tf, output_ctc_loss_name + '/AbandonedName'), (ctc_loss, output_ctc_loss_name)])
# connect logits
ctc_greedy_decoder_tf.in_port(0).get_connection().set_destination(ctc_greedy_decoder.in_port(0))
ctc_loss.in_port(0).disconnect()
transpose_tf.in_port(0).get_connection().add_destination(ctc_loss.in_port(0))
# connect logit lengths
ctc_greedy_decoder_tf.in_port(1).disconnect()
ctc_loss.in_port(1).connect(reduce_to_seq_len_node.out_port(0))
# connect labels to ctc_loss
squeeze_op = create_op_with_const_inputs(graph, Squeeze, {1: int64_array([2, 3])})
cast_labels_op = Cast(graph, {'name': output_sparse_to_dense_name + '/CastLabels', 'dst_type': np.int32}).create_node()
squeeze_op.in_port(0).connect(ctc_greedy_decoder.out_port(0))
cast_labels_op.in_port(0).connect(squeeze_op.out_port(0))
ctc_loss.in_port(2).connect(cast_labels_op.out_port(0))
# connect label lengths
equal_op = create_op_with_const_inputs(graph, Equal, {1: np.array([-1], dtype=np.int32)},
{'name': output_sparse_to_dense_name + '/Equal'})
equal_op.in_port(0).connect(cast_labels_op.out_port(0))
labels_shape_op = Shape(graph, {'name': output_sparse_to_dense_name + '/ShapeOf'}).create_node()
labels_shape_op.in_port(0).connect(equal_op.out_port(0))
broadcast_one = create_op_with_const_inputs(graph, Broadcast, {0: np.array([1], dtype=np.int32)},
{'mode': 'numpy',
'name': output_sparse_to_dense_name + '/One'})
broadcast_one.in_port(1).connect(labels_shape_op.out_port(0))
broadcast_zero = create_op_with_const_inputs(graph, Broadcast, {0: np.array([0], dtype=np.int32)},
{'mode': 'numpy',
'name': output_sparse_to_dense_name + '/Zero'})
broadcast_zero.in_port(1).connect(labels_shape_op.out_port(0))
select_node = Select(graph, {'name': output_sparse_to_dense_name + '/Select'}).create_node()
select_node.in_port(0).connect(equal_op.out_port(0))
select_node.in_port(1).connect(broadcast_zero.out_port(0))
select_node.in_port(2).connect(broadcast_one.out_port(0))
label_length_node = create_op_with_const_inputs(graph, ReduceSum, {1: int64_array([1])},
op_attrs={'name': output_sparse_to_dense_name + '/LabelLength',
'keep_dims': False})
label_length_node.in_port(0).connect(select_node.out_port(0))
ctc_loss.in_port(3).connect(label_length_node.out_port(0))
# set source for output of new sub-graph and remove old nodes
ctc_loss_tf.out_port(0).get_connection().set_source(ctc_loss.out_port(0))
graph.remove_nodes_from([ctc_greedy_decoder_tf.id, ctc_loss_tf.id, cast_tf.id, sparse_to_dense_tf.id])
| 9,050 | 0 | 81 |
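# Editor's note: a small, self-contained illustration added for clarity; it is not part
# of the original OpenVINO sources. It reproduces the sequence-length-to-mask conversion
# described in the CTCLossReplacement docstring above: lengths [4, 2] with a time
# dimension of 5 become the 2D mask [[1 1 1 1 0], [1 1 0 0 0]].
import numpy as np

def sequence_lengths_to_mask(seq_lens, time_dim):
    # Row i contains seq_lens[i] ones followed by zeros, up to time_dim columns.
    return (np.arange(time_dim)[None, :] < np.asarray(seq_lens)[:, None]).astype(np.int32)

if __name__ == '__main__':
    print(sequence_lengths_to_mask([4, 2], 5))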
75fc5e253de92da4a5b43457184480ef836f5afe | 253 | py | Python | gcn/csv_output.py | PeterChen607/Spectral-based-GCN-with-Attention-mechanism | dfa5798e81410440e503a636e919fb5e5214f081 | [
"MIT"
] | 3 | 2020-08-04T10:14:36.000Z | 2021-06-28T02:26:18.000Z | gcn/csv_output.py | PeterChen607/Spectral-based-GCN-with-Attention-mechanism | dfa5798e81410440e503a636e919fb5e5214f081 | [
"MIT"
] | 4 | 2020-11-13T19:00:00.000Z | 2022-02-10T02:06:46.000Z | gcn/csv_output.py | PeterChen607/Spectral-based-GCN-with-Attention-mechanism | dfa5798e81410440e503a636e919fb5e5214f081 | [
"MIT"
] | 1 | 2021-06-28T02:29:57.000Z | 2021-06-28T02:29:57.000Z | import csv | 18.071429 | 54 | 0.581028 | import csv
def csv_output(path, table):
    f = open(path, 'w', encoding='utf-8', newline='')
    # 2. Build a csv writer object on top of the file object
    csv_writer = csv.writer(f)
    # 4. Write the rows of the table into the csv file
    for i in table:
        csv_writer.writerow(i)
    # 5. Close the file
f.close() | 264 | 0 | 23 |
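# Editor's note: illustrative usage of csv_output() above, added for clarity; it is not
# part of the original repository. The file name and rows are made up for the example.
if __name__ == '__main__':
    csv_output('example_output.csv', [['node', 'label'], ['n1', 0], ['n2', 1]])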
6df332d10e41fb18978a39581a88073aeb9deebf | 21,214 | py | Python | Code/TrustRegion.py | iTsingalis/torch-trust-ncg | d45b8eb8f4b45c9bdaa1801bdc9f099f90ae598e | [
"MIT"
] | null | null | null | Code/TrustRegion.py | iTsingalis/torch-trust-ncg | d45b8eb8f4b45c9bdaa1801bdc9f099f90ae598e | [
"MIT"
] | null | null | null | Code/TrustRegion.py | iTsingalis/torch-trust-ncg | d45b8eb8f4b45c9bdaa1801bdc9f099f90ae598e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @author Vasileios Choutas
# Contact: vassilis.choutas@tuebingen.mpg.de
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from typing import NewType, List, Tuple
import torch
from torch import norm
import torch.optim as optim
import torch.autograd as autograd
import math
Tensor = NewType('Tensor', torch.Tensor)
| 38.431159 | 119 | 0.555765 | # -*- coding: utf-8 -*-
# @author Vasileios Choutas
# Contact: vassilis.choutas@tuebingen.mpg.de
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from typing import NewType, List, Tuple
import torch
from torch import norm
import torch.optim as optim
import torch.autograd as autograd
import math
Tensor = NewType('Tensor', torch.Tensor)
def eye_like(tensor, device):
return torch.eye(*tensor.size(), out=torch.empty_like(tensor, device=device), device=device)
class TrustRegion(optim.Optimizer):
def __init__(self, params: List[Tensor],
max_trust_radius: float = 1000,
initial_trust_radius: float = 0.5,
eta: float = 0.15,
gtol: float = 1e-05,
kappa_easy: float = 0.1,
max_newton_iter: int = 50,
max_krylov_dim: int = 15,
lanczos_tol: float = 1e-4,
opt_method='krylov',
**kwargs) -> None:
""" Trust Region
Newton Conjugate Gradient
Uses the Conjugate Gradient Algorithm to find the solution of the
trust region sub-problem. For more details see Algorithm 7.2 of
"Numerical Optimization, Nocedal and Wright"
Generalized Lanczos Method
            Uses the Generalized Lanczos Algorithm to find the solution of the
            trust region sub-problem. For more details see Algorithm 7.5.2 of
"Trust Region Methods, Conn et al."
Arguments:
params (iterable): A list or iterable of tensors that will be
optimized
max_trust_radius: float
The maximum value for the trust radius
initial_trust_radius: float
The initial value for the trust region
eta: float
                Minimum improvement ratio for accepting a step
kappa_easy: float
Parameter related to the convergence of Krylov method, see Lemma 7.3.5 Conn et al.
max_newton_iter: int
Maximum Newton iterations for root finding
max_krylov_dim: int
Maximum Krylov dimension
lanczos_tol: float
Approximation error of the optimizer in Krylov subspace, see Theorem 7.5.10 Conn et al.
opt_method: string
The method to solve the subproblem.
gtol: float
Gradient tolerance for stopping the optimization
"""
defaults = dict()
super(TrustRegion, self).__init__(params, defaults)
self.steps = 0
self.max_trust_radius = max_trust_radius
self.initial_trust_radius = initial_trust_radius
self.eta = eta
self.gtol = gtol
self._params = self.param_groups[0]['params']
self.kappa_easy = kappa_easy
self.opt_method = opt_method
self.lanczos_tol = lanczos_tol
self.max_krylov_dim = max_krylov_dim
self.max_newton_iter = max_newton_iter
self.kwargs = kwargs
self.T_lambda = lambda _lambda, T_x, device: T_x.to(device) + _lambda * eye_like(T_x, device)
self.lambda_const = lambda lambda_k: (1 + lambda_k) * torch.sqrt(torch.tensor(torch.finfo(torch.float32).eps))
if not (opt_method == 'cg' or opt_method == 'krylov'):
raise ValueError('opt_method should be "cg" or "krylov"')
@torch.enable_grad()
def _compute_hessian_vector_product(
self,
gradient: Tensor,
p: Tensor) -> Tensor:
hess_vp = autograd.grad(
torch.sum(gradient * p, dim=-1), self._params,
only_inputs=True, retain_graph=True, allow_unused=True)
return torch.cat([torch.flatten(vp) for vp in hess_vp], dim=-1)
# hess_vp = torch.cat(
# [torch.flatten(vp) for vp in hess_vp], dim=-1)
# return torch.flatten(hess_vp)
def _gather_flat_grad(self) -> Tensor:
""" Concatenates all gradients into a single gradient vector
"""
views = []
for p in self._params:
if p.grad is None:
view = p.data.new(p.data.numel()).zero_()
elif p.grad.data.is_sparse:
view = p.grad.to_dense().view(-1)
else:
view = p.grad.view(-1)
views.append(view)
output = torch.cat(views, 0)
return output
@torch.no_grad()
def _improvement_ratio(self, p, start_loss, gradient, closure):
""" Calculates the ratio of the actual to the expected improvement
Arguments:
p (torch.tensor): The update vector for the parameters
start_loss (torch.tensor): The value of the loss function
before applying the optimization step
gradient (torch.tensor): The flattened gradient vector of the
parameters
closure (callable): The function that evaluates the loss for
the current values of the parameters
Returns:
The ratio of the actual improvement of the loss to the expected
improvement, as predicted by the local quadratic model
"""
# Apply the update on the parameter to calculate the loss on the new
# point
hess_vp = self._compute_hessian_vector_product(gradient, p)
# Apply the update of the parameter vectors.
# Use a torch.no_grad() context since we are updating the parameters in
# place
with torch.no_grad():
start_idx = 0
for param in self._params:
num_els = param.numel()
curr_upd = p[start_idx:start_idx + num_els]
param.data.add_(curr_upd.view_as(param))
start_idx += num_els
# No need to backpropagate since we only need the value of the loss at
# the new point to find the ratio of the actual and the expected
# improvement
new_loss = closure(backward=False)
# The numerator represents the actual loss decrease
numerator = start_loss - new_loss
new_quad_val = self._quad_model(p, start_loss, gradient, hess_vp)
# The denominator
denominator = start_loss - new_quad_val
# TODO: Convert to epsilon, print warning
ratio = numerator / (denominator + 1e-20)
return ratio
@torch.no_grad()
def _quad_model(
self,
p: Tensor,
loss: float,
gradient: Tensor,
hess_vp: Tensor) -> float:
""" Returns the value of the local quadratic approximation
"""
return (loss + torch.flatten(gradient * p).sum(dim=-1) +
0.5 * torch.flatten(hess_vp * p).sum(dim=-1))
@torch.no_grad()
def calc_boundaries(
self,
iterate: Tensor,
direction: Tensor,
trust_radius: float) -> Tuple[Tensor, Tensor]:
""" Calculates the offset to the boundaries of the trust region
"""
a = torch.sum(direction ** 2)
b = 2 * torch.sum(direction * iterate)
c = torch.sum(iterate ** 2) - trust_radius ** 2
sqrt_discriminant = torch.sqrt(b * b - 4 * a * c)
ta = (-b + sqrt_discriminant) / (2 * a)
tb = (-b - sqrt_discriminant) / (2 * a)
if ta.item() < tb.item():
return [ta, tb]
else:
return [tb, ta]
@torch.no_grad()
def _solve_subproblem_cg(
self,
loss: float,
flat_grad: Tensor,
trust_radius: float) -> Tuple[Tensor, bool]:
''' Solves the quadratic subproblem in the trust region
'''
# The iterate vector that contains the increment from the starting
# point
iterate = torch.zeros_like(flat_grad, requires_grad=False)
# The residual of the CG algorithm
residual = flat_grad.detach()
# The first direction of descent
direction = -residual
jac_mag = torch.norm(flat_grad).item()
# Tolerance define in Nocedal & Wright in chapter 7.1
tolerance = min(0.5, math.sqrt(jac_mag)) * jac_mag
# If the magnitude of the gradients is smaller than the tolerance then
# exit
if jac_mag <= tolerance:
return iterate, False
# Iterate to solve the subproblem
while True:
# Calculate the Hessian-Vector product
# start = time.time()
hessian_vec_prod = self._compute_hessian_vector_product(
flat_grad, direction
)
# torch.cuda.synchronize()
# print('Hessian Vector Product', time.time() - start)
# This term is equal to p^T * H * p
# start = time.time()
hevp_dot_prod = torch.sum(hessian_vec_prod * direction)
# print('p^T H p', time.time() - start)
# If non-positive curvature
if hevp_dot_prod.item() <= 0:
# Find boundaries and select minimum
# start = time.time()
ta, tb = self.calc_boundaries(iterate, direction, trust_radius)
pa = iterate + ta * direction
pb = iterate + tb * direction
# Calculate the point on the boundary with the smallest value
bound1_val = self._quad_model(pa, loss, flat_grad,
hessian_vec_prod)
bound2_val = self._quad_model(pb, loss, flat_grad,
hessian_vec_prod)
# torch.cuda.synchronize()
# print('First if', time.time() - start)
# print()
if bound1_val.item() < bound2_val.item():
return pa, True
else:
return pb, True
# The squared euclidean norm of the residual needed for the CG
# update
# start = time.time()
residual_sq_norm = torch.sum(residual * residual, dim=-1)
# Compute the step size for the CG algorithm
cg_step_size = residual_sq_norm / hevp_dot_prod
# Update the point
next_iterate = iterate + cg_step_size * direction
iterate_norm = torch.norm(next_iterate, dim=-1)
# torch.cuda.synchronize()
# print('CG Updates', time.time() - start)
# If the point is outside of the trust region project it on the
# border and return
if iterate_norm.item() >= trust_radius:
# start = time.time()
ta, tb = self.calc_boundaries(iterate, direction, trust_radius)
p_boundary = iterate + tb * direction
# torch.cuda.synchronize()
# print('Second if', time.time() - start)
# print()
return p_boundary, True
# start = time.time()
# Update the residual
next_residual = residual + cg_step_size * hessian_vec_prod
# torch.cuda.synchronize()
# print('Residual update', time.time() - start)
# If the residual is small enough, exit
if torch.norm(next_residual, dim=-1).item() < tolerance:
# print()
return next_iterate, False
# start = time.time()
beta = torch.sum(next_residual ** 2, dim=-1) / residual_sq_norm
# Compute the new search direction
direction = (-next_residual + beta * direction).squeeze()
if torch.isnan(direction).sum() > 0:
raise RuntimeError
iterate = next_iterate
residual = next_residual
# torch.cuda.synchronize()
# print('Replacing vectors', time.time() - start)
# print(trust_radius)
# print()
@torch.no_grad()
def _converged(self, s, trust_radius):
if abs(norm(s) - trust_radius) <= self.kappa_easy * trust_radius:
return True
else:
return False
@torch.no_grad()
def _lambda_one_plus(self, T, device):
eigen_pairs = torch.linalg.eigh(T)
Lambda, U = eigen_pairs.eigenvalues, eigen_pairs.eigenvectors
lambda_n, u_n = Lambda[0].to(device=device), U[:, 0].to(device=device)
return torch.maximum(-lambda_n, torch.tensor([0], device=device)), lambda_n, u_n[:, None]
@torch.no_grad()
def _quad_model_krylov(
self,
lanczos_g: Tensor,
loss: float,
s_x: Tensor,
T_x: Tensor) -> float:
"""
Returns the value of the local quadratic approximation
"""
return (loss + torch.sum(lanczos_g * s_x) + 1 / 2 * torch.sum(T_x.mm(s_x) * s_x)).item()
def _root_finder(self, trust_radius, T_x, lanczos_g, loss, device):
n_iter_nu, n_iter_r = 0, 0
lambda_k, lambda_n, u_n = self._lambda_one_plus(T_x, device)
lambda_const = self.lambda_const(lambda_k).to(device=device)
if lambda_k == 0: # T_x is positive definite
_lambda = torch.tensor([0], dtype=torch.float32, device=device) # + lambda_const
else:
_lambda = lambda_k + lambda_const
s, L = self._compute_s(_lambda=_lambda, lambda_const=lambda_const, lanczos_g=lanczos_g, T_x=T_x, device=device)
if norm(s) <= trust_radius:
if _lambda == 0 or norm(s) == trust_radius:
return s
else:
ta, tb = self.calc_boundaries(iterate=s, direction=u_n, trust_radius=trust_radius)
pa = s + ta * u_n
pb = s + tb * u_n
# Calculate the point on the boundary with the smallest value
bound1_val = self._quad_model_krylov(lanczos_g, loss, pa, T_x)
bound2_val = self._quad_model_krylov(lanczos_g, loss, pb, T_x)
if bound1_val < bound2_val:
return pa
else:
return pb
while True:
if self._converged(s, trust_radius) or norm(s) < torch.finfo(float).eps:
break
w = torch.triangular_solve(s, L.T.to(device=device), upper=False).solution
_lambda = self._nu_next(_lambda, trust_radius, s, w)
s, L = self._compute_s(_lambda, lambda_const, lanczos_g, T_x, device)
n_iter_nu += 1
if n_iter_nu > self.max_newton_iter - 1: # self.max_krylov_dim:
print(RuntimeWarning(
'Maximum number of newton iterations exceeded for _lambda: {}'.format(_lambda)))
break
return s
@torch.no_grad()
def _nu_next(self, _lambda, trust_radius, s, w):
norm_s = norm(s)
norm_w = norm(w)
phi = 1 / norm_s - 1 / trust_radius
phi_prime = norm_w ** 2 / norm_s ** 3
return _lambda - phi / phi_prime
@torch.no_grad()
def _compute_s(self, _lambda, lambda_const, lanczos_g, T_x, device):
try:
L = torch.linalg.cholesky(self.T_lambda(_lambda, T_x, device))
except RuntimeError:
print('Recursion')
lambda_const *= 2
# RecursionError: maximum recursion depth exceeded while calling a Python object
s, L = self._compute_s(_lambda + lambda_const, lambda_const, lanczos_g, T_x, device)
s = torch.cholesky_solve(-lanczos_g[:, None], L.to(device=device), upper=True)
return s, L
@torch.no_grad()
def _solve_subproblem_krylov(
self,
loss: float,
flat_grad: Tensor,
trust_radius: float) -> Tuple[Tensor, bool]:
"""
Solves the quadratic subproblem in the trust region using Generalized Lanczos Method,
see Algorithm 7.5.2 Conn et al.
"""
INTERIOR_FLAG = True
Q, diagonals, off_diagonals = [], [], []
flat_grads_detached = flat_grad.detach()
n_features = len(flat_grads_detached)
h = torch.zeros_like(flat_grads_detached, requires_grad=False)
q, p = flat_grads_detached, -flat_grads_detached
gamma0 = torch.norm(q)
krylov_dim, sigma = 0, 1
while True:
Hp = self._compute_hessian_vector_product(flat_grad, p)
ptHp = torch.sum(Hp * p)
alpha = torch.norm(q) ** 2 / ptHp
if alpha == 0:
print('hard case')
if krylov_dim == 0:
diagonals.append(1. / alpha.item())
off_diagonals.append(float('inf')) # dummy value
Q.append(sigma * q / norm(q))
T_x = torch.Tensor([diagonals])
alpha_prev = alpha
else:
diagonals.append(1. / alpha.item() + beta.item() / alpha_prev.item())
sigma = - torch.sign(alpha_prev) * sigma
Q.append(sigma * q / norm(q))
T_x = (torch.diag(torch.tensor(diagonals, dtype=torch.float32), 0)
+ torch.diag(torch.tensor(off_diagonals[1:], dtype=torch.float32), -1)
+ torch.diag(torch.tensor(off_diagonals[1:], dtype=torch.float32), 1))
alpha_prev = alpha
if INTERIOR_FLAG and alpha < 0 or torch.norm(h + alpha * p) >= trust_radius:
INTERIOR_FLAG = False
if INTERIOR_FLAG:
h = h + alpha * p
else:
# Lanczos Step 2: solve problem in subspace
e_1 = torch.eye(1, krylov_dim + 1, device=flat_grad.device).flatten()
lanczos_g = gamma0 * e_1
s = self._root_finder(trust_radius=trust_radius,
T_x=T_x, lanczos_g=lanczos_g,
loss=loss, device=flat_grad.device)
s = s.to(flat_grad.device)
q_next = q + alpha * Hp
# test for convergence
if INTERIOR_FLAG and norm(q_next) ** 2 < self.lanczos_tol:
break
if not INTERIOR_FLAG and torch.norm(q_next) * abs(s[-1]) < self.lanczos_tol:
break
if krylov_dim == n_features:
print(RuntimeWarning('Krylov dimensionality reach full space! Breaking out..'))
break
# return h
if krylov_dim > self.max_krylov_dim:
print(RuntimeWarning('Max Krylov dimension reached! Breaking out..'))
break
beta = torch.dot(q_next, q_next) / torch.dot(q, q)
off_diagonals.append(torch.sqrt(beta) / torch.abs(alpha_prev))
p = -q_next + beta * p
q = q_next
krylov_dim = krylov_dim + 1
if not INTERIOR_FLAG:
# Return to the original space
Q = torch.vstack(Q).T
h = torch.sum(Q * torch.squeeze(s), dim=1)
return h, not INTERIOR_FLAG # INTERIOR_FLAG is False == hit_boundary is True
def step(self, closure=None) -> float:
starting_loss = closure(backward=True)
flat_grad = self._gather_flat_grad()
state = self.state
if len(state) == 0:
state['trust_radius'] = torch.full([1],
self.initial_trust_radius,
dtype=flat_grad.dtype,
device=flat_grad.device)
trust_radius = state['trust_radius']
if self.opt_method == 'cg':
param_step, hit_boundary = self._solve_subproblem_cg(starting_loss, flat_grad, trust_radius)
else:
param_step, hit_boundary = self._solve_subproblem_krylov(starting_loss, flat_grad, trust_radius)
self.param_step = param_step
if torch.norm(param_step).item() <= self.gtol:
return starting_loss
improvement_ratio = self._improvement_ratio(
param_step, starting_loss, flat_grad, closure)
if improvement_ratio.item() < 0.25:
trust_radius.mul_(0.25)
else:
if improvement_ratio.item() > 0.75 and hit_boundary:
trust_radius.mul_(2).clamp_(0.0, self.max_trust_radius)
if improvement_ratio.item() <= self.eta:
# If the improvement is not sufficient, then undo the update
start_idx = 0
for param in self._params:
num_els = param.numel()
curr_upd = param_step[start_idx:start_idx + num_els]
param.data.add_(-curr_upd.view_as(param))
start_idx += num_els
self.steps += 1
return starting_loss
| 5,293 | 15,468 | 46 |
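# Editor's note: an illustrative usage sketch added for clarity; it is not part of the
# original torch-trust-ncg repository. The closure follows the contract assumed by
# TrustRegion.step() above: it accepts a `backward` flag, re-evaluates the loss, and
# builds the gradient graph (create_graph=True) so that step() can form the
# Hessian-vector products it needs. The toy quadratic target values are made up.
def _example_fit_quadratic(num_steps=20):
    x = torch.nn.Parameter(torch.tensor([3.0, -2.0]))
    target = torch.tensor([1.0, 2.0])
    optimizer = TrustRegion([x], opt_method='cg')

    def closure(backward=True):
        optimizer.zero_grad()
        loss = torch.sum((x - target) ** 2)
        if backward:
            # create_graph=True keeps the gradients differentiable, which the
            # Hessian-vector products inside TrustRegion.step() rely on.
            loss.backward(create_graph=True)
        return loss

    loss = None
    for _ in range(num_steps):
        loss = optimizer.step(closure)
    return x.detach(), loss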
0af89f7156b96ef95ef85efe8d69e2dfa62d252c | 5,356 | py | Python | chinesenotes/similarity_train.py | alexamies/chinesenotes-python | 778712e98a54860e9bb24a5111a9ca8a96644214 | [
"Apache-2.0"
] | null | null | null | chinesenotes/similarity_train.py | alexamies/chinesenotes-python | 778712e98a54860e9bb24a5111a9ca8a96644214 | [
"Apache-2.0"
] | null | null | null | chinesenotes/similarity_train.py | alexamies/chinesenotes-python | 778712e98a54860e9bb24a5111a9ca8a96644214 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Trains a decision tree classifier for phrase similarity.
Reads the input file and trains the classifier.
"""
import argparse
import csv
import logging
import graphviz
import matplotlib.pyplot as plt
from sklearn import tree
from sklearn.metrics import classification_report
from sklearn.tree import export_graphviz
INFILE_DEF = 'data/phrase_similarity_combined.csv'
OUTFILE_DEF = 'drawings/phrase_similarity_graph.png'
def run(infile, outfile, val_file):
"""Load training data and train the classifier
Args:
infile: input file with the mutual information and training points
outfile: file name to write graphviz export to
"""
x, y = load_training2(infile)
feature_names = ['Unigram count / len', 'Hamming distance / len']
train(x, y, feature_names, outfile)
x, y = load_training3(infile)
feature_names = ['Unigram count', 'Hamming distance', 'Query length']
clf = train(x, y, feature_names, outfile)
if len(val_file) > 0:
x, y = load_training3(val_file)
validate(clf, x, y, feature_names)
def train(x, y, feature_names, outfile):
"""Train the classifier
Args:
x: list of values for feature variables
y: list of target values
feature_names: Names of feature variables
outfile: file name to write graphviz export to
Returns:
the trained classifier
"""
clf = tree.DecisionTreeClassifier(random_state=0,
max_depth=2,
criterion='gini',
min_samples_split=3)
clf = clf.fit(x, y)
score = clf.score(x, y)
logging.info(f'Classifier score: {score}\n')
y_pred = clf.predict(x)
print("Training results")
print(classification_report(y, y_pred))
dot_data = tree.export_graphviz(clf, filled = True, rounded = True)
graph = graphviz.Source(dot_data)
class_names = ['Not relevant', 'Relevant']
r = tree.export_text(clf, feature_names=feature_names)
print(r)
tree.plot_tree(clf,
feature_names=feature_names,
class_names=class_names,
filled=True,
impurity=False)
#plt.show()
plt.savefig(outfile, dpi=160)
return clf
def validate(clf, x, y, feature_names):
"""Validate the classifier
Args:
x: list of values for feature variables
y: list of target values
feature_names: Names of feature variables
"""
y_pred = clf.predict(x)
print("Validation results")
print(classification_report(y, y_pred))
def load_training2(infile):
"""Load training data with normalized unigram count and hamming distance.
Args:
infile: file name to load data from
"""
X = []
Y = []
with open(infile, 'r') as f:
reader = csv.reader(f)
for row in reader:
if reader.line_num == 1: # Skip header row
continue
if len(row) > 8:
query = row[0]
unigram_count = float(row[5]) / len(query)
hamming = float(row[6]) / len(query)
relevance = int(row[8])
x = [unigram_count, hamming]
X.append(x)
Y.append(relevance)
return (X, Y)
def load_training3(infile):
"""Load training data with normalized unigram count, hamming distance, and
query length.
Args:
infile: file name to load data from
"""
X = []
Y = []
with open(infile, 'r') as f:
reader = csv.reader(f)
for row in reader:
if reader.line_num == 1: # Skip header row
continue
if len(row) > 8:
query = row[0]
unigram_count = int(row[5])
hamming = int(row[6])
relevance = int(row[8])
x = [unigram_count, hamming, len(query)]
X.append(x)
Y.append(relevance)
return (X, Y)
# Entry point from a script
if __name__ == "__main__":
main() | 30.605714 | 86 | 0.648432 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Trains a decision tree classifier for phrase similarity.
Reads the input file and trains the classifier.
"""
import argparse
import csv
import logging
import graphviz
import matplotlib.pyplot as plt
from sklearn import tree
from sklearn.metrics import classification_report
from sklearn.tree import export_graphviz
INFILE_DEF = 'data/phrase_similarity_combined.csv'
OUTFILE_DEF = 'drawings/phrase_similarity_graph.png'
def run(infile, outfile, val_file):
"""Load training data and train the classifier
Args:
infile: input file with the mutual information and training points
outfile: file name to write graphviz export to
"""
x, y = load_training2(infile)
feature_names = ['Unigram count / len', 'Hamming distance / len']
train(x, y, feature_names, outfile)
x, y = load_training3(infile)
feature_names = ['Unigram count', 'Hamming distance', 'Query length']
clf = train(x, y, feature_names, outfile)
if len(val_file) > 0:
x, y = load_training3(val_file)
validate(clf, x, y, feature_names)
def train(x, y, feature_names, outfile):
"""Train the classifier
Args:
x: list of values for feature variables
y: list of target values
feature_names: Names of feature variables
outfile: file name to write graphviz export to
Returns:
the trained classifier
"""
clf = tree.DecisionTreeClassifier(random_state=0,
max_depth=2,
criterion='gini',
min_samples_split=3)
clf = clf.fit(x, y)
score = clf.score(x, y)
logging.info(f'Classifier score: {score}\n')
y_pred = clf.predict(x)
print("Training results")
print(classification_report(y, y_pred))
dot_data = tree.export_graphviz(clf, filled = True, rounded = True)
graph = graphviz.Source(dot_data)
class_names = ['Not relevant', 'Relevant']
r = tree.export_text(clf, feature_names=feature_names)
print(r)
tree.plot_tree(clf,
feature_names=feature_names,
class_names=class_names,
filled=True,
impurity=False)
#plt.show()
plt.savefig(outfile, dpi=160)
return clf
def validate(clf, x, y, feature_names):
"""Validate the classifier
Args:
x: list of values for feature variables
y: list of target values
feature_names: Names of feature variables
"""
y_pred = clf.predict(x)
print("Validation results")
print(classification_report(y, y_pred))
def load_training2(infile):
"""Load training data with normalized unigram count and hamming distance.
Args:
infile: file name to load data from
"""
X = []
Y = []
with open(infile, 'r') as f:
reader = csv.reader(f)
for row in reader:
if reader.line_num == 1: # Skip header row
continue
if len(row) > 8:
query = row[0]
unigram_count = float(row[5]) / len(query)
hamming = float(row[6]) / len(query)
relevance = int(row[8])
x = [unigram_count, hamming]
X.append(x)
Y.append(relevance)
return (X, Y)
def load_training3(infile):
"""Load training data with normalized unigram count, hamming distance, and
query length.
Args:
infile: file name to load data from
"""
X = []
Y = []
with open(infile, 'r') as f:
reader = csv.reader(f)
for row in reader:
if reader.line_num == 1: # Skip header row
continue
if len(row) > 8:
query = row[0]
unigram_count = int(row[5])
hamming = int(row[6])
relevance = int(row[8])
x = [unigram_count, hamming, len(query)]
X.append(x)
Y.append(relevance)
return (X, Y)
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--infile',
dest='infile',
default=INFILE_DEF,
help='File name to read training data from')
parser.add_argument('--outfile',
dest='outfile',
default=OUTFILE_DEF,
help='File name to write output to')
parser.add_argument('--valfile',
dest='valfile',
default="",
help='File name to read validation data from')
args = parser.parse_args()
logging.info(f'Training decision tree from {args.infile}, output to {args.outfile}')
run(args.infile, args.outfile, args.valfile)
# Entry point from a script
if __name__ == "__main__":
main() | 766 | 0 | 23 |
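# Editor's note: a small illustration added for clarity; it is not part of the original
# chinesenotes-python sources. It shows the CSV column layout that load_training3()
# above relies on: column 0 is the query, column 5 the unigram count, column 6 the
# Hamming distance and column 8 the relevance label. The sample row is made up.
def _example_feature_row():
    sample_row = ['舍利弗', '', '', '', '', '3', '1', '', '1']
    x_sample = [int(sample_row[5]), int(sample_row[6]), len(sample_row[0])]
    y_sample = int(sample_row[8])
    return x_sample, y_sample  # ([3, 1, 3], 1)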
5c4ff07fa09c1f57cc06fbf9936c0f7bf55c9908 | 1,693 | py | Python | script.py | Feezy15/subreddit_scraper | 9e9ec989101f13e9474234473e62493034eb7611 | [
"MIT"
] | null | null | null | script.py | Feezy15/subreddit_scraper | 9e9ec989101f13e9474234473e62493034eb7611 | [
"MIT"
] | null | null | null | script.py | Feezy15/subreddit_scraper | 9e9ec989101f13e9474234473e62493034eb7611 | [
"MIT"
] | null | null | null | """ A script that scrapes various bits of data from subreddits
"""
import praw
import requests
import os
import sqlite3
SQ_LITE_FILE = "avexchange_data.db"
def get_urls(subreddit):
""" scrape for certain keywords on a subreddit
"""
print("getting urls from r/{}".format(subreddit))
reddit = praw.Reddit("bot1")
conn = sqlite3.connect(SQ_LITE_FILE)
c = conn.cursor()
# find posts in the last hour
for submission in reddit.subreddit(subreddit).search("KZ OR Final OR Sennheiser OR Hifiman OR Pro", sort="new", time_filter="hour"):
print("Title: {}".format(submission.title))
print("Text: {}".format(submission.selftext))
# c.execute("SELECT 1 FROM posts WHERE fullname=? LIMIT 1", (submission.fullname, ))
# post_exists = c.fetchone() is not None
# if not post_exists:
try:
            c.execute("INSERT INTO posts VALUES (?, ?, ?)",
                      (submission.fullname, submission.title, submission.selftext))
except sqlite3.IntegrityError as e:
pass
# print("--------\n")
conn.commit()
conn.close()
if __name__ == "__main__":
init_database()
get_urls("avexchange")
| 32.557692 | 136 | 0.627289 | """ A script that scrapes various bits of data from subreddits
"""
import praw
import requests
import os
import sqlite3
SQ_LITE_FILE = "avexchange_data.db"
def init_database():
if not os.path.isfile(os.path.dirname(os.path.abspath(__file__)) + "/{}".format(SQ_LITE_FILE)):
print("creating db {}-".format(SQ_LITE_FILE))
conn = sqlite3.connect(SQ_LITE_FILE)
# reddit fullname as primary key
conn.execute("""CREATE TABLE IF NOT EXISTS posts
(fullname TEXT PRIMARY KEY,
title TEXT NOT NULL,
post_info TEXT NOT NULL)""")
conn.commit()
conn.close()
def get_urls(subreddit):
""" scrape for certain keywords on a subreddit
"""
print("getting urls from r/{}".format(subreddit))
reddit = praw.Reddit("bot1")
conn = sqlite3.connect(SQ_LITE_FILE)
c = conn.cursor()
# find posts in the last hour
for submission in reddit.subreddit(subreddit).search("KZ OR Final OR Sennheiser OR Hifiman OR Pro", sort="new", time_filter="hour"):
print("Title: {}".format(submission.title))
print("Text: {}".format(submission.selftext))
# c.execute("SELECT 1 FROM posts WHERE fullname=? LIMIT 1", (submission.fullname, ))
# post_exists = c.fetchone() is not None
# if not post_exists:
try:
            c.execute("INSERT INTO posts VALUES (?, ?, ?)",
                      (submission.fullname, submission.title, submission.selftext))
except sqlite3.IntegrityError as e:
pass
# print("--------\n")
conn.commit()
conn.close()
if __name__ == "__main__":
init_database()
get_urls("avexchange")
| 459 | 0 | 23 |
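# Editor's note: added for clarity, not part of the original script. praw.Reddit("bot1")
# above reads its credentials from a [bot1] site section in a praw.ini file; a minimal
# example is sketched below (all values are placeholders).
#
#   [bot1]
#   client_id=YOUR_CLIENT_ID
#   client_secret=YOUR_CLIENT_SECRET
#   user_agent=avexchange scraper by u/YOUR_USERNAME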
5aba35ab361297d91284da0b44616c3ca2742dbb | 14,791 | py | Python | nodemonitor/agent.py | foxty/node_monitor | 2d3b8870a20cf2fe4d7ae3f4c95f2fd74da6c8e0 | [
"Apache-2.0"
] | 3 | 2018-01-09T05:58:21.000Z | 2021-07-15T13:18:10.000Z | nodemonitor/agent.py | foxty/node_monitor | 2d3b8870a20cf2fe4d7ae3f4c95f2fd74da6c8e0 | [
"Apache-2.0"
] | 14 | 2018-01-17T06:19:48.000Z | 2022-02-12T02:29:14.000Z | nodemonitor/agent.py | foxty/node-monitor | 2d3b8870a20cf2fe4d7ae3f4c95f2fd74da6c8e0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on 2017-12-22
@author: foxty
Node Agent
"""
# ==============================
# Node Agent
# ==============================
import os
import sys
import logging
import logging.handlers
import socket
import select
import Queue as Q
import threading
from datetime import datetime
from subprocess import call
from common import Msg, InvalidMsgError, is_win, is_sunos, ostype, OSType, set_logging, check_output, \
process_info, interpret_exp, send_msg, read_msg
_MAX_BACKOFF_SECOND = 60 # in agent retry policy
DEF_CONFIG = {
"version": 0,
"clock_interval": 10,
"heartbeat_clocks": 6,
"node_metrics": [],
"service_metrics": {},
"services": []
}
class NodeAgent:
    """Agent runs on the node and keeps sending stats data to the Master via a TCP connection."""
SEND_BUF = 128*1024
    def add_msg(self, msg):
"""Add new msg to the queue and remove oldest msg if its full"""
retry = 0
while True:
try:
if self._queue.full():
oldest_msg = self._queue.get_nowait()
logging.debug('q is full, msg %s abandoned, qsize=%d.',
oldest_msg, self._queue.qsize())
self._queue.put_nowait(msg)
logging.info('msg %s added to queue, retry=%d, qsize=%d.', msg, retry, self._queue.qsize())
break;
except Q.Full:
# Queue is full, retry
retry += 1
def _do_reg(self):
"""Produce a agent reg message after connected"""
logging.info('do registration...')
reg_data = {'os': ostype(), 'hostname': self._hostname}
reg_msg = Msg.create_msg(self._agentid, Msg.A_REG, reg_data)
self.add_msg(reg_msg)
if __name__ == '__main__':
basepath = os.path.dirname(sys.path[0])
set_logging('agent.log')
args = sys.argv[1:]
mhost = 'localhost'
mport = 30079
if len(args) == 0:
        print('Usage: agent.py master_host[:port]')
exit(-1)
if ':' in args[0]:
addr = args[0].split(':')
mhost, mport = addr[0], int(addr[1])
else:
mhost = args[0]
agent = NodeAgent(mhost, mport)
agent.start() | 34.639344 | 107 | 0.573862 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on 2017-12-22
@author: foxty
Node Agent
"""
# ==============================
# Node Agent
# ==============================
import os
import sys
import logging
import logging.handlers
import socket
import select
import Queue as Q
import threading
from datetime import datetime
from subprocess import call
from common import Msg, InvalidMsgError, is_win, is_sunos, ostype, OSType, set_logging, check_output, \
process_info, interpret_exp, send_msg, read_msg
_MAX_BACKOFF_SECOND = 60 # in agent retry policy
DEF_CONFIG = {
"version": 0,
"clock_interval": 10,
"heartbeat_clocks": 6,
"node_metrics": [],
"service_metrics": {},
"services": []
}
def is_metric_valid(metric):
if 'name' not in metric or 'cmd' not in metric:
logging.warn('incompleted metric definition %s', metric)
return False
name = metric['name']
os = metric.get('os')
cmd = metric['cmd']
checkcmd = 'which'
if is_win():
checkcmd = 'where'
if is_sunos():
checkcmd = 'type'
if os is None or os == ostype():
valid = call([checkcmd, cmd[0]]) == 0
else:
valid = False
logging.info('check metric %s with os=%s -> %s', name, os, valid)
return valid
class AgentConfig(object):
def __init__(self, config):
self._config = config;
self._version = config['version']
self._node_metrics = config.get('node_metrics', [])
self._valid_node_metrics = None
self._service_metrics = config.get('service_metrics', {})
self._services = config.get('services',[])
self._valid_services = None
self._validate()
# clocks
self._clock_interval = config.get('clock_interval', 10)
self._hb_clocks = config.get('heartbeat_clocks', 60)
logging.info('agent config version %s', self._version)
def _validate(self):
# check node metrics
self._valid_node_metrics = [v for v in self._node_metrics if is_metric_valid(v)]
logging.info('valid node metrics = %s', self._valid_node_metrics)
# check sevice metrics
logging.info('valid service metrics = %s', self._service_metrics)
# check services
invalid_serivces = [s for s in self._services if 'name' not in s or 'lookup_keyword' not in s]
if invalid_serivces:
self._valid_services = [s for s in self._services if s not in invalid_serivces]
else:
self._valid_services = self._services
logging.info('valid service=%s, invalid services=%s',
map(lambda x: x['name'], self._valid_services),
map(lambda x: x['name'], invalid_serivces))
@property
def version(self):
return self._version
@property
def node_metrics(self):
return self._node_metrics
@property
def valid_node_metrics(self):
return self._valid_node_metrics
@property
def service_metrics(self):
return self._service_metrics
@property
def valid_services(self):
return self._valid_services
@property
def clock_interval(self):
return self._clock_interval
@property
def hb_clocks(self):
return self._hb_clocks
class NodeCollector(threading.Thread):
def __init__(self, agent, config):
super(NodeCollector, self).__init__(target=self._collect, name='NodeCollector')
self._agent = agent
self._agentid = agent.agentid
self._delay = threading.Event()
self._config = config
self._stopped = False
self.setDaemon(True)
def reload_config(self, cfg):
logging.info('config reloaded by %s', cfg)
self._config = AgentConfig(cfg)
def stop(self):
self._stopped = True
def _collect(self):
loops = 1;
while not self._stopped:
interval = self._config.clock_interval
self._delay.wait(interval)
time1 = datetime.now()
try:
try:
if loops % self._config.hb_clocks == 0:
self._prod_heartbeat()
self._collect_nmetrics(loops)
self._collect_smetrics(loops)
except Exception:
logging.exception('error during collect metrics, wait for next round.')
finally:
loops = loops + 1
time2 = datetime.now()
time_used = (time2 - time1).seconds
interval = self._config.clock_interval - time_used
logging.info('agent collector stopped after %s loops', loops)
def _prod_heartbeat(self):
logging.info('produce heartbeat...')
body = {'datetime': datetime.utcnow(), 'config_version': self._config.version}
hb_msg = Msg.create_msg(self._agentid, Msg.A_HEARTBEAT, body)
self._agent.add_msg(hb_msg)
def _translate_cmd(self, cmd, context={}):
logging.debug('translate cmd=%s by context=%s', cmd, context)
newcmd = []
for c in cmd:
c = interpret_exp(c, context)
newcmd.append(c)
return newcmd
def _get_cmd_result(self, cmd):
"""
Execute cmd on local OS and return output of cmd
:param cmd:
:return: result string
"""
result = 'NOT COLLECTED'
try:
result = check_output(cmd)
except Exception:
logging.exception('call cmd %s failed', cmd)
result = 'call cmd %s failed.' % cmd
return result
def _collect_nmetrics(self, loops):
"""
Collect node metrics
:param loops: current loops
"""
logging.info('try to collecting node metrics, loops=%d', loops)
nmetrics_result = {}
for nm in self._config.valid_node_metrics:
if loops % nm.get('clocks', 6) == 0:
nmetrics_result[nm['name']] = self._get_cmd_result(nm['cmd'])
if nmetrics_result:
msg = Msg.create_msg(self._agentid, Msg.A_NODE_METRIC, nmetrics_result)
msg.set_header(msg.H_COLLECT_AT, datetime.utcnow())
self._agent.add_msg(msg)
logging.info('%d node metrics collected', len(nmetrics_result))
else:
logging.info('no metric collected ')
def _collect_smetrics(self, loops):
"""Collect services metrics"""
services = self._config.valid_services
logging.info('try to collect services metrics, loops=%s.', loops)
for service in services:
clocks = service['clocks']
if loops % clocks == 0:
self._collect_service(service)
def _collect_service(self, service):
"""
Collect defined metrics from service
:param service: service info
:return: collected result in dict
"""
logging.debug('collect service : %s', service)
name = service['name']
stype = service.get('type', None)
lookup = service['lookup_keyword']
puser, pid, penvs = process_info(lookup)
if not pid:
logging.info('can not find process "%s" by "%s".', name, lookup)
return
metric_names = service['metrics']
clocks = service['clocks']
env = service.get('env', {})
# interpret configured env with process envs
for k, v in env.items():
env[k] = interpret_exp(v, penvs)
env['pid'] = pid
env['puser'] = puser
logging.info('start to collect metrics for service [%s(%s)]: metrics=%s, clocks=%s.',
name, pid, metric_names, clocks)
service_result = {'name': name, 'pid': pid, 'puser': puser, 'type': stype}
service_metrics = {}
for mname in metric_names:
try:
metric = self._config.service_metrics[mname]
ocmd = metric['cmd']
logging.info('collect %s by %s', mname, ocmd)
cmd = self._translate_cmd(ocmd, env)
logging.info('command translated to %s', cmd)
if not is_metric_valid(metric):
logging.info('cmd %s is not a valid command, try next', cmd[0])
continue
service_metrics[mname] = self._get_cmd_result(cmd)
except Exception:
logging.exception('collect metrics %s for service %s failed: cmd=%s', mname, name, ocmd)
service_result['metrics'] = service_metrics
# send message
msg = Msg.create_msg(self._agentid, Msg.A_SERVICE_METRIC, service_result)
msg.set_header(Msg.H_COLLECT_AT, datetime.utcnow())
self._agent.add_msg(msg)
logging.info('%d metrics collected for %s.', len(service_metrics), name)
return service_result
class NodeAgent:
    """Agent runs on the node and keeps sending stats data to the Master via a TCP connection."""
SEND_BUF = 128*1024
def __init__(self, master_host, master_port):
self._hostname = socket.gethostname()
self._agentid = self._gen_agentid()
self._master_addr = (master_host, master_port)
self._started = False
self._queue = Q.Queue(maxsize=8)
self._retry = threading.Event()
self._config = AgentConfig(DEF_CONFIG)
self._node_collector = NodeCollector(self, self._config)
logging.info('agent init with id=%s, host=%s, master=%s, hostname=%s',
self._agentid, self._hostname, self._master_addr, self._hostname)
@property
def agentid(self):
return self._agentid
def _gen_agentid(self):
aid = None
if ostype() in [OSType.WIN, OSType.SUNOS]:
aid = self._hostname
else:
aid = check_output(['hostid']).strip()
logging.info('agent id %s generated for %s', aid, self._hostname)
return aid
def _connect_master(self):
if getattr(self, 'sock', None):
            logging.warning('found existing socket, closing it now.')
try:
self.sock.close()
except socket.error:
pass
logging.info('connecting master %s', self._master_addr)
tried = 0
while 1:
tried = tried + 1
try:
sock = socket.socket()
sock.connect(self._master_addr)
sock.setblocking(False)
sendbuf = sock.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
logging.info('default send buffer is %s, will change to %s.', sendbuf, self.SEND_BUF)
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.SEND_BUF)
break
except socket.error:
sleeptime = min(_MAX_BACKOFF_SECOND, tried ** 2)
logging.exception('Cannot connect %s(tried=%d), retry after %d seconds...',
self._master_addr, tried, sleeptime)
self._retry.wait(sleeptime)
        logging.info('connect master(%s) succeeded.', self._master_addr)
self.sock = sock
# do agent_reg
self._do_reg()
def start(self):
logging.info('agent %s starting ...', self._agentid)
self._connect_master()
self._node_collector.start()
self._started = True
self._loop()
def stop(self):
self._started = False
self.sock.close()
logging.info('agent %s stopped.', self._agentid)
def add_msg(self, msg):
"""Add new msg to the queue and remove oldest msg if its full"""
retry = 0
while True:
try:
if self._queue.full():
oldest_msg = self._queue.get_nowait()
logging.debug('q is full, msg %s abandoned, qsize=%d.',
oldest_msg, self._queue.qsize())
self._queue.put_nowait(msg)
logging.info('msg %s added to queue, retry=%d, qsize=%d.', msg, retry, self._queue.qsize())
                break
except Q.Full:
# Queue is full, retry
retry += 1
def _do_reg(self):
"""Produce a agent reg message after connected"""
logging.info('do registration...')
reg_data = {'os': ostype(), 'hostname': self._hostname}
reg_msg = Msg.create_msg(self._agentid, Msg.A_REG, reg_data)
self.add_msg(reg_msg)
def _loop(self):
logging.info('start agent looping...')
while self._started:
rlist = elist = [self.sock]
wlist = []
if not self._queue.empty():
wlist = [self.sock]
try:
# wait for 5 seconds in each loop to avoid cpu consuming
                rlist, wlist, elist = select.select(rlist, wlist, elist, 5)
if rlist:
self._do_read(rlist[0])
if wlist:
self._do_write(wlist[0])
if elist:
self._do_error(elist[0])
except socket.error:
logging.exception('error in loop.')
self._connect_master()
except InvalidMsgError:
logging.error('invalid message received, reconnecting...')
self._connect_master()
def _do_read(self, sock):
msg = read_msg(sock)
if msg is None:
            logging.warning('cannot get msg from %s', sock.getpeername())
else:
logging.info('receive msg %s', msg)
if msg.msg_type == Msg.M_CONFIG_UPDATE:
newconfig = msg.body['config']
self._node_collector.reload_config(newconfig)
def _do_write(self, sock):
while not self._queue.empty():
try:
msg = self._queue.get_nowait()
except Q.Empty:
                logging.warning('tried to get msg from an empty queue.')
return
msg.set_header(msg.H_SEND_AT, datetime.utcnow())
size, times = send_msg(sock, msg)
logging.info('msg %s sent to %s use %d times', msg, self._master_addr, times)
logging.debug('msg data = %s', msg.body)
def _do_error(self, sock):
logging.info('error happens for %s', sock)
if __name__ == '__main__':
basepath = os.path.dirname(sys.path[0])
set_logging('agent.log')
args = sys.argv[1:]
mhost = 'localhost'
mport = 30079
if len(args) == 0:
        print('Usage: agent.py master_host[:port]')
exit(-1)
if ':' in args[0]:
addr = args[0].split(':')
mhost, mport = addr[0], int(addr[1])
else:
mhost = args[0]
agent = NodeAgent(mhost, mport)
agent.start() | 7,974 | 4,188 | 338 |
2d94e570a5fb66ee4ea2b0b645009575fe90c529 | 4,419 | py | Python | pyroute/compress/compress.py | ftrimble/route-grower | d4343ecc9b13a3e1701c8460c8a1792d08b74567 | [
"Apache-2.0"
] | null | null | null | pyroute/compress/compress.py | ftrimble/route-grower | d4343ecc9b13a3e1701c8460c8a1792d08b74567 | [
"Apache-2.0"
] | null | null | null | pyroute/compress/compress.py | ftrimble/route-grower | d4343ecc9b13a3e1701c8460c8a1792d08b74567 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#----------------------------------------------------------------
#
#------------------------------------------------------
# Usage:
#
#------------------------------------------------------
# Copyright 2007, Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------
import sys
import os
from xml.sax import make_parser, handler
import xml
from struct import *
# Parse the supplied OSM file
if __name__ == "__main__":
print "Loading data..."
Binary = BinaryOsm()
Binary.encode(sys.argv[1], sys.argv[2])
| 31.340426 | 218 | 0.56687 | #!/usr/bin/python
#----------------------------------------------------------------
#
#------------------------------------------------------
# Usage:
#
#------------------------------------------------------
# Copyright 2007, Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------
import sys
import os
from xml.sax import make_parser, handler
import xml
from struct import *
class BinaryOsm(handler.ContentHandler):
def __init__(self):
pass
def encode(self, filename, output):
self.nextKID = 3
self.nextVID = 1
self.tags = {}
self.values = {}
if(not os.path.exists(filename)):
print "No such data file %s" % filename
return
try:
self.out = open(output, "wb")
parser = make_parser()
parser.setContentHandler(self)
parser.parse(filename)
self.out.write("X")
self.out.close()
except xml.sax._exceptions.SAXParseException:
print "Error loading %s" % filename
def startElement(self, name, attrs):
"""Handle XML elements"""
if(name =='node'):
self.meta = { \
'id':int(attrs.get('id')),
        'lon':float(attrs.get('lon')),
        'lat':float(attrs.get('lat'))
}
self.tags = {}
elif(name == 'way'):
self.meta = {'id':int(attrs.get('id'))}
self.tags = {}
self.waynodes = []
elif(name == 'relation'):
self.tags = {}
elif name == 'nd':
"""Nodes within a way -- add them to a list"""
self.waynodes.append(int(attrs.get('ref')))
elif name == 'tag':
"""Tags - store them in a hash"""
k,v = (attrs.get('k'), attrs.get('v'))
if not k in ('created_by'):
self.tags[k] = v
def endElement(self, name):
"""Handle ways in the OSM data"""
writeTags = False
if(name =='node'):
data = 'N' + pack("L", self.meta['id']) + self.encodeLL(self.meta['lat'], self.meta['lon'])
self.out.write(data)
writeTags = True
elif(name == 'way'):
data = 'W' + pack("L", self.meta['id'])
self.out.write(data)
self.out.write(pack('H', len(self.waynodes)))
for n in self.waynodes:
self.out.write(pack('L', n))
writeTags = True
if(writeTags):
n = len(self.tags.keys())
if(n > 255):
# TODO:
print "Error: more than 255 tags on an item"
return
self.out.write(pack('B', n))
for k,v in self.tags.items():
self.encodeTag(k, False, k)
volatile = k in ('name','ref','ncn_ref','note','notes','description','ele','time','url','website','postal_code','image','source_ref','source:ref','source:name','source_ref:name',"FIXME","fixme","place_numbers")
self.encodeTag(v,volatile,k)
def encodeTag(self,text,volatile,key):
text = text.encode('utf8')
if(not volatile):
try:
ID = self.values[text]
self.out.write(pack('H', ID))
except KeyError:
if(self.nextKID >= 65535):
# TODO
print "Error: too many stored tags!"
sys.exit()
print "%d: %s %s" % (self.nextKID, key,text)
self.values[text] = self.nextKID
self.out.write(pack('HHB', 1, self.nextKID, len(text)))
self.out.write(text)
self.nextKID = self.nextKID + 1
else:
self.out.write(pack('HB', 0, len(text)))
self.out.write(text)
#print "Storing simple %s" % (text)
def encodeLL(self,lat,lon):
pLat = (lat + 90.0) / 180.0
pLon = (lon + 180.0) / 360.0
iLat = self.encodeP(pLat)
iLon = self.encodeP(pLon)
return(pack("II", iLat, iLon))
def encodeP(self,p):
i = int(p * 4294967296.0)
return(i)
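# Worked example (added note, not part of the original file): encodeLL normalises
# latitude to (lat + 90) / 180 and longitude to (lon + 180) / 360, e.g.
#   lat = 52.5  ->  (52.5 + 90) / 180  = 0.791666...
#   lon = 13.4  ->  (13.4 + 180) / 360 = 0.537222...
# and encodeP then scales each fraction by 2**32 so it packs as an unsigned 32-bit int.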
# Parse the supplied OSM file
if __name__ == "__main__":
print "Loading data..."
Binary = BinaryOsm()
Binary.encode(sys.argv[1], sys.argv[2])
| 1,337 | 1,884 | 23 |
b6cfc149a70283a6388eec6ed094070716e7e105 | 5,357 | py | Python | gooey/gui/components/options/validators.py | geosaleh/Gooey | c2aa8afc269271d55f011c6bc89828992a30b3f0 | [
"MIT"
] | 1 | 2022-02-21T05:51:21.000Z | 2022-02-21T05:51:21.000Z | gooey/gui/components/options/validators.py | geosaleh/Gooey | c2aa8afc269271d55f011c6bc89828992a30b3f0 | [
"MIT"
] | 1 | 2021-12-02T07:42:03.000Z | 2021-12-02T07:42:03.000Z | gooey/gui/components/options/validators.py | geosaleh/Gooey | c2aa8afc269271d55f011c6bc89828992a30b3f0 | [
"MIT"
] | null | null | null | import re
from functools import wraps
from gooey.gui.components.filtering.prefix_filter import OperatorType
class SuperBool(object):
"""
A boolean which keeps with it the rationale
for when it is false.
"""
    def __init__(self, value, rationale):
        self.value = value
        self.rationale = rationale
    def __bool__(self):
        return self.value
    __nonzero__ = __bool__
def lift(f):
"""
Lifts a basic predicate to the SuperBool type
stealing the docstring as the rationale message.
This is largely just goofing around and experimenting
since it's a private internal API.
"""
    @wraps(f)
    def inner(value):
        result = f(value)
        return SuperBool(result, f.__doc__) if not isinstance(result, SuperBool) else result
    return inner
@lift
def is_tuple_or_list(value):
"""Must be either a list or tuple"""
return isinstance(value, list) or isinstance(value, tuple)
@lift
def is_str(value):
"""Must be of type `str`"""
return isinstance(value, str)
@lift
def is_str_or_coll(value):
"""
Colors must be either a hex string or collection of RGB values.
e.g.
Hex string: #fff0ce
RGB Collection: [0, 255, 128] or (0, 255, 128)
"""
return bool(is_str(value)) or bool(is_tuple_or_list(value))
@lift
def has_valid_channel_values(rgb_coll):
"""Colors in an RGB collection must all be in the range 0-255"""
return all([is_0to255(c) and is_int(c) for c in rgb_coll])
@lift
def is_three_channeled(value):
"""Missing channels! Colors in an RGB collection should be of the form [R,G,B] or (R,G,B)"""
return len(value) == 3
@lift
def is_hex_string(value: str):
"""Invalid hexadecimal format. Expected: "#FFFFFF" """
return isinstance(value, str) and bool(re.match('^#[\dABCDEF]{6}$', value, flags=2))
@lift
def is_bool(value):
"""Must be of type Boolean"""
return isinstance(value, bool)
@lift
def non_empty_string(value):
"""Must be a non-empty non-blank string"""
return bool(value) and bool(value.strip())
@lift
def is_tokenization_operator(value):
"""Operator must be a valid OperatorType i.e. one of: (AND, OR)"""
    return value in (OperatorType.AND, OperatorType.OR)
@lift
def is_tokenizer(value):
"""Tokenizers must be valid Regular expressions. see: options.PrefixTokenizers"""
return bool(non_empty_string(value))
@lift
def is_int(value):
"""Invalid type. Expected `int`"""
return isinstance(value, int)
@lift
def is_0to255(value):
"""RGB values must be in the range 0 - 255 (inclusive)"""
return 0 <= value <= 255
@lift
def is_0to20(value):
"""Precision values must be in the range 0 - 20 (inclusive)"""
return 0 <= value <= 20
@lift
def is_valid_color(value):
"""Must be either a valid hex string or RGB list"""
if is_str(value):
return is_hex_string(value)
elif is_tuple_or_list(value):
return (is_tuple_or_list(value)
and is_three_channeled(value)
and has_valid_channel_values(value))
else:
return is_str_or_coll(value)
validators = {
'label_color': is_valid_color,
'label_bg_color': is_valid_color,
'help_color': is_valid_color,
'help_bg_color': is_valid_color,
'error_color': is_valid_color,
'error_bg_color': is_valid_color,
'show_label': is_bool,
'show_help': is_bool,
'visible': is_bool,
'full_width': is_bool,
'height': is_int,
'readonly': is_bool,
'initial_selection': is_int,
'title': non_empty_string,
'checkbox_label': non_empty_string,
'placeholder': non_empty_string,
'empty_message': non_empty_string,
'max_size': is_int,
'choice_tokenizer': is_tokenizer,
'input_tokenizer': is_tokenizer,
'ignore_case': is_bool,
'operator': is_tokenization_operator,
'index_suffix': is_bool,
'wildcard': non_empty_string,
'default_dir': non_empty_string,
'default_file': non_empty_string,
'default_path': non_empty_string,
'message': non_empty_string,
'precision': is_0to20
}
if __name__ == '__main__':
# TODO: there should be tests
pass
# print(validateColor((1, 'ergerg', 1234)))
# print(validateColor(1234))
# print(validateColor(123.234))
# print(validateColor('123.234'))
# print(validateColor('FFFAAA'))
# print(validateColor('#FFFAAA'))
# print(validateColor([]))
# print(validateColor(()))
# print(validateColor((1, 2)))
# print(validateColor((1, 2, 1234)))
# print(is_lifted(lift(is_int)))
# print(is_lifted(is_int))
# print(OR(is_poop, is_int)('poop'))
# print(AND(is_poop, is_lower, is_lower)('pooP'))
# print(OR(is_poop, is_int))
# print(is_lifted(OR(is_poop, is_int)))
# print(validate(is_valid_color, [255, 255, 256]))
# print(is_valid_color('#fff000'))
# print(is_valid_color([255, 244, 256]))
# print(non_empty_string('asdf') and non_empty_string('asdf'))
# validate(is_valid_color, 1234)
| 26.519802 | 96 | 0.663244 | import re
from functools import wraps
from gooey.gui.components.filtering.prefix_filter import OperatorType
class SuperBool(object):
"""
A boolean which keeps with it the rationale
for when it is false.
"""
def __init__(self, value, rationale):
self.value = value
self.rationale = rationale
def __bool__(self):
return self.value
__nonzero__ = __bool__
def __str__(self):
return str(self.value)
def lift(f):
"""
Lifts a basic predicate to the SuperBool type
stealing the docstring as the rationale message.
This is largely just goofing around and experimenting
since it's a private internal API.
"""
@wraps(f)
def inner(value):
result = f(value)
return SuperBool(result, f.__doc__) if not isinstance(result, SuperBool) else result
return inner
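# Usage sketch (added for illustration; `is_even` below is a hypothetical predicate,
# not part of this module): a lifted predicate returns a SuperBool whose rationale
# is taken from the predicate's docstring.
#
#     @lift
#     def is_even(value):
#         """Must be an even number"""
#         return value % 2 == 0
#
#     check = is_even(3)
#     bool(check)       # False
#     check.rationale   # 'Must be an even number'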
@lift
def is_tuple_or_list(value):
"""Must be either a list or tuple"""
return isinstance(value, list) or isinstance(value, tuple)
@lift
def is_str(value):
"""Must be of type `str`"""
return isinstance(value, str)
@lift
def is_str_or_coll(value):
"""
Colors must be either a hex string or collection of RGB values.
e.g.
Hex string: #fff0ce
RGB Collection: [0, 255, 128] or (0, 255, 128)
"""
return bool(is_str(value)) or bool(is_tuple_or_list(value))
@lift
def has_valid_channel_values(rgb_coll):
"""Colors in an RGB collection must all be in the range 0-255"""
return all([is_0to255(c) and is_int(c) for c in rgb_coll])
@lift
def is_three_channeled(value):
"""Missing channels! Colors in an RGB collection should be of the form [R,G,B] or (R,G,B)"""
return len(value) == 3
@lift
def is_hex_string(value: str):
"""Invalid hexadecimal format. Expected: "#FFFFFF" """
return isinstance(value, str) and bool(re.match('^#[\dABCDEF]{6}$', value, flags=2))
@lift
def is_bool(value):
"""Must be of type Boolean"""
return isinstance(value, bool)
@lift
def non_empty_string(value):
"""Must be a non-empty non-blank string"""
return bool(value) and bool(value.strip())
@lift
def is_tokenization_operator(value):
"""Operator must be a valid OperatorType i.e. one of: (AND, OR)"""
    return value in (OperatorType.AND, OperatorType.OR)
@lift
def is_tokenizer(value):
"""Tokenizers must be valid Regular expressions. see: options.PrefixTokenizers"""
return bool(non_empty_string(value))
@lift
def is_int(value):
"""Invalid type. Expected `int`"""
return isinstance(value, int)
@lift
def is_0to255(value):
"""RGB values must be in the range 0 - 255 (inclusive)"""
return 0 <= value <= 255
@lift
def is_0to20(value):
"""Precision values must be in the range 0 - 20 (inclusive)"""
return 0 <= value <= 20
@lift
def is_valid_color(value):
"""Must be either a valid hex string or RGB list"""
if is_str(value):
return is_hex_string(value)
elif is_tuple_or_list(value):
return (is_tuple_or_list(value)
and is_three_channeled(value)
and has_valid_channel_values(value))
else:
return is_str_or_coll(value)
validators = {
'label_color': is_valid_color,
'label_bg_color': is_valid_color,
'help_color': is_valid_color,
'help_bg_color': is_valid_color,
'error_color': is_valid_color,
'error_bg_color': is_valid_color,
'show_label': is_bool,
'show_help': is_bool,
'visible': is_bool,
'full_width': is_bool,
'height': is_int,
'readonly': is_bool,
'initial_selection': is_int,
'title': non_empty_string,
'checkbox_label': non_empty_string,
'placeholder': non_empty_string,
'empty_message': non_empty_string,
'max_size': is_int,
'choice_tokenizer': is_tokenizer,
'input_tokenizer': is_tokenizer,
'ignore_case': is_bool,
'operator': is_tokenization_operator,
'index_suffix': is_bool,
'wildcard': non_empty_string,
'default_dir': non_empty_string,
'default_file': non_empty_string,
'default_path': non_empty_string,
'message': non_empty_string,
'precision': is_0to20
}
def collect_errors(predicates, m):
return {
k:predicates[k](v).rationale
for k,v in m.items()
if k in predicates and not predicates[k](v)}
def validate(pred, value):
result = pred(value)
if not result:
raise ValueError(result.rationale)
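# Usage sketch (added for illustration; the option values below are made up):
#
#     collect_errors(validators, {'show_label': 'yes', 'height': 20})
#     # -> {'show_label': 'Must be of type Boolean'}   ('height' passes is_int, so no entry)
#
#     validate(is_bool, 'yes')   # raises ValueError('Must be of type Boolean')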
if __name__ == '__main__':
# TODO: there should be tests
pass
# print(validateColor((1, 'ergerg', 1234)))
# print(validateColor(1234))
# print(validateColor(123.234))
# print(validateColor('123.234'))
# print(validateColor('FFFAAA'))
# print(validateColor('#FFFAAA'))
# print(validateColor([]))
# print(validateColor(()))
# print(validateColor((1, 2)))
# print(validateColor((1, 2, 1234)))
# print(is_lifted(lift(is_int)))
# print(is_lifted(is_int))
# print(OR(is_poop, is_int)('poop'))
# print(AND(is_poop, is_lower, is_lower)('pooP'))
# print(OR(is_poop, is_int))
# print(is_lifted(OR(is_poop, is_int)))
# print(validate(is_valid_color, [255, 255, 256]))
# print(is_valid_color('#fff000'))
# print(is_valid_color([255, 244, 256]))
# print(non_empty_string('asdf') and non_empty_string('asdf'))
# validate(is_valid_color, 1234)
| 482 | 0 | 152 |
5807257790cef3d53abb06fe45677a87bcfe3f8b | 3,812 | py | Python | server/iotud/api/devices.py | hollwann/dashboard-iot-udistrital | a92c6b65fce5c343abeffcb5badf1f4bfd9ab1f2 | [
"MIT"
] | 2 | 2020-07-02T19:09:12.000Z | 2020-07-05T00:33:55.000Z | server/iotud/api/devices.py | hollwann/dashboard-iot-udistrital | a92c6b65fce5c343abeffcb5badf1f4bfd9ab1f2 | [
"MIT"
] | 3 | 2020-07-05T00:55:08.000Z | 2022-02-27T11:29:51.000Z | server/iotud/api/devices.py | hollwann/dashboard-iot-udistrital | a92c6b65fce5c343abeffcb5badf1f4bfd9ab1f2 | [
"MIT"
] | null | null | null | import functools
from datetime import datetime
from flask import Blueprint, jsonify, request
from iotud.tools import fetch_all, fetch_one, update, insert, get_auth_props, either_response, delete
from string import ascii_lowercase
import random
from oslash import Right, Left
from toolz import accumulate, assoc, reduce
bp = Blueprint('devices', __name__, url_prefix="/users")
@bp.route('/create_device', methods=['POST'])
@bp.route('/delete_device', methods=['POST'])
@bp.route('/update_device', methods=['POST'])
@bp.route('/get_devices', methods=['POST'])
@bp.route('/get_device', methods=['POST'])
| 35.626168 | 101 | 0.644019 | import functools
from datetime import datetime
from flask import Blueprint, jsonify, request
from iotud.tools import fetch_all, fetch_one, update, insert, get_auth_props, either_response, delete
from string import ascii_lowercase
import random
from oslash import Right, Left
from toolz import accumulate, assoc, reduce
bp = Blueprint('devices', __name__, url_prefix="/users")
@bp.route('/create_device', methods=['POST'])
def create_device():
data = get_auth_props(['name', 'description'],
request.get_json(),
request.headers.get('Authorization'))
addedDevice = data.bind(add_device_db)
return either_response(addedDevice, 'Dispositivo creado con exito.')
@bp.route('/delete_device', methods=['POST'])
def delete_device():
data = get_auth_props(['id_device'],
request.get_json(),
request.headers.get('Authorization'))
deletedDevice = data.bind(check_device_ownership).bind(delete_device_db)
return either_response(deletedDevice, 'Dispositivo eliminado con exito.')
@bp.route('/update_device', methods=['POST'])
def update_device():
data = get_auth_props(['id_device', 'name', 'description'],
request.get_json(),
request.headers.get('Authorization'))
updatedDevice = data.bind(check_device_ownership).bind(update_device_db)
return either_response(updatedDevice, 'Dispositivo actualizado con exito.')
@bp.route('/get_devices', methods=['POST'])
def get_devices():
data = get_auth_props([],
request.get_json(),
request.headers.get('Authorization'))
devices = data.bind(get_devices_db).map(lambda x: {"devices": x})
return either_response(devices)
@bp.route('/get_device', methods=['POST'])
def get_device():
data = get_auth_props(['id_device'],
request.get_json(),
request.headers.get('Authorization'))
devices = data.bind(check_device_ownership).bind(
get_device_db).map(lambda x: {"device": x})
return either_response(devices)
def check_device_ownership(data: dict):
query = "SELECT * FROM devices WHERE id_device = %s AND id_user = %s"
vals = (data["data"]["id_device"], data["user"]["id_user"])
device = fetch_one(query, vals)
return device.bind(lambda device: Left('UD006') if device is None else Right(data))
def get_device_db(data: dict):
query = "SELECT * FROM devices WHERE id_user = %s AND id_device = %s"
vals = (data["user"]["id_user"], data["data"]["id_device"])
device = fetch_one(query, vals)
return device
def get_devices_db(data: dict):
query = "SELECT * FROM devices WHERE id_user = %s"
vals = (data["user"]["id_user"],)
devices = fetch_all(query, vals)
return devices
def add_device_db(data: dict):
query = "INSERT INTO devices(name, description, variables_number,\
api_key_read, api_key_write, timestamp, id_user)\
VALUES (%s, %s, %s, %s, %s, %s, %s)"
vals = (data["data"]["name"], data["data"]["description"], 0,
gen_apikey(), gen_apikey(), datetime.now(), data["user"]["id_user"])
return insert(query, vals)
def update_device_db(data: dict):
query = "UPDATE devices SET name = %s, description = %s \
WHERE id_device = %s"
vals = (data["data"]["name"], data["data"]
["description"], data["data"]["id_device"])
return update(query, vals)
def delete_device_db(data: dict):
query = "DELETE FROM devices WHERE id_device = %s"
vals = (data["data"]["id_device"], )
return delete(query, vals)
def gen_apikey():
apikey = ''.join(random.choice(ascii_lowercase) for i in range(32))
return apikey
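# Handler pattern sketch (added for illustration; the values below are made up):
# each helper returns an oslash Either, so the route handlers chain them with
# bind() and short-circuit on the first Left error code.
#
#     get_auth_props(['id_device'], {'id_device': 7}, auth_header) \
#         .bind(check_device_ownership) \
#         .bind(get_device_db)
#     # -> Right(<device row>)  on success
#     # -> Left('UD006')        when the device does not belong to the user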
| 2,920 | 0 | 271 |
80d14ac67554130253c5695660674cb336aa2294 | 2,598 | py | Python | templates/movie_info_popup.py | rikbarker/watcher | dadacd21a5790ee609058a98a17fcc8954d24439 | [
"Apache-2.0"
] | 194 | 2016-12-23T19:11:09.000Z | 2020-12-07T04:04:10.000Z | templates/movie_info_popup.py | rikbarker/watcher | dadacd21a5790ee609058a98a17fcc8954d24439 | [
"Apache-2.0"
] | 236 | 2016-11-20T07:56:15.000Z | 2017-04-12T12:10:00.000Z | templates/movie_info_popup.py | rikbarker/watcher | dadacd21a5790ee609058a98a17fcc8954d24439 | [
"Apache-2.0"
] | 51 | 2016-11-20T08:05:33.000Z | 2021-01-26T13:22:40.000Z | import json
import core
from core.movieinfo import Trailer
from dominate.tags import *
# pylama:ignore=W0401
| 37.652174 | 158 | 0.54311 | import json
import core
from core.movieinfo import Trailer
from dominate.tags import *
class MovieInfoPopup():
def __init__(self):
return
def html(self, data_json):
'''
data: str json object movie data dict
'''
data = json.loads(data_json)
trailer = Trailer()
title_date = data['title'] + ' ' + data['release_date'][:4]
youtube_id = trailer.get_trailer(title_date)
tmdb_url = u"https://www.themoviedb.org/movie/{}".format(data['id'])
if youtube_id:
trailer_embed = u"https://www.youtube.com/embed/{}?&showinfo=0".format(youtube_id)
else:
trailer_embed = ''
if data['poster_path'] is None:
data['poster_path'] = core.URL_BASE + '/static/images/missing_poster.jpg'
else:
data['poster_path'] = u'http://image.tmdb.org/t/p/w300{}'.format(data['poster_path'])
container = div(id='container')
with container:
script(type='text/javascript', src=core.URL_BASE + '/static/js/add_movie/movie_info_popup.js?v=02.02b')
with div(id='title'):
span(title_date, id='title')
i(cls='fa fa-plus', id='button_add')
with div('Quality profile: ', cls='hidden', id='quality'):
with select(id='quality_profile', value='Default'):
options = core.CONFIG['Quality']['Profiles'].keys()
for opt in options:
if opt == 'Default':
option(opt, value='Default')
else:
option(opt, value=opt)
i(id='button_save', cls='fa fa-save')
with div(id='media'):
img(id='poster', src=data['poster_path'])
iframe(id='trailer', width="640", height="360", src=trailer_embed, frameborder="0")
with div(id='plot'):
p(data['overview'])
with div(id='additional_info'):
with a(href=tmdb_url, target='_blank'):
p(u'TMDB Score: {}'.format(data['vote_average']))
span(u'Theatrical Release Date: {}'.format(data['release_date']))
div(data_json, id='hidden_data', cls='hidden')
return unicode(container)
def no_data(self):
message = "<div id='container'><span>Unable to retrive movie information. Try again in a few minutes or check logs for more information.</span></div>"
return message
# pylama:ignore=W0401
| 192 | 2,271 | 23 |
8a78cc74ff0b5595f02947a1d7e6f8bbbc2fdf09 | 196 | py | Python | scripts/item/consume_2436229.py | lynsone/swordie | 7e9d564c1f2659a87e01c376089e1ee0a3842c5b | [
"MIT"
] | 2 | 2020-04-15T03:16:07.000Z | 2020-08-12T23:28:32.000Z | scripts/item/consume_2436229.py | lynsone/swordie | 7e9d564c1f2659a87e01c376089e1ee0a3842c5b | [
"MIT"
] | null | null | null | scripts/item/consume_2436229.py | lynsone/swordie | 7e9d564c1f2659a87e01c376089e1ee0a3842c5b | [
"MIT"
] | 3 | 2020-08-25T06:55:25.000Z | 2020-12-01T13:07:43.000Z | # Pig Bar Damage Skin
success = sm.addDamageSkin(2436229)
if success:
sm.chat("The Pig Bar Damage Skin has been added to your account's damage skin collection.")
# sm.consumeItem(2436229)
| 32.666667 | 95 | 0.739796 | # Pig Bar Damage Skin
success = sm.addDamageSkin(2436229)
if success:
sm.chat("The Pig Bar Damage Skin has been added to your account's damage skin collection.")
# sm.consumeItem(2436229)
| 0 | 0 | 0 |
9776a56824d42f2bff18ed175f92278719ddf139 | 93 | py | Python | jhu_primitives/euclidean_nomination/__init__.py | remram44/primitives-interfaces | f6d305d6f65fc8c89c14bef6f2b8b4d86d44005b | [
"Apache-2.0"
] | null | null | null | jhu_primitives/euclidean_nomination/__init__.py | remram44/primitives-interfaces | f6d305d6f65fc8c89c14bef6f2b8b4d86d44005b | [
"Apache-2.0"
] | 23 | 2017-09-20T08:12:13.000Z | 2022-03-01T01:49:11.000Z | jhu_primitives/euclidean_nomination/__init__.py | remram44/primitives-interfaces | f6d305d6f65fc8c89c14bef6f2b8b4d86d44005b | [
"Apache-2.0"
] | 8 | 2018-05-14T18:44:38.000Z | 2021-03-18T19:53:23.000Z | from __future__ import absolute_import
from .euclidean_nomination import EuclideanNomination
| 31 | 53 | 0.903226 | from __future__ import absolute_import
from .euclidean_nomination import EuclideanNomination
| 0 | 0 | 0 |
67358a218c8de9c7a4a7b5c11a14dc74a49ac17a | 403 | py | Python | Startup.py | TimothyBergstrom/Zergy | ca9e51afccd0907dc343c36e3421211ed9861319 | [
"Apache-2.0"
] | null | null | null | Startup.py | TimothyBergstrom/Zergy | ca9e51afccd0907dc343c36e3421211ed9861319 | [
"Apache-2.0"
] | 1 | 2017-11-06T21:59:41.000Z | 2017-11-06T21:59:41.000Z | Startup.py | TimothyBergstrom/Zergy | ca9e51afccd0907dc343c36e3421211ed9861319 | [
"Apache-2.0"
] | null | null | null | import subprocess
import os
try:
Settings=[]
f=open('data/Settings.txt','r')
_lines=f.readlines()
Settings.append([i.replace('\n','') for i in _lines])
Settings=Settings[0] #Because list in list
f.close()
except:
Settings=['Manual: Fill',5,0,'No','No']
try:
subprocess.Popen(['GUI.exe'],stdout=subprocess.PIPE,creationflags=0x08000000)
except:
os.system('GUI.py')
| 21.210526 | 81 | 0.652605 | import subprocess
import os
try:
Settings=[]
f=open('data/Settings.txt','r')
_lines=f.readlines()
Settings.append([i.replace('\n','') for i in _lines])
Settings=Settings[0] #Because list in list
f.close()
except:
Settings=['Manual: Fill',5,0,'No','No']
try:
subprocess.Popen(['GUI.exe'],stdout=subprocess.PIPE,creationflags=0x08000000)
except:
os.system('GUI.py')
| 0 | 0 | 0 |
598d01312b8f1651df66dcdb2419ccaee00465e3 | 280 | py | Python | src/stemming.py | Stoeoeoe/InnovationThesis | eaf8e419bdd8d0ee33f30a789c5ac93633ae3062 | [
"MIT"
] | null | null | null | src/stemming.py | Stoeoeoe/InnovationThesis | eaf8e419bdd8d0ee33f30a789c5ac93633ae3062 | [
"MIT"
] | null | null | null | src/stemming.py | Stoeoeoe/InnovationThesis | eaf8e419bdd8d0ee33f30a789c5ac93633ae3062 | [
"MIT"
] | null | null | null | from nltk import stem
from nltk import tokenize
from nltk import tag
from nltk.tag import pos_tag
from nltk.tokenize import word_tokenize
text1 = 'His acting was amazing.'
text2 = 'He was merely acting.'
print(pos_tag(word_tokenize(text1)))
print(pos_tag(word_tokenize(text2))) | 23.333333 | 39 | 0.792857 | from nltk import stem
from nltk import tokenize
from nltk import tag
from nltk.tag import pos_tag
from nltk.tokenize import word_tokenize
text1 = 'His acting was amazing.'
text2 = 'He was merely acting.'
print(pos_tag(word_tokenize(text1)))
print(pos_tag(word_tokenize(text2))) | 0 | 0 | 0 |
d623c86c78e6b71a533d2abdd161fcf6efba9130 | 935 | py | Python | test/test_miner-search.py | sckott/pyminer | 00f05067f536465d664999d613e4c37500653976 | [
"MIT"
] | 6 | 2016-02-02T21:29:11.000Z | 2020-08-16T14:11:19.000Z | test/test_miner-search.py | sckott/pyminer | 00f05067f536465d664999d613e4c37500653976 | [
"MIT"
] | 9 | 2016-11-12T16:14:13.000Z | 2020-09-25T13:15:42.000Z | test/test_miner-search.py | sckott/pyminer | 00f05067f536465d664999d613e4c37500653976 | [
"MIT"
] | null | null | null | """Tests for miner module - search methods"""
import os
import pytest
import vcr
from pyminer import Miner
m = Miner()
@vcr.use_cassette('test/vcr_cassettes/search.yaml')
def test_search():
"miner.search - basic test"
res = m.search(filter = {'has_full_text': True}, limit = 5)
assert 'Mined' == res.__class__.__name__
assert list == res.items.__class__
assert dict == res.result.__class__
assert 'method' == res.links.__class__.__name__
@vcr.use_cassette('test/vcr_cassettes/search_limit.yaml')
def test_search_limit():
"miner.search - limit"
res = m.search(filter = {'has_full_text': True}, limit = 1)
assert 'Mined' == res.__class__.__name__
assert 1 == len(res.items)
# def test_search_filter():
# "miner.search - diff taxonKey2"
# res = miner.search()
# assert 'dict' == res.__class__.__name__
# assert 6 == len(res)
# assert 2683264 == res['results'][0]['taxonKey']
| 31.166667 | 63 | 0.677005 | """Tests for miner module - search methods"""
import os
import pytest
import vcr
from pyminer import Miner
m = Miner()
@vcr.use_cassette('test/vcr_cassettes/search.yaml')
def test_search():
"miner.search - basic test"
res = m.search(filter = {'has_full_text': True}, limit = 5)
assert 'Mined' == res.__class__.__name__
assert list == res.items.__class__
assert dict == res.result.__class__
assert 'method' == res.links.__class__.__name__
@vcr.use_cassette('test/vcr_cassettes/search_limit.yaml')
def test_search_limit():
"miner.search - limit"
res = m.search(filter = {'has_full_text': True}, limit = 1)
assert 'Mined' == res.__class__.__name__
assert 1 == len(res.items)
# def test_search_filter():
# "miner.search - diff taxonKey2"
# res = miner.search()
# assert 'dict' == res.__class__.__name__
# assert 6 == len(res)
# assert 2683264 == res['results'][0]['taxonKey']
| 0 | 0 | 0 |
5fd15b5c667704a8d8a73b148dadac94ae602905 | 1,961 | py | Python | make/photon/prepare/tests/migrations/utils_test.py | gerhardgossen/harbor | 1d03b8727acb9a3935bf45cd76b61f87c68e2a08 | [
"Apache-2.0"
] | 1 | 2020-07-11T09:12:18.000Z | 2020-07-11T09:12:18.000Z | make/photon/prepare/tests/migrations/utils_test.py | gerhardgossen/harbor | 1d03b8727acb9a3935bf45cd76b61f87c68e2a08 | [
"Apache-2.0"
] | 10 | 2021-05-31T00:06:59.000Z | 2022-02-11T12:34:16.000Z | make/photon/prepare/tests/migrations/utils_test.py | gerhardgossen/harbor | 1d03b8727acb9a3935bf45cd76b61f87c68e2a08 | [
"Apache-2.0"
] | 1 | 2020-07-12T16:51:07.000Z | 2020-07-12T16:51:07.000Z | import pytest
import importlib
from utils.migration import search
@pytest.fixture
@pytest.fixture
@pytest.fixture
| 33.237288 | 79 | 0.721571 | import pytest
import importlib
from utils.migration import search
class mockModule:
def __init__(self, revision, down_revision):
self.revision = revision
self.down_revision = down_revision
def mock_import_module_loop(module_path: str):
loop_modules = {
'migration.versions.1_9_0': mockModule('1.9.0', None),
'migration.versions.1_10_0': mockModule('1.10.0', '2.0.0'),
'migration.versions.2_0_0': mockModule('2.0.0', '1.10.0')
}
return loop_modules[module_path]
def mock_import_module_mission(module_path: str):
loop_modules = {
'migration.versions.1_9_0': mockModule('1.9.0', None),
'migration.versions.1_10_0': mockModule('1.10.0', None),
'migration.versions.2_0_0': mockModule('2.0.0', '1.10.0')
}
return loop_modules[module_path]
def mock_import_module_success(module_path: str):
loop_modules = {
'migration.versions.1_9_0': mockModule('1.9.0', None),
'migration.versions.1_10_0': mockModule('1.10.0', '1.9.0'),
'migration.versions.2_0_0': mockModule('2.0.0', '1.10.0')
}
return loop_modules[module_path]
@pytest.fixture
def mock_import_module_with_loop(monkeypatch):
monkeypatch.setattr(importlib, "import_module", mock_import_module_loop)
@pytest.fixture
def mock_import_module_with_mission(monkeypatch):
monkeypatch.setattr(importlib, "import_module", mock_import_module_mission)
@pytest.fixture
def mock_import_module_with_success(monkeypatch):
monkeypatch.setattr(importlib, "import_module", mock_import_module_success)
def test_search_loop(mock_import_module_with_loop):
with pytest.raises(Exception):
search('1.9.0', '2.0.0')
def test_search_mission(mock_import_module_with_mission):
with pytest.raises(Exception):
search('1.9.0', '2.0.0')
def test_search_success(mock_import_module_with_success):
migration_path = search('1.9.0', '2.0.0')
assert migration_path[0].revision == '1.10.0'
assert migration_path[1].revision == '2.0.0'
| 1,594 | -4 | 253 |
502c09b74ab49cdb0e21fe1b8efa995fdfd36c8b | 2,168 | py | Python | infoblox_netmri/api/remote/models/device_certificate_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | infoblox_netmri/api/remote/models/device_certificate_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | infoblox_netmri/api/remote/models/device_certificate_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class DeviceCertificateRemote(RemoteModel):
"""
    This table lists out the device certificates.
| ``id:`` The internal NetMRI identifier for this device certificate.
| ``attribute type:`` number
| ``created_at:`` The date and time the record was initially created in NetMRI.
| ``attribute type:`` datetime
| ``updated_at:`` The date and time the record was last modified in NetMRI.
| ``attribute type:`` datetime
| ``certificate:`` The certificate BLOB data.
| ``attribute type:`` string
| ``certificate_parameter:`` The certificate parameters.
| ``attribute type:`` string
| ``certificate_type:`` The type of certificate.
| ``attribute type:`` string
| ``UnitID:`` The internal NetMRI identifier for collector.
| ``attribute type:`` number
| ``ip_address_dotted:`` The IP address as string.
| ``attribute type:`` string
| ``ip_address_numeric:`` The IP address as number.
| ``attribute type:`` number
| ``name:`` The certificate name.
| ``attribute type:`` string
| ``username:`` The username for certificate.
| ``attribute type:`` string
| ``password:`` The password for certificate.
| ``attribute type:`` string
| ``passphrase:`` The passphrase for certificate.
| ``attribute type:`` string
| ``port:`` Port number to use for connection with this certificate.
| ``attribute type:`` number
"""
properties = ("id",
"created_at",
"updated_at",
"certificate",
"certificate_parameter",
"certificate_type",
"UnitID",
"ip_address_dotted",
"ip_address_numeric",
"name",
"username",
"password",
"passphrase",
"port",
)
| 25.809524 | 84 | 0.5369 | from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class DeviceCertificateRemote(RemoteModel):
"""
    This table lists out the device certificates.
| ``id:`` The internal NetMRI identifier for this device certificate.
| ``attribute type:`` number
| ``created_at:`` The date and time the record was initially created in NetMRI.
| ``attribute type:`` datetime
| ``updated_at:`` The date and time the record was last modified in NetMRI.
| ``attribute type:`` datetime
| ``certificate:`` The certificate BLOB data.
| ``attribute type:`` string
| ``certificate_parameter:`` The certificate parameters.
| ``attribute type:`` string
| ``certificate_type:`` The type of certificate.
| ``attribute type:`` string
| ``UnitID:`` The internal NetMRI identifier for collector.
| ``attribute type:`` number
| ``ip_address_dotted:`` The IP address as string.
| ``attribute type:`` string
| ``ip_address_numeric:`` The IP address as number.
| ``attribute type:`` number
| ``name:`` The certificate name.
| ``attribute type:`` string
| ``username:`` The username for certificate.
| ``attribute type:`` string
| ``password:`` The password for certificate.
| ``attribute type:`` string
| ``passphrase:`` The passphrase for certificate.
| ``attribute type:`` string
| ``port:`` Port number to use for connection with this certificate.
| ``attribute type:`` number
"""
properties = ("id",
"created_at",
"updated_at",
"certificate",
"certificate_parameter",
"certificate_type",
"UnitID",
"ip_address_dotted",
"ip_address_numeric",
"name",
"username",
"password",
"passphrase",
"port",
)
| 0 | 0 | 0 |
d530591ad3e0b6d1d80a67611a179ab599febfb7 | 1,830 | py | Python | neural_network.py | maxlorenz/Simple_NN | 2752663fd3ccacee42e339859060eed5125f61d5 | [
"Apache-2.0"
] | 88 | 2017-02-25T14:44:01.000Z | 2022-01-28T23:24:04.000Z | neural_network.py | maxlorenz/Simple_NN | 2752663fd3ccacee42e339859060eed5125f61d5 | [
"Apache-2.0"
] | 2 | 2017-02-25T19:36:21.000Z | 2017-02-27T21:59:18.000Z | neural_network.py | maxlorenz/Simple_NN | 2752663fd3ccacee42e339859060eed5125f61d5 | [
"Apache-2.0"
] | 10 | 2017-02-25T19:20:03.000Z | 2018-08-21T12:05:03.000Z | from random import random, choice
if __name__ == "__main__":
nn = NeuralNetwork(inputs=2, hidden_neurons=4)
training = [([0, 0], 0), ([0, 1], 1), ([1, 0], 1), ([1, 1], 0)]
for epoch in range(100000):
input, target = choice(training)
nn.learn(input, target)
for input, target in training:
print('IN: {}, EXPECTED: {}, RESULT: {:.2f}'
.format(input, target, nn.predict(input)))
| 26.911765 | 69 | 0.571585 | from random import random, choice
class Neuron(object):
def __init__(self, num_inputs):
self.inputs = []
self.learning_rate = 0.01
self.weights = [random() for _ in range(num_inputs)]
self.bias = random()
@staticmethod
def activation(x):
"""ReLU function"""
return (x > 0) * x
@staticmethod
def sensitivity(x):
"""Derivative of ReLU"""
return (x > 0) * 1
def output(self):
c = sum([w * i for w, i in zip(self.weights, self.inputs)])
return self.activation(self.bias + c)
def adjust(self, error):
correction = self.sensitivity(self.output())
correction *= self.learning_rate * error
self.weights = [w + correction * i
for w, i in zip(self.weights, self.inputs)]
self.bias += correction
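    # Worked example (added note, not in the original): with weights [0.5, -0.25],
    # bias 0.1 and inputs [1, 2], the weighted sum is 0.5*1 + (-0.25)*2 + 0.1 = 0.1,
    # so output() returns 0.1. ReLU passes positive sums through and clamps negative
    # ones to 0, and sensitivity() (its derivative) is 1 for positive sums, else 0.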
class NeuralNetwork(object):
def __init__(self, inputs=2, hidden_neurons=2):
self.hidden = [Neuron(inputs) for _ in range(hidden_neurons)]
self.y = Neuron(hidden_neurons)
def predict(self, input):
for h in self.hidden:
h.inputs = input
self.y.inputs = [h.output() for h in self.hidden]
return self.y.output()
def learn(self, input, target):
error = target - self.predict(input)
self.y.adjust(error)
for h, w in zip(self.hidden, self.y.weights):
h.adjust(error * w)
if __name__ == "__main__":
nn = NeuralNetwork(inputs=2, hidden_neurons=4)
training = [([0, 0], 0), ([0, 1], 1), ([1, 0], 1), ([1, 1], 0)]
for epoch in range(100000):
input, target = choice(training)
nn.learn(input, target)
for input, target in training:
print('IN: {}, EXPECTED: {}, RESULT: {:.2f}'
.format(input, target, nn.predict(input)))
| 978 | 288 | 127 |
351137de7e2941c3ec14e6b66c19e390ce537c11 | 1,562 | py | Python | coastl/stl_toolkit/stl_processing.py | prathgan/COASTL | 2ee009964f8bafc2d108aba6554f230549cb09e3 | [
"MIT"
] | null | null | null | coastl/stl_toolkit/stl_processing.py | prathgan/COASTL | 2ee009964f8bafc2d108aba6554f230549cb09e3 | [
"MIT"
] | null | null | null | coastl/stl_toolkit/stl_processing.py | prathgan/COASTL | 2ee009964f8bafc2d108aba6554f230549cb09e3 | [
"MIT"
] | null | null | null | import re
from gurobipy import *
from .stl_parsing import parse_logic
from .stl_constraints import create_constraints
from .utilities.simple_utilities import remove_gurobi_log, parentheses_match
def solve(logic):
"""Finds solutions for all binary and continuous variables in logic string"""
return synthesize_stl(create_model_stl(parse_stl(logic)),remove_log=True).getVars()
def parse_stl(logic, remove_log=False):
"""Parses string of STL logic into an STL tree"""
if not parentheses_match(logic):
raise ValueError("Opening and closing brackets do not match, check '(' and ')'")
stl_tree = parse_logic(logic, None, None)
return stl_tree
def synthesize_stl(stl_node, ret_type=0, remove_log=False, console_log=True, maximize_vars=None, minimize_vars=None):
"""creates constraints and synthesizes solutions for variables in STL tree, returns optimized model"""
m = create_constraints(stl_node, maximize_vars=maximize_vars, minimize_vars=minimize_vars, console_log=console_log)
m.optimize()
if remove_log:
remove_gurobi_log()
if ret_type==1:
return m.getVars()
else:
return m
def create_model_stl(stl_node, remove_log=False, console_log=True, maximize_vars=None, minimize_vars=None):
"""Creates constrained MILP gurobi model for STL tree"""
return create_constraints(stl_node, maximize_vars=maximize_vars, minimize_vars=minimize_vars, console_log=console_log)
def synthesize_stl(m, ret_type=0):
"""Synthesizes solutions for variables in model m by optimizing model"""
m.optimize()
if ret_type==1:
return m.getVars()
else:
return m
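# Workflow sketch (added for illustration; the formula string is a placeholder,
# since the exact STL syntax accepted by parse_logic is not shown in this module):
#
#     tree = parse_stl("<STL formula>")
#     model = create_model_stl(tree)   # constrained MILP gurobipy model
#     solved = synthesize_stl(model)   # optimises the model
#     values = solved.getVars()        # equivalent to solve("<STL formula>")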
| 39.05 | 119 | 0.792574 | import re
from gurobipy import *
from .stl_parsing import parse_logic
from .stl_constraints import create_constraints
from .utilities.simple_utilities import remove_gurobi_log, parentheses_match
def solve(logic):
"""Finds solutions for all binary and continuous variables in logic string"""
return synthesize_stl(create_model_stl(parse_stl(logic)),remove_log=True).getVars()
def parse_stl(logic, remove_log=False):
"""Parses string of STL logic into an STL tree"""
if not parentheses_match(logic):
raise ValueError("Opening and closing brackets do not match, check '(' and ')'")
stl_tree = parse_logic(logic, None, None)
return stl_tree
def synthesize_stl(stl_node, ret_type=0, remove_log=False, console_log=True, maximize_vars=None, minimize_vars=None):
"""creates constraints and synthesizes solutions for variables in STL tree, returns optimized model"""
m = create_constraints(stl_node, maximize_vars=maximize_vars, minimize_vars=minimize_vars, console_log=console_log)
m.optimize()
if remove_log:
remove_gurobi_log()
if ret_type==1:
return m.getVars()
else:
return m
def create_model_stl(stl_node, remove_log=False, console_log=True, maximize_vars=None, minimize_vars=None):
"""Creates constrained MILP gurobi model for STL tree"""
return create_constraints(stl_node, maximize_vars=maximize_vars, minimize_vars=minimize_vars, console_log=console_log)
def synthesize_stl(m, ret_type=0):
"""Synthesizes solutions for variables in model m by optimizing model"""
m.optimize()
if ret_type==1:
return m.getVars()
else:
return m
| 0 | 0 | 0 |
2a025598c7e823c92ad2cf67003a731e4969487c | 781 | py | Python | tests/unit/test__main__.py | antonku/ncssl_api_client | c463b000960d50368d39bde2a180499f1da3a29a | [
"MIT"
] | 8 | 2017-11-28T11:05:52.000Z | 2021-11-16T13:52:45.000Z | tests/unit/test__main__.py | antonku/ncssl_api_client | c463b000960d50368d39bde2a180499f1da3a29a | [
"MIT"
] | 4 | 2018-12-23T14:52:11.000Z | 2019-08-09T21:01:44.000Z | tests/unit/test__main__.py | antonku/ncssl_api_client | c463b000960d50368d39bde2a180499f1da3a29a | [
"MIT"
] | 2 | 2017-11-28T14:38:24.000Z | 2017-11-29T09:03:20.000Z | from ncssl_api_client.__main__ import main_cli_wrapper
from unittest import TestCase
import mock
| 33.956522 | 80 | 0.733675 | from ncssl_api_client.__main__ import main_cli_wrapper
from unittest import TestCase
import mock
class MainTest(TestCase):
@mock.patch('ncssl_api_client.__main__.main', return_value=mock.MagicMock())
def test_status_code_zero_on_success(self, result_mock):
result_mock().is_successful.return_value = True
with self.assertRaises(SystemExit) as cm:
main_cli_wrapper()
self.assertEqual(cm.exception.code, 0)
@mock.patch('ncssl_api_client.__main__.main', return_value=mock.MagicMock())
def test_status_code_one_on_failure(self, result_mock):
result_mock().is_successful.return_value = False
with self.assertRaises(SystemExit) as cm:
main_cli_wrapper()
self.assertEqual(cm.exception.code, 1)
| 440 | 220 | 23 |
1e01397b0e0b55041950595176edcb92e499b82c | 169 | py | Python | mtd/__init__.py | jbernhard/mtd | 6326fdb44f071311ace7862371e658d609f43d08 | [
"MIT"
] | 4 | 2017-02-21T21:20:07.000Z | 2020-11-03T15:54:13.000Z | mtd/__init__.py | jbernhard/mtd | 6326fdb44f071311ace7862371e658d609f43d08 | [
"MIT"
] | null | null | null | mtd/__init__.py | jbernhard/mtd | 6326fdb44f071311ace7862371e658d609f43d08 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__version__ = '0.1.dev0'
from .multigp import MultiGP
from .pca import PCA
from .util import *
from . import priors
from george import kernels
| 16.9 | 28 | 0.710059 | # -*- coding: utf-8 -*-
__version__ = '0.1.dev0'
from .multigp import MultiGP
from .pca import PCA
from .util import *
from . import priors
from george import kernels
| 0 | 0 | 0 |
c224b45fb5e328057b7f525d9ec74ad91d54f04d | 642 | py | Python | src/core/sessions/buffers/exporter/exporters/text.py | Oire/TheQube | fcfd8a68b15948e0740642d635db24adef8cc314 | [
"MIT"
] | 21 | 2015-08-02T21:26:14.000Z | 2019-12-27T09:57:44.000Z | src/core/sessions/buffers/exporter/exporters/text.py | Oire/TheQube | fcfd8a68b15948e0740642d635db24adef8cc314 | [
"MIT"
] | 34 | 2015-01-12T00:38:14.000Z | 2020-08-31T11:19:37.000Z | src/core/sessions/buffers/exporter/exporters/text.py | Oire/TheQube | fcfd8a68b15948e0740642d635db24adef8cc314 | [
"MIT"
] | 15 | 2015-03-24T15:42:30.000Z | 2020-09-24T20:26:42.000Z | from core.sessions.buffers.exporter.exporters import BaseExporter
import codecs
| 27.913043 | 81 | 0.699377 | from core.sessions.buffers.exporter.exporters import BaseExporter
import codecs
class TextExporter (BaseExporter):
def __init__(self, **kwargs):
if 'item_template' not in kwargs or kwargs['item_template'] is None:
raise TypeError("This exporter requires item_template to be str or unicode.")
super(TextExporter, self).__init__(**kwargs)
def Run(self):
with self:
with codecs.open(self.filename, "w", "utf8") as f:
for item in self.GetFormattedItems():
f.write(item + u"\r\n")
@classmethod
def GetFileExtension(self):
return "txt"
@classmethod
def GetName(self):
return _("Text file")
| 389 | 147 | 24 |
9d53527b82665492d24c2490026e630dce7fcf5e | 724 | py | Python | 15/15_b_selection_sort.py | srinijadharani/DataStructuresLab | fa6fd5fa64467cdb62302c66a708b041792da862 | [
"MIT"
] | null | null | null | 15/15_b_selection_sort.py | srinijadharani/DataStructuresLab | fa6fd5fa64467cdb62302c66a708b041792da862 | [
"MIT"
] | null | null | null | 15/15_b_selection_sort.py | srinijadharani/DataStructuresLab | fa6fd5fa64467cdb62302c66a708b041792da862 | [
"MIT"
] | null | null | null | '''
Write a program to implement various sorting techniques:
[Compare with Python’s Built-In Sorting Functions also]
• Insertion sort
• Selection Sort
• Bubble Sort
• Merge Sort
• Quick Sort
'''
# SELECTION SORT
n = int(input("Enter the number of list items: "))
arr = []
for i in range(0, n):
ele = int(input("Enter list item: "))
arr.append(ele)
print("Initial array is:", arr)
print("Insertion-sorted array is:", selection(arr)) | 25.857143 | 58 | 0.603591 | '''
Write a program to implement various sorting techniques:
[Compare with Python’s Built-In Sorting Functions also]
• Insertion sort
• Selection Sort
• Bubble Sort
• Merge Sort
• Quick Sort
'''
# SELECTION SORT
n = int(input("Enter the number of list items: "))
arr = []
for i in range(0, n):
ele = int(input("Enter list item: "))
arr.append(ele)
print("Initial array is:", arr)
def selection(arr):
for i in range(len(arr)):
min_index = i
for j in range(i+1, len(arr)):
if arr[min_index] > arr[j]:
min_index = j
arr[i], arr[min_index] = arr[min_index], arr[i]
return arr
print("Insertion-sorted array is:", selection(arr)) | 237 | 0 | 25 |
c8cffebb050fe5118af1f33942d9c35ce08558c9 | 7,210 | py | Python | aiport_server/legacy/trans_chat/data_loader.py | ysb06/ai-port-backend | 88672388e73a48e6237ac2e51894ec25d9e283ce | [
"MIT"
] | null | null | null | aiport_server/legacy/trans_chat/data_loader.py | ysb06/ai-port-backend | 88672388e73a48e6237ac2e51894ec25d9e283ce | [
"MIT"
] | null | null | null | aiport_server/legacy/trans_chat/data_loader.py | ysb06/ai-port-backend | 88672388e73a48e6237ac2e51894ec25d9e283ce | [
"MIT"
] | null | null | null | import pandas as pd
import torch
import copy
from typing import Dict, List, Tuple
from torch import LongTensor
from torch.tensor import Tensor
from torch.utils.data import Dataset
from konlpy.tag import Kkma
from nltk.tokenize import word_tokenize
from tqdm.auto import tqdm
| 36.785714 | 119 | 0.644383 | import pandas as pd
import torch
import copy
from typing import Dict, List, Tuple
from torch import LongTensor
from torch.tensor import Tensor
from torch.utils.data import Dataset
from konlpy.tag import Kkma
from nltk.tokenize import word_tokenize
from tqdm.auto import tqdm
class ConversationDataset(Dataset):
def __init__(self, kor2idx: dict, idx2kor: dict, eng2idx: dict, idx2eng: dict, tensor_sentence_data: list) -> None:
super().__init__()
self.kor2idx = kor2idx
self.idx2kor = idx2kor
self.eng2idx = eng2idx
self.idx2eng = idx2eng
self.tensor_sentences = tensor_sentence_data
def __len__(self):
return len(self.tensor_sentences)
def __getitem__(self, index) -> Tuple[LongTensor, LongTensor]:
return self.tensor_sentences[index]
def generate_conversation_dataset(
path: str,
shuffle: bool = True,
seed: int = None,
batch_size: int = None,
sort_by_len=True,
test_ratio: float = 0.2, validation_ratio: float = 0.2
):
kor2idx = {'<unk>': 0, '<pad>': 1, '<sos>': 2, '<eos>': 3}
idx2kor = {0: '<unk>', 1: '<pad>', 2: '<sos>', 3: '<eos>'}
eng2idx = {'<unk>': 0, '<pad>': 1, '<sos>': 2, '<eos>': 3}
idx2eng = {0: '<unk>', 1: '<pad>', 2: '<sos>', 3: '<eos>'}
raw_data = pd.read_csv(path)
if shuffle:
if seed is None:
raw_data = raw_data \
.sample(frac=1) \
.reset_index(drop=True)
else:
raw_data = raw_data \
.sample(frac=1, random_state=seed) \
.reset_index(drop=True)
raw_kor = raw_data["kor_sent"].to_numpy()
raw_eng = raw_data["eng_sent"].to_numpy()
    # English is tokenized at the word level; this keeps assembling the translated output simple.
    # For more accurate translation, finer-grained tokenization followed by a reassembly step would likely be needed.
kkma = Kkma()
kor_morphs_set = set()
eng_words_set = set()
tokenized_sentences = []
print("Torkenizing and generating vocabs...")
for kor_sentence, eng_sentence in tqdm(zip(raw_kor, raw_eng), total=len(raw_kor)):
kor_sentence_morphs = kkma.morphs(kor_sentence)
        eng_sentence_words = word_tokenize(eng_sentence.lower())  # lowercase first, then tokenize
        kor_morphs_set.update(kor_sentence_morphs)  # Korean vocab candidate set
        eng_words_set.update(eng_sentence_words)  # English vocab candidate set
tokenized_sentences.append(
(['<sos>'] + kor_sentence_morphs + ['<eos>'],
['<sos>'] + eng_sentence_words + ['<eos>'])
        )  # preprocess: wrap each sentence pair with <sos>/<eos>
kor_morphs_set = sorted(kor_morphs_set)
eng_words_set = sorted(eng_words_set)
    # Build the Korean and English vocab dictionaries; note that the dictionaries are not re-initialized here (a possible source of errors).
for morph in kor_morphs_set:
        # The check below works around a problem introduced by earlier code.
if morph not in kor2idx:
if len(kor2idx) != len(idx2kor):
raise Exception("Korean dictionary is broken.")
idx = len(kor2idx)
kor2idx[morph] = idx
idx2kor[idx] = morph
for word in eng_words_set:
        # Same as the check above.
if word not in eng2idx:
if len(eng2idx) != len(idx2eng):
raise Exception("English dictionary is broken.")
idx = len(eng2idx)
eng2idx[word] = idx
idx2eng[idx] = word
print("Spliting dataset...")
test_size = int(len(tokenized_sentences) * test_ratio)
validation_size = int(len(tokenized_sentences) * validation_ratio)
test_tokenized_sentence = tokenized_sentences[:test_size]
validation_tokenized_sentence = tokenized_sentences[test_size:test_size + validation_size]
train_tokenized_sentence = tokenized_sentences[test_size + validation_size:]
test_set_tensor = []
validation_set_tensor = []
train_set_tensor = []
if sort_by_len:
test_tokenized_sentence.sort(key=lambda x: len(x[0]), reverse=True)
validation_tokenized_sentence.sort(key=lambda x: len(x[0]), reverse=True)
train_tokenized_sentence.sort(key=lambda x: len(x[0]), reverse=True)
print("Indexing Senteces (Test set)...")
test_set_tensor = __generate_batch(test_tokenized_sentence, batch_size, kor2idx, eng2idx)
print("Indexing Senteces (Validation set)...")
validation_set_tensor = __generate_batch(validation_tokenized_sentence, batch_size, kor2idx, eng2idx)
print("Indexing Senteces (Train set)...")
train_set_tensor = __generate_batch(train_tokenized_sentence, batch_size, kor2idx, eng2idx)
test_set = ConversationDataset(
copy.deepcopy(kor2idx), copy.deepcopy(idx2kor),
copy.deepcopy(eng2idx), copy.deepcopy(idx2eng),
test_set_tensor
)
validation_set = ConversationDataset(
copy.deepcopy(kor2idx), copy.deepcopy(idx2kor),
copy.deepcopy(eng2idx), copy.deepcopy(idx2eng),
validation_set_tensor
)
train_set = ConversationDataset(
kor2idx, idx2kor,
eng2idx, idx2eng,
train_set_tensor
)
return train_set, validation_set, test_set
def __generate_batch(dataset: list, batch_size: int, kor2idx: dict, eng2idx: dict) -> Tuple[Tensor, Tensor]:
result = []
sentence_code_cache: List[Tuple[List, List]] = []
max_kor_length = 0
max_eng_length = 0
for index, (kor_tokenized_sentence, eng_tokenized_sentence) in enumerate(dataset):
kor_encoded_sentence = [kor2idx[token]
for token in kor_tokenized_sentence]
eng_encoded_sentence = [eng2idx[token]
for token in eng_tokenized_sentence]
        # Would a deque be more efficient here? It probably makes little difference, so leaving it as is.
sentence_code_cache.append((kor_encoded_sentence, eng_encoded_sentence))
max_kor_length = max(max_kor_length, len(kor_encoded_sentence))
max_eng_length = max(max_eng_length, len(eng_encoded_sentence))
        # Batching and shape handling
if index is None or index % batch_size == batch_size - 1 or index == batch_size - 1:
kor_sentence_tensor_batch = None
eng_sentence_tensor_batch = None
for kor_sentence_code, eng_sentence_code in sentence_code_cache:
while len(kor_sentence_code) < max_kor_length:
kor_sentence_code.append(kor2idx["<pad>"])
while len(eng_sentence_code) < max_eng_length:
eng_sentence_code.append(kor2idx["<pad>"])
kor_sentence_tensor = LongTensor(kor_sentence_code).unsqueeze(1)
eng_sentence_tensor = LongTensor(eng_sentence_code).unsqueeze(1)
if kor_sentence_tensor_batch is None:
kor_sentence_tensor_batch = kor_sentence_tensor
else:
kor_sentence_tensor_batch = torch.cat((kor_sentence_tensor_batch, kor_sentence_tensor), 1)
if eng_sentence_tensor_batch is None:
eng_sentence_tensor_batch = eng_sentence_tensor
else:
eng_sentence_tensor_batch = torch.cat((eng_sentence_tensor_batch, eng_sentence_tensor), 1)
result.append((kor_sentence_tensor_batch, eng_sentence_tensor_batch))
sentence_code_cache.clear()
max_kor_length = 0
max_eng_length = 0
return result
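# Shape note (added for illustration; the lengths below are made up): within a batch,
# shorter encoded sentences are padded with <pad> (index 1) up to the longest one,
# each sentence becomes a column via unsqueeze(1), and the columns are concatenated
# along dim=1, so a batch of 2 Korean sentences of lengths 7 and 5 yields a
# LongTensor of shape (7, 2); the English tensor is built the same way.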
| 7,139 | 14 | 149 |
c417ac78d6a74a21d493d6480c2e10db351b3d0a | 374 | py | Python | python/delay_vs_fanout.py | antmicro/nextpnr | bb5a8162dfebdf2d2cbf64bf5503142a0deef2a9 | [
"0BSD"
] | 865 | 2018-08-01T11:41:26.000Z | 2022-03-29T08:34:29.000Z | python/delay_vs_fanout.py | antmicro/nextpnr | bb5a8162dfebdf2d2cbf64bf5503142a0deef2a9 | [
"0BSD"
] | 533 | 2018-08-01T14:01:59.000Z | 2022-03-30T13:11:09.000Z | python/delay_vs_fanout.py | antmicro/nextpnr | bb5a8162dfebdf2d2cbf64bf5503142a0deef2a9 | [
"0BSD"
] | 194 | 2018-08-01T12:48:19.000Z | 2022-03-29T10:19:57.000Z | with open("delay_vs_fanout.csv", "w") as f:
print("fanout,delay", file=f)
for net_name, net in ctx.nets:
if net.driver.cell is None:
continue
if net.driver.cell.type == "DCCA":
continue # ignore global clocks
for user in net.users:
print(f"{len(net.users)},{ctx.getNetinfoRouteDelay(net, user)}", file=f)
| 34 | 84 | 0.590909 | with open("delay_vs_fanout.csv", "w") as f:
print("fanout,delay", file=f)
for net_name, net in ctx.nets:
if net.driver.cell is None:
continue
if net.driver.cell.type == "DCCA":
continue # ignore global clocks
for user in net.users:
print(f"{len(net.users)},{ctx.getNetinfoRouteDelay(net, user)}", file=f)
| 0 | 0 | 0 |
939e70e5d13b2a3fe17b2bb42d0609a0c007c0ee | 9,957 | py | Python | Bio/PDB/TorusDBN/_io.py | mchelem/biopython | 2daa5fee06077bbada8b89fe6032c3f123318fc2 | ["PostgreSQL"] | 1 | 2020-06-29T17:32:16.000Z | 2020-06-29T17:32:16.000Z | Bio/PDB/TorusDBN/_io.py | mchelem/biopython | 2daa5fee06077bbada8b89fe6032c3f123318fc2 | ["PostgreSQL"] | null | null | null | Bio/PDB/TorusDBN/_io.py | mchelem/biopython | 2daa5fee06077bbada8b89fe6032c3f123318fc2 | ["PostgreSQL"] | null | null | null |
# Copyright (C) 2011 by Michele Silva (michele.silva@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import os
import math
import numpy
from mocapy.framework import eMISMASK
from Bio.PDB import PDBParser
from Bio.PDB.DSSP import DSSP
from Bio.PDB.Residue import Residue
from Bio.PDB.Polypeptide import PPBuilder, three_to_index
from Bio.PDB.Vector import calc_dihedral
from Bio.PDB.TorusDBN.TorusDBNExceptions import TorusDBNException, \
TorusDBNChainBreakException, TorusDBNBuildPolypeptideException
from Bio.PDB.TorusDBN._structure import dssp_to_index
def create_sequence_from_file(chain_pdb, missing_residues, quiet_parser=False):
""" Read a PDB file and creates a sequence and mismask to represent
its content.
@param chain_pdb: The PDB file to be read.
@type chain_pd: str
@param missing_residues: A dictionary with the missing residues.
@type missing_residues: dict
@param quiet_parser: Disable PDBParser warnings.
@type quiet_parser: bool
"""
sequences = []
mismasks = []
output_data = []
output_mismask = []
parser = PDBParser(QUIET=quiet_parser)
structure = parser.get_structure("X", chain_pdb)
dssp = DSSP(model=structure[0], pdb_file=chain_pdb)
# Loop over residues in peptides
ppb = PPBuilder()
pp_list = ppb.build_peptides(structure[0])
chain_list = structure[0].get_list()
if len(pp_list) == 0:
raise TorusDBNBuildPolypeptideException(
"Could not create a list of Polypeptide objects from the file %s."
% (chain_pdb)
)
else:
pp_chains, chain_missing_residues = _get_pp_with_chain_break(
chain_pdb, pp_list, chain_list, missing_residues)
for pp_index, pp in enumerate(pp_chains):
phi_psi_list = pp.get_phi_psi_list()
missing_residues = chain_missing_residues[pp_index]
for i in xrange(1, len(phi_psi_list)-1):
seq = [0] * 6
mism = [eMISMASK.MOCAPY_HIDDEN] + 4 * [eMISMASK.MOCAPY_OBSERVED]
# Amino acid
res = pp[i]
res_name = res.get_resname()
res_index = res.get_id()[1]
aa_index = three_to_index(res_name)
if res_index in missing_residues:
seq[3] = aa_index
mism[1] = eMISMASK.MOCAPY_MISSING # angles unknown
mism[3] = eMISMASK.MOCAPY_MISSING # ss unknown
mism[4] = eMISMASK.MOCAPY_MISSING # cis unknown
else:
seq[3] = aa_index
# Secondary Structure
try:
ss = res.xtra["SS_DSSP"]
ss_index = dssp_to_index(ss)
seq[4] = ss_index
except:
mism[3] = eMISMASK.MOCAPY_MISSING # ss unknown
# Angles
if None in phi_psi_list[i]:
# Previous or next residue missing, therefore angles are
# Unknown
mism[1] = eMISMASK.MOCAPY_MISSING
else:
seq[1:3] = phi_psi_list[i]
# Cis/Trans information
if (res_index - 1) in missing_residues:
mism[4] = eMISMASK.MOCAPY_MISSING # cis unknown
else:
try:
seq[5] = _get_peptide_bond_conformation(res, pp[i-1])
except TorusDBNException:
mism[4] = eMISMASK.MOCAPY_MISSING # cis unknown
output_data.append(seq)
output_mismask.append(mism)
if output_data and output_mismask:
sequences.append(numpy.array(output_data))
mismasks.append(numpy.array(output_mismask, dtype = numpy.uint))
else:
raise TorusDBNException(
"Could not create training data from the file %s."
% (chain_pdb)
)
return sequences, mismasks
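# Illustrative usage sketch (hypothetical file name; assumes DSSP and Mocapy++ are
# available, as this module already requires):
#   sequences, mismasks = create_sequence_from_file("model.pdb", missing_residues={},
#                                                   quiet_parser=True)
#   sequences[0]  # numpy array, one row per interior residue,
#                 # 6 columns: [hidden, phi, psi, aa, ss, cis]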
def read_missing_residues(filename):
""" Read missing residues from a file.
The file format is the following:
# file chain position residue
'mychain.pdb' A 23 ALA
'mychain.pdb' C 27 LEU
'other.pdb A 12 GLY
@param filename: The file describing the missing residues.
@type filename: str
@rtype: dict
@return: Dictionary of residues whose key is the (file, chain, position).
"""
try:
missing_residues = {}
if not os.path.isfile(filename):
raise TorusDBNException(
"Could not open file %s for reading missing residues." % filename)
residues_file = open(filename)
for i, line in enumerate(residues_file.readlines()):
line = line.strip()
if not line.startswith("#") and len(line) > 1:
try:
chain_pdb, chain, res_index, res_name = line.split()
missing_residues[
(chain_pdb, chain, int(res_index))
] = Residue(id=('', int(res_index), ''), resname=res_name, segid='')
except ValueError:
                    raise TorusDBNException(
"Could not read missing residues file %s at line %d." %
(filename, i + 1)
)
except IOError:
TorusDBNException("Could not open file %s for reading missing residues." % filename)
return missing_residues
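# Example call (assuming a file in the format documented in the docstring above):
#   missing = read_missing_residues("missing_residues.txt")
#   missing[("mychain.pdb", "A", 23)].get_resname()  # -> 'ALA'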
def _get_missing_residue(missing_residues, chain_pdb, chain, residue_index):
""" Get the missing residues corresponding to a file, chain and position.
@param missing_residues: Dictionary of residues indexed by
(file, chain_id, position).
@type missing_residues: dict
@param chain_pdb: The filename where the chain is described.
@type chain_pdb: str
@param chain: The chain identifier.
@type chain: str
@param residue_index: The position of the residue in the chain
@type residue_index: int
@rtype: str
@return: The missing residue three letter identifier.
"""
chain_pdb = os.path.split(chain_pdb)[-1]
try:
return missing_residues[(chain_pdb, chain, residue_index)]
except:
raise TorusDBNChainBreakException(
"Chain break in file %s, chain %s, residue %d, could not be handled." %
(chain_pdb, chain, residue_index)
)
def _get_pp_with_chain_break(chain_pdb, pp_list, chain_list, missing_residues_dict):
""" Get the polypeptides chains for a given chain file, adding missing
residues.
@param chain_pdb: The file containing the chains.
@type chain_pdb: str
@param pp_list: List of polypeptides.
@type pp_list: list(Polypeptide)
@param pp_list: List of chains.
@type pp_list: list(Chain)
@param missing_residues_dict: Dictionary of residues indexed by
(file, chain_id, position).
@type missing_residues_dict: dict
@rtype: tuple(list(Polypeptide), list(int))
@return: Polypeptides and list with missing residues position.
"""
pp_list_new = []
missing_residues = []
pp_list_new.append(pp_list[0])
missing_residues.append([])
chain_index = 0
for i, pp in enumerate(pp_list[1:]):
last_residue = pp_list_new[-1][-1].get_id()[1] + 1
first_residue = pp[0].get_id()[1]
if last_residue < first_residue:
missing_residues_index = range(last_residue, first_residue)
residues = []
for res_index in missing_residues_index:
try:
res = _get_missing_residue(
missing_residues_dict,
chain_pdb,
chain_list[chain_index].get_id(),
res_index,
)
residues.append(res)
except:
residues = []
break
if len(residues) > 0:
for res in residues:
pp_list_new[-1].append(res)
missing_residues[-1] += missing_residues_index
pp_list_new[-1] += pp
else:
pp_list_new.append(pp)
missing_residues.append([])
chain_index += 1
else:
pp_list_new.append(pp)
missing_residues.append([])
chain_index += 1
if len(pp_list_new) != len(chain_list):
raise TorusDBNChainBreakException(
"Chain break in file %s could not be handled." % (chain_pdb))
return pp_list_new, missing_residues
def _get_peptide_bond_conformation(res, prev_res):
""" Get the conformation of the peptide bond.
@param res: A residue
@type res: Residue
@param res: The previous residue
@type res: Residue
@rtype: int (cis: 0, trans: 1)
@return: Conformation of the peptide bond.
"""
CIS = 0
TRANS = 1
try:
CA_prev = prev_res['CA'].get_vector()
C_prev = prev_res['C'].get_vector()
N = res['N'].get_vector()
CA = res['CA'].get_vector()
dihedral = calc_dihedral(CA_prev, C_prev, N, CA)
except:
raise TorusDBNException("Could not obtain the conformation of the peptide bond.")
if abs(dihedral) < math.pi/4:
return CIS
else:
return TRANS
| 33.982935 | 102 | 0.568545 | # Copyright (C) 2011 by Michele Silva (michele.silva@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import os
import math
import numpy
from mocapy.framework import eMISMASK
from Bio.PDB import PDBParser
from Bio.PDB.DSSP import DSSP
from Bio.PDB.Residue import Residue
from Bio.PDB.Polypeptide import PPBuilder, three_to_index
from Bio.PDB.Vector import calc_dihedral
from Bio.PDB.TorusDBN.TorusDBNExceptions import TorusDBNException, \
TorusDBNChainBreakException, TorusDBNBuildPolypeptideException
from Bio.PDB.TorusDBN._structure import dssp_to_index
def create_sequence_from_file(chain_pdb, missing_residues, quiet_parser=False):
""" Read a PDB file and creates a sequence and mismask to represent
its content.
@param chain_pdb: The PDB file to be read.
@type chain_pd: str
@param missing_residues: A dictionary with the missing residues.
@type missing_residues: dict
@param quiet_parser: Disable PDBParser warnings.
@type quiet_parser: bool
"""
sequences = []
mismasks = []
output_data = []
output_mismask = []
parser = PDBParser(QUIET=quiet_parser)
structure = parser.get_structure("X", chain_pdb)
dssp = DSSP(model=structure[0], pdb_file=chain_pdb)
# Loop over residues in peptides
ppb = PPBuilder()
pp_list = ppb.build_peptides(structure[0])
chain_list = structure[0].get_list()
if len(pp_list) == 0:
raise TorusDBNBuildPolypeptideException(
"Could not create a list of Polypeptide objects from the file %s."
% (chain_pdb)
)
else:
pp_chains, chain_missing_residues = _get_pp_with_chain_break(
chain_pdb, pp_list, chain_list, missing_residues)
for pp_index, pp in enumerate(pp_chains):
phi_psi_list = pp.get_phi_psi_list()
missing_residues = chain_missing_residues[pp_index]
for i in xrange(1, len(phi_psi_list)-1):
seq = [0] * 6
mism = [eMISMASK.MOCAPY_HIDDEN] + 4 * [eMISMASK.MOCAPY_OBSERVED]
# Amino acid
res = pp[i]
res_name = res.get_resname()
res_index = res.get_id()[1]
aa_index = three_to_index(res_name)
if res_index in missing_residues:
seq[3] = aa_index
mism[1] = eMISMASK.MOCAPY_MISSING # angles unknown
mism[3] = eMISMASK.MOCAPY_MISSING # ss unknown
mism[4] = eMISMASK.MOCAPY_MISSING # cis unknown
else:
seq[3] = aa_index
# Secondary Structure
try:
ss = res.xtra["SS_DSSP"]
ss_index = dssp_to_index(ss)
seq[4] = ss_index
except:
mism[3] = eMISMASK.MOCAPY_MISSING # ss unknown
# Angles
if None in phi_psi_list[i]:
# Previous or next residue missing, therefore angles are
# Unknown
mism[1] = eMISMASK.MOCAPY_MISSING
else:
seq[1:3] = phi_psi_list[i]
# Cis/Trans information
if (res_index - 1) in missing_residues:
mism[4] = eMISMASK.MOCAPY_MISSING # cis unknown
else:
try:
seq[5] = _get_peptide_bond_conformation(res, pp[i-1])
except TorusDBNException:
mism[4] = eMISMASK.MOCAPY_MISSING # cis unknown
output_data.append(seq)
output_mismask.append(mism)
if output_data and output_mismask:
sequences.append(numpy.array(output_data))
mismasks.append(numpy.array(output_mismask, dtype = numpy.uint))
else:
raise TorusDBNException(
"Could not create training data from the file %s."
% (chain_pdb)
)
return sequences, mismasks
def read_missing_residues(filename):
""" Read missing residues from a file.
The file format is the following:
# file chain position residue
'mychain.pdb' A 23 ALA
'mychain.pdb' C 27 LEU
'other.pdb A 12 GLY
@param filename: The file describing the missing residues.
@type filename: str
@rtype: dict
@return: Dictionary of residues whose key is the (file, chain, position).
"""
try:
missing_residues = {}
if not os.path.isfile(filename):
raise TorusDBNException(
"Could not open file %s for reading missing residues." % filename)
residues_file = open(filename)
for i, line in enumerate(residues_file.readlines()):
line = line.strip()
if not line.startswith("#") and len(line) > 1:
try:
chain_pdb, chain, res_index, res_name = line.split()
missing_residues[
(chain_pdb, chain, int(res_index))
] = Residue(id=('', int(res_index), ''), resname=res_name, segid='')
except ValueError:
TorusDBNException(
"Could not read missing residues file %s at line %d." %
(filename, i + 1)
)
except IOError:
TorusDBNException("Could not open file %s for reading missing residues." % filename)
return missing_residues
def _get_missing_residue(missing_residues, chain_pdb, chain, residue_index):
""" Get the missing residues corresponding to a file, chain and position.
@param missing_residues: Dictionary of residues indexed by
(file, chain_id, position).
@type missing_residues: dict
@param chain_pdb: The filename where the chain is described.
@type chain_pdb: str
@param chain: The chain identifier.
@type chain: str
@param residue_index: The position of the residue in the chain
@type residue_index: int
@rtype: str
@return: The missing residue three letter identifier.
"""
chain_pdb = os.path.split(chain_pdb)[-1]
try:
return missing_residues[(chain_pdb, chain, residue_index)]
except:
raise TorusDBNChainBreakException(
"Chain break in file %s, chain %s, residue %d, could not be handled." %
(chain_pdb, chain, residue_index)
)
def _get_pp_with_chain_break(chain_pdb, pp_list, chain_list, missing_residues_dict):
""" Get the polypeptides chains for a given chain file, adding missing
residues.
@param chain_pdb: The file containing the chains.
@type chain_pdb: str
@param pp_list: List of polypeptides.
@type pp_list: list(Polypeptide)
@param pp_list: List of chains.
@type pp_list: list(Chain)
@param missing_residues_dict: Dictionary of residues indexed by
(file, chain_id, position).
@type missing_residues_dict: dict
@rtype: tuple(list(Polypeptide), list(int))
@return: Polypeptides and list with missing residues position.
"""
pp_list_new = []
missing_residues = []
pp_list_new.append(pp_list[0])
missing_residues.append([])
chain_index = 0
for i, pp in enumerate(pp_list[1:]):
last_residue = pp_list_new[-1][-1].get_id()[1] + 1
first_residue = pp[0].get_id()[1]
if last_residue < first_residue:
missing_residues_index = range(last_residue, first_residue)
residues = []
for res_index in missing_residues_index:
try:
res = _get_missing_residue(
missing_residues_dict,
chain_pdb,
chain_list[chain_index].get_id(),
res_index,
)
residues.append(res)
except:
residues = []
break
if len(residues) > 0:
for res in residues:
pp_list_new[-1].append(res)
missing_residues[-1] += missing_residues_index
pp_list_new[-1] += pp
else:
pp_list_new.append(pp)
missing_residues.append([])
chain_index += 1
else:
pp_list_new.append(pp)
missing_residues.append([])
chain_index += 1
if len(pp_list_new) != len(chain_list):
raise TorusDBNChainBreakException(
"Chain break in file %s could not be handled." % (chain_pdb))
return pp_list_new, missing_residues
def _get_peptide_bond_conformation(res, prev_res):
""" Get the conformation of the peptide bond.
@param res: A residue
@type res: Residue
@param res: The previous residue
@type res: Residue
@rtype: int (cis: 0, trans: 1)
@return: Conformation of the peptide bond.
"""
CIS = 0
TRANS = 1
try:
CA_prev = prev_res['CA'].get_vector()
C_prev = prev_res['C'].get_vector()
N = res['N'].get_vector()
CA = res['CA'].get_vector()
dihedral = calc_dihedral(CA_prev, C_prev, N, CA)
except:
raise TorusDBNException("Could not obtain the conformation of the peptide bond.")
if abs(dihedral) < math.pi/4:
return CIS
else:
return TRANS
| 0 | 0 | 0 |
bd4cfb50614b26d2a45f7796cdb6de5d44ba7f1a | 6,598 | py | Python | pool.py | scarecrow87/nhzpool | 90a5718a6a491e0432a5c60429b3b16ac8114df1 | ["MIT"] | 1 | 2015-02-16T11:49:16.000Z | 2015-02-16T11:49:16.000Z | pool.py | scarecrow87/nhzpool | 90a5718a6a491e0432a5c60429b3b16ac8114df1 | ["MIT"] | null | null | null | pool.py | scarecrow87/nhzpool | 90a5718a6a491e0432a5c60429b3b16ac8114df1 | ["MIT"] | 5 | 2015-02-16T12:42:45.000Z | 2019-07-14T20:50:49.000Z |
#!/usr/bin/env python
# author: pharesim@nhzcrypto.org
import json
import urllib
import urllib2
import sqlite3
import math
import ConfigParser
import time
config = ConfigParser.RawConfigParser()
config.read('config.ini')
conn = sqlite3.connect(config.get("pool", "database"))
c = conn.cursor()
if __name__ == "__main__":
main()
| 37.91954 | 191 | 0.615944 | #!/usr/bin/env python
# author: pharesim@nhzcrypto.org
import json
import urllib
import urllib2
import sqlite3
import math
import ConfigParser
import time
config = ConfigParser.RawConfigParser()
config.read('config.ini')
conn = sqlite3.connect(config.get("pool", "database"))
c = conn.cursor()
def main():
while True:
startForging()
getleased()
getNew(json.loads(urllib2.urlopen(config.get("pool", "nhzhost")+"/nhz?requestType=getAccountBlockIds&account="+config.get("pool", "poolaccount")+"×tamp="+getTimestamp()).read()))
time.sleep(100)
payout()
time.sleep(100)
def startForging():
payload = {
'requestType': 'getForging',
'secretPhrase': config.get("pool", "poolphrase")
}
opener = urllib2.build_opener(urllib2.HTTPHandler())
data = urllib.urlencode(payload)
forging = json.loads(opener.open(config.get("pool", "nhzhost")+'/nhz', data=data).read())
if 'errorCode' in forging.keys():
if forging['errorCode'] == 5:
payload['requestType'] = 'startForging'
data = urllib.urlencode(payload)
forging = json.loads(opener.open(config.get("pool", "nhzhost")+'/nhz', data=data).read())
print "Started forging"
return True
def getleased():
leasedaccounts = json.loads(urllib2.urlopen(config.get("pool", "nhzhost")+"/nhz?requestType=getAccount&account="+config.get("pool", "poolaccountRS")).read())
try:
for lessor in leasedaccounts['lessorsRS']:
lessorAccount = json.loads(urllib2.urlopen(config.get("pool", "nhzhost")+"/nhz?requestType=getAccount&account="+lessor).read())
balance = lessorAccount['guaranteedBalanceNQT']
accountadd = lessorAccount['accountRS']
heightfrom = lessorAccount['currentLeasingHeightFrom']
heightto = lessorAccount['currentLeasingHeightTo']
rs = lessorAccount['accountRS']
c.execute("INSERT OR REPLACE INTO leased (account, heightfrom, heightto, amount, ars) VALUES (?,?,?,?,?);",(accountadd, heightfrom, heightto, balance, rs))
conn.commit()
except KeyError:
# If no lessors, just return
pass
return True
def getNew(newBlocks):
try:
shares = getShares()
if 'blockIds' in newBlocks:
for block in newBlocks['blockIds']:
print block
blockData = json.loads(urllib2.urlopen(config.get("pool", "nhzhost2")+"/nhz?requestType=getBlock&block="+block).read())
c.execute("INSERT OR IGNORE INTO blocks (timestamp, block, totalfee, height) VALUES (?,?,?,?);", (blockData['timestamp'],block,blockData['totalFeeNQT'],blockData['height']))
blockFee = float(blockData['totalFeeNQT'])
blockheight = float(blockData['height'])
if blockFee > 0:
for (account, amount) in shares.items():
                        # skip the pool's own account; compare strings, not object identity
                        if account != config.get("pool", "poolaccountRS"):
lessorAccount = json.loads(urllib2.urlopen(config.get("pool", "nhzhost")+"/nhz?requestType=getAccount&account="+account).read())
heightfrom = lessorAccount['currentLeasingHeightFrom']
if heightfrom < blockheight:
payout = math.floor(blockFee * (amount['percentage']/100))
c.execute(
"INSERT OR IGNORE INTO accounts (blocktime, account, percentage, amount, paid) VALUES (?,?,?,?,?);",
(blockData['timestamp'],account,amount['percentage'],payout,False)
)
conn.commit()
except KeyError:
pass
return True
def getTimestamp():
timestamp = config.get("pool", "poolstart")
c.execute("SELECT timestamp FROM blocks ORDER BY timestamp DESC LIMIT 1;")
blocks = c.fetchall()
if len(blocks) > 0 and blocks[0][0] > config.get("pool", "poolstart"):
timestamp = blocks[0][0]
return str(int(timestamp)+1)
def getShares():
poolAccount = json.loads(urllib2.urlopen(config.get("pool", "nhzhost")+"/nhz?requestType=getAccount&account="+config.get("pool", "poolaccountRS")).read())
totalAmount = 0
if 'guaranteedBalanceNQT' in poolAccount:
totalAmount = float(poolAccount['guaranteedBalanceNQT'])
leasedAmount = { config.get("pool", "poolaccountRS"): { 'amount': totalAmount } }
if 'lessors' in poolAccount:
for lessor in poolAccount['lessorsRS']:
lessorAccount = json.loads(urllib2.urlopen(config.get("pool", "nhzhost")+"/nhz?requestType=getAccount&account="+lessor).read())
leasedAmount[lessor] = { 'amount': float(lessorAccount['guaranteedBalanceNQT']) }
totalAmount += float(lessorAccount['guaranteedBalanceNQT'])
if totalAmount > 0:
for (account, amount) in leasedAmount.items():
leasedAmount[account]['percentage'] = amount['amount'] / (totalAmount/100)
return leasedAmount
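# getShares() returns one entry per forging account (the pool account itself plus any
# lessors), keyed by the account address, e.g. (illustrative values only):
#   {"NHZ-XXXX-XXXX-XXXX-XXXXX": {"amount": 5000000000.0, "percentage": 12.5}, ...}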
def payout():
c.execute("SELECT account, amount FROM accounts WHERE paid=0 AND amount>0;")
unpaid = c.fetchall()
c.execute("SELECT * FROM blocks WHERE totalfee>0;")
blocks = c.fetchall()
pending = {}
for share in unpaid:
if share[0] in pending:
pending[share[0]] += share[1]
else:
pending[share[0]] = share[1]
for (account, amount) in pending.items():
if amount > getLimit():
time.sleep(100)
fee = int(math.floor(((amount*float(config.get("pool", "feePercent")))/100)))
payment = str((amount-fee)-100000000)
account = str(account)
fee = str(fee)
print "Pay out "+payment+" to "+account+" (keep fee: "+fee+")"
payload = {
'requestType': 'sendMoney',
'secretPhrase': config.get("pool", "poolphrase"),
'recipient': account,
'amountNQT': payment,
'feeNQT': 100000000,
'deadline': 60
}
opener = urllib2.build_opener(urllib2.HTTPHandler())
data = urllib.urlencode(payload)
content = json.loads(opener.open(config.get("pool", "nhzhost")+'/nhz', data=data).read())
if 'transaction' in content.keys():
c.execute("UPDATE accounts SET paid=? WHERE account=?;",(content['transaction'],str(account)))
c.execute("INSERT INTO payouts (account, fee, payment) VALUES (?,?,?);",(account, fee, payment))
conn.commit()
return True
def getLimit():
return float(config.get("pool", "payoutlimit"))*100000000;
if __name__ == "__main__":
main()
| 6,045 | 0 | 200 |
6027c33c333db43152b335b880f0d183ba49d46d | 4,319 | py | Python | parsl/tests/manual_tests/test_basic.py | benclifford/parsl | 21f8681882779050d2e074591e95ada43789748f | ["Apache-2.0"] | 2 | 2019-02-25T16:43:30.000Z | 2019-03-04T17:25:00.000Z | parsl/tests/manual_tests/test_basic.py | benclifford/parsl | 21f8681882779050d2e074591e95ada43789748f | ["Apache-2.0"] | null | null | null | parsl/tests/manual_tests/test_basic.py | benclifford/parsl | 21f8681882779050d2e074591e95ada43789748f | ["Apache-2.0"] | 2 | 2019-04-30T13:46:23.000Z | 2019-06-04T16:14:46.000Z |
import argparse
import time
import parsl
# Tested. Confirmed. Local X Local X SingleNodeLauncher
from parsl.tests.configs.local_ipp import config
# Tested. Confirmed. ssh X Slurm X SingleNodeLauncher
# from parsl.tests.configs.midway_ipp import config
# Tested. Confirmed. ssh X Slurm X SingelNodeLauncher
# from parsl.tests.configs.midway_ipp_multicore import config
# Tested. Confirmed. ssh X Slurm X SrunLauncher
# from parsl.tests.configs.midway_ipp_multinode import config
# OSG requires python3.5 for testing. This test is inconsistent,
# breaks often depending on where the test lands
# Tested. Confirmed. ssh X Condor X single_node
# from parsl.tests.configs.osg_ipp_multinode import config
# Tested. Confirmed. ssh X Torque X AprunLauncher
# from parsl.tests.configs.swan_ipp import config
# Tested. Confirmed. ssh X Torque X AprunLauncher
# from parsl.tests.configs.swan_ipp_multinode import config
# Tested. Confirmed. ssh X Slurm X SingleNodeLauncher
# from parsl.tests.configs.cori_ipp_single_node import config
# Tested. Confirmed. ssh X Slurm X srun
# from parsl.tests.configs.cori_ipp_multinode import config
# from parsl.tests.configs.cooley_ssh_il_single_node import config
# Tested. Confirmed. local X GridEngine X singleNode
# from parsl.tests.configs.cc_in2p3_local_single_node import config
# from parsl.tests.configs.comet_ipp_multinode import config
# from parsl.tests.configs.htex_local import config
# from parsl.tests.configs.exex_local import config
parsl.set_stream_logger()
# from htex_midway import config
# from htex_swan import config
from parsl.app.app import python_app # , bash_app
parsl.load(config)
@python_app
@python_app
@python_app
@python_app
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--sitespec", default=None)
parser.add_argument("-c", "--count", default="10",
help="Count of apps to launch")
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
args = parser.parse_args()
if args.sitespec:
config = None
try:
exec("import parsl; from {} import config".format(args.sitespec))
parsl.load(config)
except Exception:
print("Failed to load the requested config : ", args.sitespec)
exit(0)
if args.debug:
parsl.set_stream_logger()
x = test_simple(int(args.count))
# x = test_imports()
# x = test_parallel_for()
# x = test_parallel_for(int(args.count))
# x = test_stdout()
x = test_platform()
| 26.496933 | 77 | 0.664737 | import argparse
import time
import parsl
# Tested. Confirmed. Local X Local X SingleNodeLauncher
from parsl.tests.configs.local_ipp import config
# Tested. Confirmed. ssh X Slurm X SingleNodeLauncher
# from parsl.tests.configs.midway_ipp import config
# Tested. Confirmed. ssh X Slurm X SingelNodeLauncher
# from parsl.tests.configs.midway_ipp_multicore import config
# Tested. Confirmed. ssh X Slurm X SrunLauncher
# from parsl.tests.configs.midway_ipp_multinode import config
# OSG requires python3.5 for testing. This test is inconsistent,
# breaks often depending on where the test lands
# Tested. Confirmed. ssh X Condor X single_node
# from parsl.tests.configs.osg_ipp_multinode import config
# Tested. Confirmed. ssh X Torque X AprunLauncher
# from parsl.tests.configs.swan_ipp import config
# Tested. Confirmed. ssh X Torque X AprunLauncher
# from parsl.tests.configs.swan_ipp_multinode import config
# Tested. Confirmed. ssh X Slurm X SingleNodeLauncher
# from parsl.tests.configs.cori_ipp_single_node import config
# Tested. Confirmed. ssh X Slurm X srun
# from parsl.tests.configs.cori_ipp_multinode import config
# from parsl.tests.configs.cooley_ssh_il_single_node import config
# Tested. Confirmed. local X GridEngine X singleNode
# from parsl.tests.configs.cc_in2p3_local_single_node import config
# from parsl.tests.configs.comet_ipp_multinode import config
# from parsl.tests.configs.htex_local import config
# from parsl.tests.configs.exex_local import config
parsl.set_stream_logger()
# from htex_midway import config
# from htex_swan import config
from parsl.app.app import python_app # , bash_app
parsl.load(config)
@python_app
def double(x):
return x * 2
@python_app
def echo(x, string, stdout=None):
print(string)
return x * 5
@python_app
def import_echo(x, string, stdout=None):
import time
time.sleep(0)
print(string)
return x * 5
@python_app
def platform(sleep=10, stdout=None):
import platform
import time
time.sleep(sleep)
return platform.uname()
def test_simple(n=2):
start = time.time()
x = double(n)
print("Result : ", x.result())
assert x.result() == n * \
2, "Expected double to return:{0} instead got:{1}".format(
n * 2, x.result())
print("Duration : {0}s".format(time.time() - start))
print("[TEST STATUS] test_parallel_for [SUCCESS]")
return True
def test_imports(n=2):
start = time.time()
x = import_echo(n, "hello world")
print("Result : ", x.result())
assert x.result() == n * \
5, "Expected double to return:{0} instead got:{1}".format(
n * 2, x.result())
print("Duration : {0}s".format(time.time() - start))
print("[TEST STATUS] test_parallel_for [SUCCESS]")
return True
def test_platform(n=2):
# sync
x = platform(sleep=0)
print(x.result())
d = []
for i in range(0, n):
x = platform(sleep=5)
d.append(x)
print(set([i.result()for i in d]))
return True
def test_parallel_for(n=2):
d = {}
start = time.time()
for i in range(0, n):
d[i] = double(i)
# time.sleep(0.01)
assert len(
d.keys()) == n, "Only {0}/{1} keys in dict".format(len(d.keys()), n)
[d[i].result() for i in d]
print("Duration : {0}s".format(time.time() - start))
print("[TEST STATUS] test_parallel_for [SUCCESS]")
return d
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--sitespec", default=None)
parser.add_argument("-c", "--count", default="10",
help="Count of apps to launch")
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
args = parser.parse_args()
if args.sitespec:
config = None
try:
exec("import parsl; from {} import config".format(args.sitespec))
parsl.load(config)
except Exception:
print("Failed to load the requested config : ", args.sitespec)
exit(0)
if args.debug:
parsl.set_stream_logger()
x = test_simple(int(args.count))
# x = test_imports()
# x = test_parallel_for()
# x = test_parallel_for(int(args.count))
# x = test_stdout()
x = test_platform()
| 1,511 | 0 | 180 |
2950cf0bc349ff3760e44a141ca72bdffc988490 | 255 | py | Python | __init__.py | blackCheetah/YouTube---Checker-for-Giveaways | c20b8ad762669a38c4ccfeb91486f008b7f0833c | ["MIT"] | null | null | null | __init__.py | blackCheetah/YouTube---Checker-for-Giveaways | c20b8ad762669a38c4ccfeb91486f008b7f0833c | ["MIT"] | 3 | 2021-02-08T20:25:28.000Z | 2021-06-01T22:43:03.000Z | __init__.py | blackCheetah/YouTube---Checker-for-Giveaways | c20b8ad762669a38c4ccfeb91486f008b7f0833c | ["MIT"] | null | null | null |
"""
General details about the program
"""
__version__ = '0.2.0'
__author__ = 'blackCheetah'
__created__ = '27.09.2018'
__updated__ = '07.10.2018'
__description__ = 'Retrieve all subscribers who made a comment on your last 50 videos.'
| 28.333333 | 87 | 0.678431 | """
General details about the program
"""
__version__ = '0.2.0'
__author__ = 'blackCheetah'
__created__ = '27.09.2018'
__updated__ = '07.10.2018'
__description__ = 'Retrieve all subscribers who made a comment on your last 50 videos.'
| 0 | 0 | 0 |
fcdcbb79481d3bb5f02262e55c738c7678897fbb | 52,534 | py | Python | src/operation.py | moibenko/enstore | 6f2ff5b67ff73872a9e68f2a68b0bdaa70cef9b9 | ["Intel", "Unlicense"] | 4 | 2021-10-17T11:17:59.000Z | 2022-02-28T16:58:40.000Z | src/operation.py | moibenko/enstore | 6f2ff5b67ff73872a9e68f2a68b0bdaa70cef9b9 | ["Intel", "Unlicense"] | 17 | 2021-10-05T21:44:06.000Z | 2022-03-31T16:58:40.000Z | src/operation.py | moibenko/enstore | 6f2ff5b67ff73872a9e68f2a68b0bdaa70cef9b9 | ["Intel", "Unlicense"] | 8 | 2021-09-02T18:55:49.000Z | 2022-03-09T21:05:28.000Z |
#!/usr/bin/env python
"""
Schema
Schema | Name | Type | Owner
--------+-------------------+----------+---------
public | job | table | huangch
public | job_definition | table | huangch
public | job_definition_id | sequence | huangch
public | job_id | sequence | huangch
public | object | table | huangch
public | progress | table | huangch
public | task | table | huangch
public | task_id | sequence | huangch
Table "public.job"
Column | Type | Modifiers
---------+-----------------------------+------------------------------------------
id | integer | not null default nextval('job_id'::text)
name | character varying | not null
type | integer | not null
start | timestamp without time zone | default now()
finish | timestamp without time zone |
comment | character varying |
id2 | bigint | not null
Indexes:
"job_pkey" PRIMARY KEY, btree (id)
"job_id2_key" UNIQUE, btree (id2)
"job_finish_idx" btree (finish)
"job_name_idx" btree (name)
"job_start_idx" btree ("start")
"job_type_idx" btree ("type")
Foreign-key constraints:
"job_type_fkey" FOREIGN KEY ("type") REFERENCES job_definition(id) ON UPDATE CASCADE ON DELETE CASCADE
Table "public.job_definition"
Column | Type | Modifiers
---------+-------------------+-----------------------------------------------------
id | integer | not null default nextval('job_definition_id'::text)
name | character varying | not null
tasks | integer |
remarks | character varying |
Indexes:
"job_definition_pkey" PRIMARY KEY, btree (id)
"job_definition_name_idx" btree (name)
Table "public.task"
Column | Type | Modifiers
------------+-------------------+-------------------------------------------
id | integer | not null default nextval('task_id'::text)
seq | integer | not null
job_type | integer | not null
dsc | character varying |
action | character varying |
comment | character varying |
auto_start | character(1) | default 'm'::bpchar
Indexes:
"task_pkey" PRIMARY KEY, btree (seq, job_type)
Foreign-key constraints:
"task_job_type_fkey" FOREIGN KEY (job_type) REFERENCES job_definition(id) ON UPDATE CASCADE ON DELETE CASCADE
Table "public.progress"
Column | Type | Modifiers
---------+-----------------------------+---------------
job | integer | not null
task | integer | not null
start | timestamp without time zone | default now()
finish | timestamp without time zone |
comment | character varying |
args | character varying |
result | character varying |
Indexes:
"progress_job_idx" btree (job)
"progress_start_idx" btree ("start")
Foreign-key constraints:
"progress_job_fkey" FOREIGN KEY (job) REFERENCES job(id) ON UPDATE CASCADE ON DELETE CASCADE
Table "public.object"
Column | Type | Modifiers
--------+-------------------+-----------
job | integer | not null
object | character varying |
Indexes:
"object_job_idx" btree (job)
"object_object_idx" btree ("object")
Foreign-key constraints:
"object_job_fkey" FOREIGN KEY (job) REFERENCES job(id) ON UPDATE CASCADE ON DELETE CASCADE
"""
import os
import pprint
import pwd
import time
import types
import smtplib
import stat
import sys
# enstore import
import configuration_client
import e_errors
import enstore_functions2
import dbaccess
try:
import snow_fliptab
except:
pass
debug = False
# debug = True
csc = {}
# timestamp2time(ts) -- convert "YYYY-MM-DD HH:MM:SS" to time
# time2timestamp(t) -- convert time to "YYYY-MM-DD HH:MM:SS"
# is_time(t) -- check if t is of time type "YYYY-MM-DD_hh:mm:ss"
# got to handle space
# send_mail(subject, message) -- simplified sendmail
TEMP_DIR = '/tmp/operation'
# make it if it is not there
if not os.access(TEMP_DIR, os.F_OK):
os.makedirs(TEMP_DIR)
DATABASEHOST = 'stkensrv0n.fnal.gov'
#DATABASEHOST = 'localhost'
DATABASEPORT = 8800
DATABASENAME = 'operation'
DATABASEUSER = None
# This is a hard wired configuration
# get_cluster(host) -- determine current cluster
# get_script_host(cluster) -- determine script host
# get_write_protect_script_path(library_type) -- determine script path
# get_write_permit_script_path(library_type) -- determine script path
# get_default_library(cluster)
# get_qualifier(library_type) -- determine name qualifier
csc = configuration_client.ConfigurationClient((enstore_functions2.default_host(),
enstore_functions2.default_port()))
enstoredb = csc.get('database')
operation_db = csc.get('operation_db')
if operation_db['status'][0] == e_errors.OK:
DATABASENAME = operation_db['dbname']
DATABASEPORT = operation_db['dbport']
DATABASEHOST = operation_db['dbhost']
DATABASEUSER = operation_db['dbuser']
elif enstoredb['dbhost'].find('.fnal.gov') == -1:
print "no database host defined for this node"
sys.exit(0)
cluster = get_cluster(enstoredb['db_host'])
script_host = get_script_host(cluster)
DEFAULT_LIBRARIES = get_default_library(cluster)
# get_db() -- initialize a database connection
# global db connection
db = get_db()
edb = get_edb(enstoredb)
npl = 10 # number of items per line
# get_rem_ticket_number(rem_res)
# get ticket number from remedy API
# rem_res is the result (array of lines) from remedy API
# get_unfinished_job(cluster) -- get unfinished job of certain cluster
# decode_job(job) -- decode the type of job from its name
# is_done(job) -- is this job done?
# try_close_all(cluster) -- try close open job in cluster
# auto_close_all() -- automatically close all finished jobs
# create_job() -- generic job creation
# get_job_by_name() -- from a name to find the job; name is unique
# get_job_by_id() -- get_job_using internal id
# get_job_by_time() -- get job using time frame
# retrieve_job() -- get all related information of this job
# get_job_tasks(name) -- show the tasks related to this job
# start_job_task(job_name, task_id) -- start a task
# finish_job_task(job_name, task_id) -- finish/close a task
# get_current_task(name) -- get current task
# get_next_task(name) -- get next task
# has_finished(job, task) -- has task (job, task) finished?
# is_started(job, task) -- has task (job, task) started?
# start_next_task(job) -- start next task
# finish_current_task(job) -- finish current task
# show_current_task(job) -- show current task of job
# show_next_task(job) -- show next task of job
# show(job) -- display a job
# delete(job) -- delete a job
# help(topic) -- help function
# even(i) -- True is i is an even number
# get_caps_per_ticket(lib_type) -- determine caps per ticket
# same_tape_library(libs) -- check if all library are using the same robot
# dump() -- dump all global variables
# complex operations
# CAPS_PER_TICKET = 10
# VOLUMES_PER_CAP = 21
# make_cap_args(d) -- make arguments from a dictionary
# make_cap(list)
# get_max_cap_number(cluster)
PROMPT = "operation> "
# shell() -- interactive shell
# execute(args) -- execute args[0], args[1:]
if __name__ == '__main__':
if len(sys.argv) < 2:
shell()
else:
res = execute(sys.argv[1:])
if res:
if type(res) == type([]):
for i in res:
print i
else:
print res
| 28.848984 | 357 | 0.634675 | #!/usr/bin/env python
"""
Schema
Schema | Name | Type | Owner
--------+-------------------+----------+---------
public | job | table | huangch
public | job_definition | table | huangch
public | job_definition_id | sequence | huangch
public | job_id | sequence | huangch
public | object | table | huangch
public | progress | table | huangch
public | task | table | huangch
public | task_id | sequence | huangch
Table "public.job"
Column | Type | Modifiers
---------+-----------------------------+------------------------------------------
id | integer | not null default nextval('job_id'::text)
name | character varying | not null
type | integer | not null
start | timestamp without time zone | default now()
finish | timestamp without time zone |
comment | character varying |
id2 | bigint | not null
Indexes:
"job_pkey" PRIMARY KEY, btree (id)
"job_id2_key" UNIQUE, btree (id2)
"job_finish_idx" btree (finish)
"job_name_idx" btree (name)
"job_start_idx" btree ("start")
"job_type_idx" btree ("type")
Foreign-key constraints:
"job_type_fkey" FOREIGN KEY ("type") REFERENCES job_definition(id) ON UPDATE CASCADE ON DELETE CASCADE
Table "public.job_definition"
Column | Type | Modifiers
---------+-------------------+-----------------------------------------------------
id | integer | not null default nextval('job_definition_id'::text)
name | character varying | not null
tasks | integer |
remarks | character varying |
Indexes:
"job_definition_pkey" PRIMARY KEY, btree (id)
"job_definition_name_idx" btree (name)
Table "public.task"
Column | Type | Modifiers
------------+-------------------+-------------------------------------------
id | integer | not null default nextval('task_id'::text)
seq | integer | not null
job_type | integer | not null
dsc | character varying |
action | character varying |
comment | character varying |
auto_start | character(1) | default 'm'::bpchar
Indexes:
"task_pkey" PRIMARY KEY, btree (seq, job_type)
Foreign-key constraints:
"task_job_type_fkey" FOREIGN KEY (job_type) REFERENCES job_definition(id) ON UPDATE CASCADE ON DELETE CASCADE
Table "public.progress"
Column | Type | Modifiers
---------+-----------------------------+---------------
job | integer | not null
task | integer | not null
start | timestamp without time zone | default now()
finish | timestamp without time zone |
comment | character varying |
args | character varying |
result | character varying |
Indexes:
"progress_job_idx" btree (job)
"progress_start_idx" btree ("start")
Foreign-key constraints:
"progress_job_fkey" FOREIGN KEY (job) REFERENCES job(id) ON UPDATE CASCADE ON DELETE CASCADE
Table "public.object"
Column | Type | Modifiers
--------+-------------------+-----------
job | integer | not null
object | character varying |
Indexes:
"object_job_idx" btree (job)
"object_object_idx" btree ("object")
Foreign-key constraints:
"object_job_fkey" FOREIGN KEY (job) REFERENCES job(id) ON UPDATE CASCADE ON DELETE CASCADE
"""
import os
import pprint
import pwd
import time
import types
import smtplib
import stat
import sys
# enstore import
import configuration_client
import e_errors
import enstore_functions2
import dbaccess
try:
import snow_fliptab
except:
pass
debug = False
# debug = True
csc = {}
# timestamp2time(ts) -- convert "YYYY-MM-DD HH:MM:SS" to time
def timestamp2time(s):
if s == '1969-12-31 17:59:59':
return -1
else:
# take care of daylight saving time
tt = list(time.strptime(s, "%Y-%m-%d %H:%M:%S"))
tt[-1] = -1
return time.mktime(tuple(tt))
# time2timestamp(t) -- convert time to "YYYY-MM-DD HH:MM:SS"
def time2timestamp(t):
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
# is_time(t) -- check if t is of time type "YYYY-MM-DD_hh:mm:ss"
# got to handle space
def is_time(t):
if len(t) != 19:
return False
if t[4] == '-' and t[7] == '-' and (t[10] == '_' or t[10] == ' ') and \
t[13] == ':'and t[16] == ':':
return True
else:
return False
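# Round-trip sketch for the helpers above (local-time conversion, illustrative):
#   t = timestamp2time("2011-06-01 12:00:00")
#   time2timestamp(t)                 # -> "2011-06-01 12:00:00"
#   is_time("2011-06-01_12:00:00")    # -> True (underscore or space accepted)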
# send_mail(subject, message) -- simplified sendmail
def send_mail(subject, message):
from_addr = pwd.getpwuid(os.getuid())[0]+'@'+os.uname()[1]
if os.environ['ENSTORE_MAIL']:
to_addr = os.environ['ENSTORE_MAIL']
else:
to_addr = "enstore-admin@fnal.gov"
msg = [ "From: %s"%(from_addr),
"To: %s"%(to_addr),
"Subject: %s"%(subject),
""] + message
return smtplib.SMTP('localhost').sendmail(from_addr, [to_addr], '\n'.join(msg))
TEMP_DIR = '/tmp/operation'
# make it if it is not there
if not os.access(TEMP_DIR, os.F_OK):
os.makedirs(TEMP_DIR)
def clean_up_temp_dir():
for i in os.listdir(TEMP_DIR):
os.remove(os.path.join(TEMP_DIR, i))
DATABASEHOST = 'stkensrv0n.fnal.gov'
#DATABASEHOST = 'localhost'
DATABASEPORT = 8800
DATABASENAME = 'operation'
DATABASEUSER = None
# This is a hard wired configuration
def library_type(cluster, lib):
if cluster == 'D0':
if lib in ('D0-9940B','mezsilo'):
return '9310'
if lib in ('samlto2','samlto'):
return 'aml2'
if lib in ('D0-LTO4F1','D0-10KCF1'):
return '8500F1'
if lib in ('D0-LTO4G1',):
return '8500G1'
if lib in ('D0-LTO4GS','D0-10KCGS'):
return '8500GS'
elif cluster == 'STK':
if lib in ('CD-9940B','9940'):
return '9310'
if lib in ('CD-LTO3','CD-LTO4G1','CD-10KCG1','CD-10KDG1'):
return '8500G1'
if lib in ('CD-LTO3GS','CD-LTO4GS','CD-10KCGS','CD-10KDGS'):
return '8500GS'
if lib in ('CD-LTO4F1','CD-10KCF1','CD-10KDF1'):
return '8500F1'
elif cluster == 'CDF':
if lib in ('CDF-9940B','cdf'):
return '9310'
if lib in ('CDF-LTO3','CDF-LTO4G1'):
return '8500G1'
if lib in ('CDF-LTO4GS','CDF-10KCGS'):
return '8500GS'
if lib in ('CDF-LTO4F1','CDF-10KCF1'):
return '8500F1'
elif cluster == 'GCC':
if lib in ('LTO3','LTO4','10KCG1'):
return '8500G1'
if lib == 'LTO4F1':
return '8500F1'
else:
return None
# get_cluster(host) -- determine current cluster
def get_cluster(host):
if host[:2] == 'd0':
return 'D0'
elif host[:3] == 'stk' or host[:7] == 'enstore':
return 'STK'
elif host[:3] == 'cdf':
return 'CDF'
elif host[:3] == 'gcc':
return 'GCC'
else:
return None
# get_script_host(cluster) -- determine script host
def get_script_host(cluster):
if cluster.upper()[:2] == 'D0':
return 'd0ensrv4n.fnal.gov'
elif cluster.upper()[:3] == 'STK':
return 'stkensrv4n.fnal.gov'
elif cluster.upper()[:3] == 'CDF':
return 'cdfensrv4n.fnal.gov'
elif cluster.upper()[:3] == 'GCC':
return 'gccensrv2.fnal.gov'
else:
return 'localhost'
# get_write_protect_script_path(library_type) -- determine script path
def get_write_protect_script_path(lib_type):
if lib_type in ['9310', 'aml2', '8500G1', '8500GS', '8500F1']:
return '/home/enstore/isa-tools/' + lib_type + '_write_protect_work'
else:
return '/tmp'
# get_write_permit_script_path(library_type) -- determine script path
def get_write_permit_script_path(lib_type):
if lib_type in ['9310', 'aml2', '8500G1', '8500GS', '8500F1']:
return '/home/enstore/isa-tools/' + lib_type + '_write_permit_work'
else:
return '/tmp'
# get_default_library(cluster)
def get_default_library(cluster):
if cluster == 'STK':
return "9940,CD-9940B"
elif cluster == 'CDF':
return "cdf,CDF-9940B"
elif cluster == 'D0':
return "mezsilo,D0-9940B"
elif cluster == 'GCC':
return "LTO3"
else:
return "unknown"
# get_qualifier(library_type) -- determine name qualifier
def get_qualifier(lib_type):
if lib_type == 'aml2':
return 'a'
elif lib_type == '8500GS':
return 'r'
elif lib_type == '8500G1':
return 's'
elif lib_type == '8500F1':
return 't'
else:
return ''
csc = configuration_client.ConfigurationClient((enstore_functions2.default_host(),
enstore_functions2.default_port()))
enstoredb = csc.get('database')
operation_db = csc.get('operation_db')
if operation_db['status'][0] == e_errors.OK:
DATABASENAME = operation_db['dbname']
DATABASEPORT = operation_db['dbport']
DATABASEHOST = operation_db['dbhost']
DATABASEUSER = operation_db['dbuser']
elif enstoredb['dbhost'].find('.fnal.gov') == -1:
print "no database host defined for this node"
sys.exit(0)
cluster = get_cluster(enstoredb['db_host'])
script_host = get_script_host(cluster)
DEFAULT_LIBRARIES = get_default_library(cluster)
# get_db() -- initialize a database connection
def get_db():
return dbaccess.DatabaseAccess(maxconnections=1,
database=DATABASENAME,
host=DATABASEHOST,
port=DATABASEPORT,
user=DATABASEUSER)
def get_edb(enstoredb):
return dbaccess.DatabaseAccess(maxconnections=1,
database=enstoredb['dbname'],
host=enstoredb['db_host'],
port=enstoredb['db_port'],
user=enstoredb['dbuser'])
# global db connection
db = get_db()
edb = get_edb(enstoredb)
npl = 10 # number of items per line
def show_cap(header, list):
label = "CAP %d:"%(header)
print label,
nl = 1
for i in list:
print i,
if nl % npl == 0:
print
print " "*len(label),
nl = nl + 1
print
# get_rem_ticket_number(rem_res)
# get ticket number from remedy API
# rem_res is the result (array of lines) from remedy API
def get_rem_ticket_number(rem_res):
for i in rem_res:
t = i.split()
if len(t) > 5:
if t[0] == 'Entry' and \
t[1] == 'created' and \
t[2] == 'with' and \
t[3] == 'id' and \
t[4] == '=':
return "HELPDESK_TICKET_"+t[5]
return 'UNKNOWN_TICKET'
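# Illustrative parse (hypothetical Remedy output line):
#   get_rem_ticket_number(["Entry created with id = 0000123"])  # -> 'HELPDESK_TICKET_0000123'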
# get_unfinished_job(cluster) -- get unfinished job of certain cluster
def get_unfinished_job(cluster=None):
res = None
if cluster:
q = "select name from job where name ilike '%s%%' and finish is null"%(cluster)
        res = db.query_getresult(q)
else:
q = "select name from job where finish is null"
res = db.query_getresult(q)
jobs = []
for i in res:
jobs.append(i[0])
return jobs
# decode_job(job) -- decode the type of job from its name
def decode_job(job):
if job[:3] == 'STK' or job[:3] == "CDF":
cluster = job[:3]
if job[3] in ['a', 'r', 's', 't']:
if job[3] == 'a':
lt = 'aml2'
elif job[3] == 'r':
lt = '8500GS'
elif job[3] == 's':
lt = '8500G1'
elif job[3] == 't':
lt = '8500F1'
else:
lt = 'unknown'
type = job[5]
t = job[6:].split('-')
job_range = range(int(t[0]), int(t[1])+1)
else:
lt = '9310'
type = job[4]
t = job[5:].split('-')
job_range = range(int(t[0]), int(t[1])+1)
elif job[:2] == 'D0':
cluster = job[:2]
if job[2] in ['a', 'r', 's', 't']:
if job[2] == 'a':
lt = 'aml2'
elif job[2] == 'r':
lt = '8500GS'
elif job[2] == 's':
lt = '8500G1'
elif job[2] == 't':
lt = '8500F1'
else:
lt = 'unknown'
type = job[4]
t = job[5:].split('-')
job_range = range(int(t[0]), int(t[1])+1)
else:
lt = '9310'
type = job[3]
t = job[4:].split('-')
job_range = range(int(t[0]), int(t[1])+1)
return cluster, type, job_range, lt
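# Illustrative decodings (the single separator character after the cluster/qualifier
# prefix is site convention; '_' is used here only as a stand-in):
#   decode_job('STK_P12-14')   # -> ('STK', 'P', [12, 13, 14], '9310')
#   decode_job('STKs_E7-8')    # -> ('STK', 'E', [7, 8], '8500G1')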
# is_done(job) -- is this job done?
def is_done(job):
c, t, r, lt = decode_job(job)
if c != cluster: # not on this cluster
return 0
if t == 'E': # write enable
p = get_write_permit_script_path(lt)
elif t == 'P': # write protect
p = get_write_protect_script_path(lt)
else: # don't know
if debug:
print "unknown job", job, c, t, `r`
return 0
t0 = 0
for i in r:
pp = os.path.join(p, `i`)
if os.access(pp, os.F_OK):
return 0
elif os.access(pp+'.done', os.F_OK):
t1 = os.stat(pp+'.done')[stat.ST_CTIME]
if t1 > t0:
t0 = t1
else:
return 0
return t0
# try_close_all(cluster) -- try close open job in cluster
def try_close_all(cluster):
j_list = get_unfinished_job(cluster)
msg = []
for i in j_list:
t = is_done(i)
if t:
print i, "is done at", time.ctime(t)
finish_current_task(i, result='DONE', comment='AUTO-CLOSE', timestamp=time2timestamp(t))
print i, "is closed at", time.ctime(time.time())
msg.append("%s is closed with timestamp %s"%(
i, time.ctime(t)))
else:
print i, "is not done yet"
if msg:
send_mail("Closing tab-flipping job(s)", msg)
# auto_close_all() -- automatically close all finished jobs
def auto_close_all():
global cluster
if os.uname()[1] != script_host:
print "Wrong host %s (%s)"%(os.uname()[1], script_host)
return
try_close_all(cluster)
# create_job() -- generic job creation
def create_job(name, type, args, comment = ''):
association = None
# check if any of the args are in open job
problem_args = {}
for i in args:
q = "select job.id, job.name, job_definition.name as job_def, \
job.start from job, job_definition, object \
where \
object.object = '%s' and \
job.id = object.job and \
job.finish is null and \
job.type = job_definition.id"%(i)
if debug:
print q
res = db.query_getresult(q)
if res:
problem_args[i] = res[0]
if problem_args:
for i in problem_args.keys():
print "%s is already in unfinished job %d %s %s %s"%(
i,
problem_args[i]['id'],
problem_args[i]['name'],
problem_args[i]['job_def'],
problem_args[i]['start'])
return -1
# is there a time stamp specified?
if is_time(args[0]):
q = "insert into job (name, type, start, comment) \
values ('%s', \
(select id from job_definition where \
name = '%s'), '%s', '%s');"%(
name, type, args[0].replace('_', ' '), comment)
args = args[1:]
else:
q = "insert into job (name, type, comment) \
values ('%s', (select id from job_definition where \
name = '%s'), '%s');"%(
name, type, comment)
if debug:
print q
db.insert(q)
# get job id
q = "select id from job where name = '%s';"%(
name)
if debug:
print q
id = db.query_getresult(q)[0][0]
for i in args:
# is it association setting?
if i[-1] == ':':
association = i[:-1]
else:
# does it have embedded association?
p = i.split(':')
if len(p) > 1: # yes
q = "insert into object (job,object,association) values (%d, '%s', '%s');"%(id, p[1], p[0])
else: # nope
if association:
q = "insert into object (job, object, association) values (%d, '%s', '%s');"%(id, i, association)
else:
q = "insert into object (job, object) values (%d, '%s');"%(id, i)
if debug:
print q
db.insert(q)
return id
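# Hypothetical call showing the object/association conventions handled above
# (job name and volumes are made up):
#   create_job('STK_P12-14', 'WRITE_PROTECTION_TAB_ON',
#              ['cap:', '12', '13', '14', 'volume:VO1234'],
#              comment='flip write-protect tabs')
# A bare "xxx:" argument sets the association for the entries that follow it, an
# "assoc:object" argument carries its own association, and a leading
# "YYYY-MM-DD_hh:mm:ss" argument is taken as the job's start timestamp.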
# get_job_by_name() -- from a name to find the job; name is unique
def get_job_by_name(name):
q = "select * from job where name = '%s';"%(name)
if debug:
print q
res = db.query_dictresult(q)
if res:
return retrieve_job(res[0])
else:
return None
# get_job_by_id() -- get_job_using internal id
def get_job_by_id(id):
q = "select * from job where id = %d;"%(id)
if debug:
print q
res = db.query_dictresult(q)
if res:
return retrieve_job(res[0])
else:
return None
# get_job_by_time() -- get job using time frame
def get_job_by_time(after, before = None):
if not before: # default now()
before = time2timestamp(time.time())
if type(after) != types.StringType:
after = time2timestamp(after)
if type(before) != types.StringType:
before = time2timestamp(before)
q = "select * from job where start >= '%s' and start <= '%s' \
order by start;"%(after, before)
if debug:
print q
res = db.query_dictresult(q)
if res:
return retrieve_job(res[0])
else:
return None
# retrieve_job() -- get all related information of this job
def retrieve_job(job):
# assemble its related objects
q = "select * from object where job = %d order by association, object;"%(job['id'])
object = {}
if debug:
print q
res = db.query_getresult(q)
for j in res:
if object.has_key(j[2]):
object[j[2]].append(j[1])
else:
object[j[2]] = [j[1]]
job['object'] = object
# list its related tasks
q = "select * from job_definition where id = %d;"%(job['type'])
if debug:
print q
job_definition = db.query_dictresult(q)[0]
job['job_definition'] = job_definition
q = "select * from task left outer join progress \
on (progress.job = %d and task.seq = progress.task) \
where task.job_type = %d \
order by seq;"%(job['id'], job['type'])
if debug:
print q
job['task'] = db.query_dictresult(q)
job['current'] = get_current_task(job['name'])
job['next'] = get_next_task(job['name'])
if job['finish']:
job['status'] = 'finished'
else:
if job['current'] == 0:
job['status'] = 'not_started'
else:
job['status'] = 'in_progress'
return job
# get_job_tasks(name) -- show the tasks related to this job
def get_job_tasks(name):
q = "select seq, dsc, action from task, job \
where job.name = '%s' and job.type = task.job_type \
order by seq;"%(name)
if debug:
print q
return db.query_dictresult(q)
# start_job_task(job_name, task_id) -- start a task
def start_job_task(job_name, task_id, args=None, comment=None, timestamp=None):
if has_started(job_name, task_id):
return "job %s task %d has already started"%(job_name, task_id)
if args:
args = "'%s'"%(args)
else:
args = "null"
if comment:
comment = "'%s'"%(comment)
else:
comment = "null"
if timestamp:
q = "insert into progress (job, task, start, comment, args) \
values ((select id from job where name = '%s'), %d, '%s', %s, %s);"%(
job_name, task_id, timestamp, comment, args)
else:
q = "insert into progress (job, task, comment, args) \
values ((select id from job where name = '%s'), %d, %s, %s);"%(
job_name, task_id, comment, args)
if debug:
print q
res = db.insert(q)
return `res`
# finish_job_task(job_name, task_id) -- finish/close a task
def finish_job_task(job_name, task_id, comment=None, result=None, timestamp=None):
if not has_started(job_name, task_id):
return "job %s task %d has not started"%(job_name, task_id)
if has_finished(job_name, task_id):
return "job %s task %d has lready finished"%(job_name, task_id)
if result:
result = "'%s'"%(str(result))
else:
result = "null"
if not timestamp:
timestamp = "now()"
else:
timestamp = "'%s'"%(timestamp)
if comment:
q = "update progress \
set finish = %s, comment = '%s', \
result = %s \
where job = (select id from job where name = '%s') \
and task = %d;"%(
timestamp, comment, result, job_name, task_id)
else:
q = "update progress \
set finish = %s, result = %s \
where job = (select id from job where name = '%s') \
and task = %d"%(
timestamp, result, job_name, task_id)
if debug:
print q
res = db.insert(q)
return `res`
# get_current_task(name) -- get current task
def get_current_task(name):
q = "select case \
when max(task) is null then 0 \
else max(task) \
end \
from progress, job \
where \
job.name = '%s' and \
progress.job = job.id and \
progress.start is not null;"%(name)
if debug:
print q
res = db.query_getresult(q)
return res[0][0]
# get_next_task(name) -- get next task
def get_next_task(name):
q = "select tasks, finish from job, job_definition where \
job.name = '%s' and \
job.type = job_definition.id;"%(name)
if debug:
print q
res = db.query_getresult(q)
tasks = res[0][0]
finish = res[0][1]
if finish:
return 0
ct = get_current_task(name)
if ct == tasks:
return 0
else:
return ct + 1
# has_finished(job, task) -- has task (job, task) finished?
def has_finished(job, task):
q = "select p.finish from progress p, job j where \
j.name = '%s' and \
p.job = j.id and p.task = %d and p.finish is not null;"%(
job, task)
if debug:
print q
res = db.query_getresult(q)
if not res:
return False
else:
return True
# has_started(job, task) -- has task (job, task) started?
def has_started(job, task):
q = "select p.start from progress p, job j where \
j.name = '%s' and \
p.job = j.id and p.task = %d and p.start is not null;"%(
job, task)
if debug:
print q
res = db.query_getresult(q)
if not res:
return False
else:
return True
# start_next_task(job) -- start next task
def start_next_task(job, args=None, comment=None, timestamp=None):
res = []
ct = get_current_task(job)
nt = get_next_task(job)
if nt:
if ct == 0 or has_finished(job, ct):
res2 = start_job_task(job, nt, args, comment, timestamp)
if res2:
res.append(res2)
else:
res.append('current task has not finished')
else:
res.append('no more tasks')
return res
# finish_current_task(job) -- finish current task
def finish_current_task(job, result = None, comment = None, timestamp=None):
res = []
ct = get_current_task(job)
if ct:
if has_finished(job, ct):
res.append('current task has already finished')
else:
res2 = finish_job_task(job, ct, comment, result, timestamp)
if res2:
res.append(res2)
else:
res.append('no current task')
return res
# show_current_task(job) -- show current task of job
def show_current_task(j):
job = get_job_by_name(j)
if not job:
return "%s does not exist"%(j)
if job['status'] == "not_started":
return "%s has not started"%(j)
q = "select d.name as desc, t.seq, \
t.dsc, t.action, \
p.start, p.finish \
from job j, job_definition d, \
task t, progress p \
where \
j.id = %d and \
t.seq = %d and \
t.job_type = d.id and \
p.job = j.id and \
p.task = t.seq and \
j.type = d.id;"%(
job['id'], job['current'])
if debug:
print q
ct = db.query_dictresult(q)[0]
if ct['finish'] == None:
ct['finish'] = ""
if ct['action'] == None:
ct['action'] = 'default'
return "%s\t%s\t%3d %s\t(%s)\t%s - %s"%(
j, ct['desc'], ct['seq'],
ct['dsc'], ct['action'],
ct['start'], ct['finish'])
# show_next_task(job) -- show next task of job
def show_next_task(j):
job = get_job_by_name(j)
if not job:
return "%s does not exist"%(j)
if job['status'] == "finished":
return "%s has finished"%(j)
if job['next'] == 0:
return "%s is on the last step"%(j)
q = "select d.name as desc, t.seq, \
t.dsc, t.action \
from job j, job_definition d, \
task t \
where \
j.id = %d and \
t.seq = %d and \
t.job_type = d.id and \
j.type = d.id;"%(
job['id'], job['next'])
if debug:
print q
ct = db.query_dictresult(q)[0]
if ct['action'] == None:
ct['action'] = 'default'
return "%s\t%s\t%3d %s\t(%s)"%(
j, ct['desc'], ct['seq'],
ct['dsc'], ct['action'])
# show(job) -- display a job
def show(job):
if not job:
return
print
print " Name: %s"%(job['name'])
print " Type: %s (%s)"%(job['job_definition']['name'],
job['job_definition']['remarks'])
print " Status: %s"%(job['status'])
print " Start: %s"%(job['start'])
print " Finish: %s"%(job['finish'])
print " #tasks: %d"%(job['job_definition']['tasks'])
print " Tasks:"
print "Current: %d"%(job['current'])
print " Next: %d"%(job['next'])
for t in job['task']:
if t['action'] == None:
t['action'] = "default"
print "%3d %s %40s (%s) %s %s %s %s %s"%(
t['seq'], t['auto_start'], t['dsc'],
t['action'], t['start'], t['finish'], t['args'],
t['result'], t['comment'])
print "Objects:"
for i in job['object'].keys():
print i+':',
for j in job['object'][i]:
print j,
print
# delete(job) -- delete a job
def delete(job):
if job:
q = "delete from job where name = '%s';"%(job)
if debug:
print q
db.delete(q)
def create_write_protect_on_job(name, args, comment = ''):
return create_job(name, 'WRITE_PROTECTION_TAB_ON', args, comment)
def create_write_protect_off_job(name, args, comment = ''):
return create_job(name, 'WRITE_PROTECTION_TAB_OFF', args, comment)
# help(topic) -- help function
def help(topic=None):
if not topic:
print "operation.py create write_protect_on|write_protect_off <job> [[<association>:] [<associate>:]<object>]+"
print " -- creating a job"
print "operation.py list [all|open|finished|closed|completed|<job>+|has <object>]|recent <n>"
print " -- list job(s)"
print "operation.py show <job>+"
print " -- show details of job(s)"
print "operation.py current <job>+"
print " -- show current task(s) of <job>(s)"
print "operation.py next <job>+"
print " -- show next task(s) of <job>(s)"
print "operation.py start <job> [<arg>]"
print " -- start the next task of <job>"
print "operation.py finish <job> [<result>]"
print " -- finish the current task of <job>"
print "operation.py delete <job>+ [sincerely]"
print " -- delete <job>(s)"
print "operation.py find|locate <object>+"
print " -- find jobs that have <object>"
print "operation.py find+|locate+ <objects>+"
print " -- find|locate with details"
print "operation.py relate <job>+"
print " -- find jobs that have common objects"
print "operation.py recommend_write_protect_on [<library_list>] [limit <n>]"
print " -- recommend volumes for flipping write protect tab on"
print "operation.py recommend_write_protect_off [<library_list>] [limit <n>]"
print " -- recommend volumes for flipping write protect tab off"
print "operation.py auto_write_protect_on [<library_list>] [no_limit]"
print " -- automatically generate helpdesk ticket for flipping WP on"
print "operation.py auto_write_protect_off [<library_list>] [no_limit]"
print " -- automatically generate helpdesk ticket for flipping WP off"
print "operation.py auto_close_all"
print " -- try to close all finished open jobs on this cluster"
print
print "try:"
print "operation.py help <topic>"
elif topic == "create":
print
print "operation.py create write_protect_on|write_protect_off <job> [[<association>:] [<associate>:]<object>]+"
print
print "<job> is a user defined unique name of the job"
print "<association> is a way to group objects. By default, there is no association"
print "<association>: change the association for the rest of the objects in list"
print " It is allowed to have multiple <association>: in the command."
print " Each <association>: changes the global association setting."
print "<association>:<object> temporarily override default association for <object>"
print
print "EXAMPLE:"
print "operation.py create write_protect_on WP3 CAP3: VO2093 VO2094 VO2095 VO2096 VO2097 VO2098 VO2099 VO2152 VO2154 VO2195 VO2196 VO2197 VO2198 VO2199 VO2203 VO2206 VO2207 VO2208 VO2209 VO2211 VO2213 CAP4: VO2224 VO2225 VO2226 VO2227 VO2245 VO2246 VO2252 VO2253 VO2254 VO2256 VO2257 VO2258 VO2259 VO2501 VO2532 VO2533 VO2534 VO2540 VO2541 VO2542 VO2544"
elif topic == "list":
print
print "operation.py list [all|open|finished|closed|completed|<job>+|has <object>]"
print
print "all: list all jobs"
print "open: list all open (not closed) jobs"
print "finished|closed|completed: list all completed jobs. finished|closed|completed are the same thing"
print "<job>+ : list named jobs"
print "has <object>: list all jobs that have <object> as an argument"
elif topic == "show":
print
print "operation.py show <job>+"
print
print "show details of <job>s in the list. <job> is addressed by its unique name"
elif topic == "current":
print
print "operation.py current <job>+"
print
print "show the current task of <job>."
print "A current task is one that has stared but its next task has not started"
print "A job can have at most one such task at any time"
print "in case of a not yet started job, current task is task 0"
print "in case of a finished job, current task is the last task"
elif topic == "next":
print
print "operation.py next <job>+"
print
print "show next task of <job>"
print "next task is one that has not started and its previous task has finished."
print "in case of a have-not-started job, next task is the first task."
print "in case of a finished job, next task is task 0"
elif topic == "start":
print
print "operation.py start <job> [<arg>]"
print
print "start the next task of <job> with optional argument"
print "next task can start only if current task has finished"
print
print "EXAMPLE:"
print "operation.py start STKWP3 <help_desk_ticket_id>"
elif topic == "finish":
print
print "operation.py finish <job> [<result>]"
print
print "finish current task of <job> with optional <result>"
print "EXAMPLE:"
print "operation.py finish STKWP3 DONE"
elif topic == "delete":
print
print "operation.py delete <job>+ [sincerely]"
print
print "delete <job> in the list"
print "this is a dangerous command, use with extra care"
print '<job>s will not be deleted unless "sincerely" is specified at the end'
elif topic == "find" or topic == "locate":
print
print "operation.py find|locate <object>+"
print
print "list the jobs that have <object> as an argument"
elif topic == "find+" or topic == "locate+":
print
print "operation.py find+|locate+ <object>+"
print
print "same as find|locate but show details of the jobs"
elif topic == "recommend_write_protect_on" or topic == "recommend_write_protect_off":
print
print "operation.py recommend_write_protect_on [<library_list>] [limit <n>]"
print "operation.py recommend_write_protect_off [<library_list>] [limit <n>]"
print
print "list recommended volumes for write protect tab flipping on/off"
print
print "<library_list> is a list of media types separated by comma ','"
print "when <library_list> is omitted, the default list takes place"
print
print "with 'limit <n>', it only lists, at most, first <n> volumes for the job"
print "otherwise, it lists all"
print
print "EXAMPLES:"
print "operation.py recommend_write_protect_on"
print "operation.py recommend_write_protect_on 9940,CD-9940B"
print "operation.py recommend_write_protect_on limit 100"
print "operation.py recommend_write_protect_on 9940,CD-9940B limit 100"
print "operation.py recommend_write_protect_off"
print "operation.py recommend_write_protect_off 9940,CD-9940B"
print "operation.py recommend_write_protect_off limit 100"
print "operation.py recommend_write_protect_off 9940,CD-9940B limit 100"
elif topic == "auto_write_protect_on" or topic == "auto_write_protect_off":
print
print "operation.py auto_write_protect_on [<library_list>] [no_limit]"
print "operation.py auto_write_protect_off [<library_list>] [no_limit]"
print
print "from recommended list, create a job for write protect tab flipping on/off"
print "and generate a helpdesk ticket automatically"
print
print "<library_list> is a list of media types separated by comma ','"
print
print "there is a default limit of 10 caps (220 volume)"
print "with 'no_limit', it generates everything in one ticket"
print
print "EXAMPLES:"
print "operation.py auto_write_protect_on"
print "operation.py auto_write_protect_on 9940,CD-9940B"
print "operation.py auto_write_protect_on no_limit"
print "operation.py auto_write_protect_on 9940,CD-9940B no_limit"
print "operation.py auto_write_protect_off"
print "operation.py auto_write_protect_off 9940,CD-9940B"
print "operation.py auto_write_protect_off no_limit"
print "operation.py auto_write_protect_off 9940,CD-9940B no_limit"
elif topic == 'auto_close_all':
print
print "operation.py auto_close_all"
print
print "try to close all finished open jobs on this cluster"
print
print "this command is meant for script/cronjob or experts!!"
else:
print "don't know anything about %s"%(topic)
print
help()
# even(i) -- True if i is an even number
def even(i):
return int(i/2)*2 == i
# caps_per_ticket(lib_type) -- determine caps per ticket
def caps_per_ticket(lib_type):
if lib_type == '9310':
return 10
elif lib_type == 'aml2':
return 7
elif lib_type[:4] == '8500':
return 5
else:
return None
def volumes_per_cap(lib_type):
if lib_type == '9310':
return 21
elif lib_type == 'aml2':
return 30
elif lib_type[:4] == '8500':
return 39
else:
return None
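# Illustrative note: combining the two helpers above gives the default
# per-ticket volume counts used elsewhere in this module
# (caps_per_ticket(lt) * volumes_per_cap(lt)):
#   '9310' : 10 caps * 21 volumes = 210 volumes per ticket
#   'aml2' :  7 caps * 30 volumes = 210 volumes per ticket
#   '8500*':  5 caps * 39 volumes = 195 volumes per ticket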
# same_tape_library(libs) -- check if all libraries are using the same robot
def same_tape_library(libs):
l = libs.split(",")
t = library_type(cluster, l[0])
if len(l) > 1:
for i in l[1:]:
if library_type(cluster, i) != t:
return None
return t
# dump() -- dump all global variables
def dump():
for i in __builtins__.globals().keys():
if i[:2] == '__': # internal
continue
if type(__builtins__.globals()[i]) == type(1) or \
type(__builtins__.globals()[i]) == type(1.0) or \
type(__builtins__.globals()[i]) == type("") or \
type(__builtins__.globals()[i]) == type({}) or \
type(__builtins__.globals()[i]) == type([]):
print i, '=',
pprint.pprint(__builtins__.globals()[i])
# complex operations
# CAPS_PER_TICKET = 10
# VOLUMES_PER_CAP = 21
def recommend_write_protect_job(library=DEFAULT_LIBRARIES, limit=None):
# check if they are of the same robot
lt = same_tape_library(library)
if not lt:
print "Error: %s are not the same robot"%(library)
return {}
CAPS_PER_TICKET = caps_per_ticket(lt)
VOLUMES_PER_CAP = volumes_per_cap(lt)
# take care of limit:
# if limit == None: limit = default
# if limit == 0: no limit
# if limit == n, let it be n
if limit == None: # use default
limit = CAPS_PER_TICKET * VOLUMES_PER_CAP
if lt == 'aml2':
op = 'aWP'
elif lt == '8500GS':
op = 'rWP'
elif lt == '8500G1':
op = 'sWP'
elif lt == '8500F1':
op = 'tWP'
else:
op = 'WP'
# get max cap number
n = get_max_cap_number(cluster, op) + 1
# get exclusion list:
q = "select object from object, job \
where \
object.job = job.id and \
job.finish is null;"
if debug:
print q
excl = db.query_getresult(q)
# take care of libraries
lb = library.split(",")
lbs = "(library = '%s'"%(lb[0])
for i in lb[1:]:
lbs = lbs + " or library = '%s'"%(i)
lbs = lbs+")"
q = "" # to make lint happy
if excl:
exclusion = "'%s'"%(excl[0][0])
for i in excl[1:]:
exclusion = exclusion+','+"'%s'"%(i[0])
q = "select label from volume where \
%s and \
system_inhibit_0 = 'none' and \
system_inhibit_1 = 'full' and \
write_protected != 'y' and \
not storage_group in (select * from no_flipping_storage_group) and \
not storage_group||'.'||file_family in \
(select storage_group||'.'||file_family \
from no_flipping_file_family) and\
not file_family like '%%-MIGRATION%%' and \
not label in (%s) \
order by si_time_1 asc"%(lbs, exclusion)
else:
q = "select label from volume where \
%s and \
system_inhibit_0 = 'none' and \
system_inhibit_1 = 'full' and \
write_protected != 'y' and \
not storage_group in (select * from no_flipping_storage_group) and \
not storage_group||'.'||file_family in \
(select storage_group||'.'||file_family \
from no_flipping_file_family) and\
not file_family like '%%-MIGRATION%%' \
order by si_time_1 asc "%(lbs)
if limit:
q = q + ' limit %d;'%(limit)
else:
q = q + ';'
if debug:
print q
res = edb.query_getresult(q)
job = {}
j = 0
cap_n = n
for i in range(len(res)):
if j == 0:
job[cap_n] = []
job[cap_n].append(res[i][0])
j = j + 1
if j >= VOLUMES_PER_CAP:
j = 0
cap_n = cap_n + 1
return job
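# Illustrative sketch of the value returned by recommend_write_protect_job()
# (cap numbers and volume labels below are hypothetical; a real call needs the
# operation and enstore databases):
#   {11: ['VO1001', 'VO1002', ...],   # first free cap number, up to
#    12: ['VO1022', 'VO1023', ...]}   # VOLUMES_PER_CAP labels per cap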
def recommend_write_permit_job(library=DEFAULT_LIBRARIES, limit=None):
# check if they are of the same robot
lt = same_tape_library(library)
if not lt:
print "Error: %s are not the same robot"%(library)
return {}
CAPS_PER_TICKET = caps_per_ticket(lt)
VOLUMES_PER_CAP = volumes_per_cap(lt)
# take care of limit:
# if limit == None: limit = default
# if limit == 0: no limit
# if limit == n, let it be n
if limit == None: # use default
limit = CAPS_PER_TICKET * VOLUMES_PER_CAP
if lt == 'aml2':
op = 'aWE'
elif lt == '8500GS':
op = 'rWE'
elif lt == '8500G1':
op = 'sWE'
elif lt == '8500F1':
op = 'tWE'
else:
op = 'WE'
# get max cap number
n = get_max_cap_number(cluster, op) + 1
# get exclusion list:
q = "select object from object, job \
where \
object.job = job.id and \
job.finish is null;"
if debug:
print q
excl = db.query_getresult(q)
# take care of libraries
lb = library.split(",")
lbs = "(library = '%s'"%(lb[0])
for i in lb[1:]:
lbs = lbs + " or library = '%s'"%(i)
lbs = lbs+")"
q = "" # to make lint happy
if excl:
exclusion = "'%s'"%(excl[0][0])
for i in excl[1:]:
exclusion = exclusion+','+"'%s'"%(i[0])
q = "select label from volume where \
%s and \
system_inhibit_0 = 'none' and \
system_inhibit_1 = 'none' and \
write_protected != 'n' and \
not storage_group in (select * from no_flipping_storage_group) and \
not file_family like '%%-MIGRATION%%' and \
not storage_group||'.'||file_family in \
(select storage_group||'.'||file_family \
from no_flipping_file_family) and\
not label in (%s) \
order by label"%(lbs, exclusion)
else:
q = "select label from volume where \
%s and \
system_inhibit_0 = 'none' and \
system_inhibit_1 = 'none' and \
write_protected != 'n' and \
not storage_group in (select * from no_flipping_storage_group) and \
not storage_group||'.'||file_family in \
(select storage_group||'.'||file_family \
from no_flipping_file_family) and\
not file_family like '%%-MIGRATION%%' \
order by label "%(lbs)
if limit:
q = q + " limit %d;"%(limit)
else:
q = q + ";"
if debug:
print q
res = edb.query_getresult(q)
job = {}
j = 0
cap_n = n
for i in range(len(res)):
if j == 0:
job[cap_n] = []
job[cap_n].append(res[i][0])
j = j + 1
if j >= VOLUMES_PER_CAP:
j = 0
cap_n = cap_n + 1
return job
# make_cap_args(d) -- make arguments from a dictionary
def make_cap_args(d):
res = []
for k in d.keys():
if d[k]:
res.append('CAP' + str(k) + ':')
for i in d[k]:
res.append(i)
return res
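# Illustrative example for make_cap_args() (labels are hypothetical; the
# output order follows the dict iteration order of the cap numbers):
#   make_cap_args({3: ['VO2093', 'VO2094'], 4: ['VO2224']})
#   -> ['CAP3:', 'VO2093', 'VO2094', 'CAP4:', 'VO2224']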
# make_cap(l, library_type, cap_n) -- build the cap eject script for a list of volumes
def make_cap(l, library_type='9310', cap_n = 0):
cap_script = ""
if library_type == '9310':
if cluster == "D0":
cap_script = "/usr/bin/rsh fntt -l acsss 'echo eject 1,0,0 "
elif cluster == "STK":
cap_script = "/usr/bin/rsh fntt -l acsss 'echo eject 0,0,0 "
elif cluster == "CDF":
cap_script = "/usr/bin/rsh fntt2 -l acsss 'echo eject 0,1,0 "
else:
return None
for i in l:
cap_script = cap_script + ' ' + i
cap_script = cap_script + " \\\\r logoff|bin/cmd_proc -l -q 2>/dev/null'\n"
elif library_type == 'aml2':
cap_script = ''
if cap_n % 2: # odd
door = ' E03\n'
else:
door = ' E06\n'
count = 0
for i in l:
if count == 0:
cap_script = cap_script + "dasadmin eject -t 3480 "+ i
else:
cap_script = cap_script +','+ i
count = count + 1
if count == 10:
cap_script = cap_script + door
count = 0
if count != 0:
cap_script = cap_script + door
elif library_type == '8500GS':
cap_script = "/usr/bin/rsh fntt -l acsss 'echo eject 2,1,0 "
for i in l:
cap_script = cap_script + ' ' + i
cap_script = cap_script + " \\\\r logoff|bin/cmd_proc -l -q 2>/dev/null'\n"
elif library_type == '8500G1':
cap_script = "/usr/bin/rsh fntt-gcc -l acsss 'echo eject 0,5,0 "
for i in l:
cap_script = cap_script + ' ' + i
cap_script = cap_script + " \\\\r logoff|bin/cmd_proc -l -q 2>/dev/null'\n"
elif library_type == '8500F1':
if cluster == "D0":
cap_script = "/usr/bin/rsh fntt2 -l acsss 'echo eject 1,9,0 "
elif cluster == "STK":
cap_script = "/usr/bin/rsh fntt2 -l acsss 'echo eject 1,1,0 "
elif cluster == "CDF":
cap_script = "/usr/bin/rsh fntt2 -l acsss 'echo eject 1,5,0 "
else:
return None
for i in l:
cap_script = cap_script + ' ' + i
cap_script = cap_script + " \\\\r logoff|bin/cmd_proc -l -q 2>/dev/null'\n"
return cap_script
# get_max_cap_number(cluster)
def get_max_cap_number(cluster, op_type=''):
q = "select max(to_number(substr(association, 4), 'FM999999')) \
from object, job \
where name like '%s%s%%' and object.job = job.id;"%(
cluster, op_type)
res = db.query_getresult(q)
if res[0][0]:
return int(res[0][0])
else:
return 0
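# Illustrative example for get_max_cap_number(): if existing jobs whose names
# start with cluster+op_type carry objects with associations 'CAP3' and
# 'CAP12', the query above yields 12; with no matching rows it returns 0.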
def make_help_desk_ticket(n, cluster, script_host, job, library_type='9310'):
if job == "protect":
action = "lock"
elif job == "permit":
action = "unlock"
else:
action = "do not touch"
VOLUMES_PER_CAP = volumes_per_cap(library_type)
system_name = script_host
short_message = "write %s %d tapes (flip tabs) in %s %s tape library"%(job, n, cluster.lower()+'en', library_type.upper())
long_message = 'Please run "flip_tab %s" on %s to write %s %d tapes (%d caps) in %s enstore %s tape library.'%(action, script_host, job, n, int((n-1)/VOLUMES_PER_CAP)+1, cluster, library_type.upper())
return snow_fliptab.submit_ticket(
Summary=short_message,
Comments=long_message,
CiName = system_name.upper().split('.')[0],
)
def get_last_job_time(cluster, job_type):
q = "select max(start) from job, job_definition \
where job_definition.name = '%s' and \
job.type = job_definition.id and \
job.name like '%s%%';"%(job_type, cluster)
if debug:
print q
res = db.query_getresult(q)[0][0]
if res:
return timestamp2time(res.split('.')[0])
return 0
def get_last_write_protect_on_job_time(l=None,c=None):
if not c:
c = cluster
if l:
lt = same_tape_library(l)
if not lt: # wrong cluster/library
return -1
q = get_qualifier(lt)
if q:
c = c+q
return get_last_job_time(c, 'WRITE_PROTECTION_TAB_ON')
def get_last_write_protect_off_job_time(l=None, c=None):
if not c:
c = cluster
if l:
lt = same_tape_library(l)
if not lt: # wrong cluster/library
return -1
q = get_qualifier(lt)
if q:
c = c+q
return get_last_job_time(c, 'WRITE_PROTECTION_TAB_OFF')
PROMPT = "operation> "
# shell() -- interactive shell
def shell():
while True:
sys.stdout.write(PROMPT)
# handle "..."
line = sys.stdin.readline()
if line == '':
print "quit"
return
elif line == '\n':
continue
parts = line.strip().split('"')
args = []
for i in range(len(parts)):
if even(i):
for j in parts[i].split():
args.append(j)
else:
args.append(parts[i])
if args and (args[0] == 'quit' or args[0] == 'exit'):
break
res = execute(args)
if res:
if type(res) == type([]):
for i in res:
print i
else:
print res
return
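# Illustrative example of the quote handling in shell() (job name and comment
# are hypothetical):
#   input line:  finish STKWP3 "all done"
#   parsed args: ['finish', 'STKWP3', 'all done']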
# execute(args) -- execute args[0], args[1:]
def execute(args):
n_args = len(args)
if n_args < 1:
return None
cmd = args[0]
if cmd == "dump": # dump all global variables
dump()
return
elif cmd == "list": # list all job
if n_args < 2 or args[1] == 'all':
q = "select job.id, job.name, \
job_definition.name as job, start, \
finish, comment \
from job, job_definition where \
job.type = job_definition.id \
order by job.start;"
if debug:
print q
return db.query(q)
elif args[1] == 'open':
q = "select job.id, job.name, \
job_definition.name as job, start, \
finish, comment \
from job, job_definition where \
job.type = job_definition.id and \
finish is null \
order by job.start;"
if debug:
print q
return db.query(q)
elif args[1] == 'closed' or args[1] == 'completed' or args[1] == 'finished':
q = "select job.id, job.name, \
job_definition.name as job, start, \
finish, comment \
from job, job_definition where \
job.type = job_definition.id and \
not finish is null \
order by job.start;"
if debug:
print q
return db.query(q)
elif args[1] == 'has':
qq = "select job.id, job.name, \
job_definition.name as job, start, \
finish, comment \
from job, job_definition, object where \
job.type = job_definition.id \
and object.job = job.id \
and object.object = '%s'"
q = qq%(args[2])
for i in args[2:]:
q = q + " intersect (%s)"%(qq%(i))
q = q + ";"
if debug:
print q
return db.query(q)
elif args[1] == 'recent':
if len(args) > 2:
limit = int(args[2])
else:
limit = 20
q = "select job.id, job.name, \
job_definition.name as job, start, \
finish, comment \
from job, job_definition where \
job.type = job_definition.id \
order by job.start desc limit %d;"%(
limit)
if debug:
print q
return db.query(q)
else:
or_stmt = "job.name like '%s' "%(args[1])
for i in args[2:]:
or_stmt = or_stmt + "or job.name like '%s' "%(i)
q = "select job.id, job.name, \
job_definition.name as job, start, \
finish, comment \
from job, job_definition where \
job.type = job_definition.id \
and (%s) \
order by job.start;"%(or_stmt)
if debug:
print q
return db.query(q)
elif cmd == "show": # show a job
for i in args[1:]:
job = get_job_by_name(i)
# pprint.pprint(job)
show(job)
elif cmd == "create": # create job
if args[1] == "write_protect_on":
return create_write_protect_on_job(args[2], args[3:])
elif args[1] == "write_protect_off":
return create_write_protect_off_job(args[2], args[3:])
else:
return "don't know what to do"
elif cmd == "auto_write_protect_on":
if len(args) > 2:
if args[2] == "no_limit":
res = recommend_write_protect_job(args[1], limit=0)
else:
res = recommend_write_protect_job(args[1])
elif len(args) == 2:
if args[1] == "no_limit":
res = recommend_write_protect_job(limit=0)
else:
res = recommend_write_protect_job(args[1])
else:
res = recommend_write_protect_job()
# create job
if res:
# get the qualifier
            if len(args) > 1 and args[1] != "no_limit":
                lt = library_type(cluster,args[1].split(',')[0])
else:
lt = library_type(cluster, get_default_library(cluster).split(',')[0])
qf = get_qualifier(lt)
job_name = cluster+qf+'WP'+`min(res.keys())`+'-'+`max(res.keys())`
create_write_protect_on_job(job_name, make_cap_args(res), 'AUTO-GENERATED')
# clean up temp directory
clean_up_temp_dir()
total = 0
for i in res.keys():
total = total + len(res[i])
f = open(os.path.join(TEMP_DIR, str(i)), 'w')
f.write(make_cap(res[i], lt, i))
f.close()
cc = "cd %s; enrcp * %s:%s"%(TEMP_DIR, script_host,
get_write_protect_script_path(lt))
print cc
os.system(cc)
ticket = make_help_desk_ticket(total, cluster, script_host, 'protect', lt)
print "ticket =", ticket
res2 = start_next_task(job_name, ticket)
res2.append(show_current_task(job_name))
res2.append("ticket = "+ticket)
return res2
else:
return "no more volumes to do"
elif cmd == "auto_write_protect_off":
if len(args) > 2:
if args[2] == "no_limit":
res = recommend_write_permit_job(args[1], limit=0)
else:
res = recommend_write_permit_job(args[1])
elif len(args) == 2:
if args[1] == "no_limit":
res = recommend_write_permit_job(limit=0)
else:
res = recommend_write_permit_job(args[1])
else:
res = recommend_write_permit_job()
if res:
# create job
            if len(args) > 1 and args[1] != "no_limit":
                lt = library_type(cluster,args[1].split(',')[0])
else:
lt = library_type(cluster, get_default_library(cluster).split(',')[0])
qf = get_qualifier(lt)
job_name = cluster+qf+'WE'+`min(res.keys())`+'-'+`max(res.keys())`
create_write_protect_off_job(job_name, make_cap_args(res), 'AUTO-GENERATED')
# clean up temp directory
clean_up_temp_dir()
total = 0
for i in res.keys():
total = total + len(res[i])
f = open(os.path.join(TEMP_DIR, str(i)), 'w')
f.write(make_cap(res[i], lt, i))
f.close()
cc = "cd %s; enrcp * %s:%s"%(TEMP_DIR, script_host,
get_write_permit_script_path(lt))
print cc
os.system(cc)
ticket = make_help_desk_ticket(total, cluster, script_host, 'permit', lt)
print "ticket =", ticket
res2 = start_next_task(job_name, ticket)
res2.append(show_current_task(job_name))
res2.append("ticket = "+ticket)
return res2
else:
return "no more volumes to do"
elif cmd == "recommend_write_protect_on":
if len(args) > 3:
if args[2] == 'limit':
res = recommend_write_protect_job(library = args[1], limit=int(args[3]))
else:
res = recommend_write_protect_job(library = args[1], limit=0)
elif len(args) == 3:
if args[1] == 'limit':
res = recommend_write_protect_job(limit=int(args[2]))
else:
res = recommend_write_protect_job(limit=0)
elif len(args) == 2:
res = recommend_write_protect_job(library = args[1], limit=0)
else:
res = recommend_write_protect_job(limit=0)
# pprint.pprint(res)
for i in res:
show_cap(i, res[i])
total = 0
caps = len(res)
for i in res.keys():
total = total + len(res[i])
print "%d tapes in %d caps"%(total, caps)
return ""
elif cmd == "recommend_write_protect_off":
if len(args) > 3:
if args[2] == 'limit':
res = recommend_write_permit_job(library = args[1], limit=int(args[3]))
else:
res = recommend_write_permit_job(library = args[1], limit=0)
elif len(args) == 3:
if args[1] == 'limit':
res = recommend_write_permit_job(limit=int(args[2]))
else:
res = recommend_write_permit_job(limit=0)
elif len(args) == 2:
res = recommend_write_permit_job(library = args[1], limit=0)
else:
res = recommend_write_permit_job(limit=0)
# pprint.pprint(res)
for i in res:
show_cap(i, res[i])
total = 0
caps = len(res)
for i in res.keys():
total = total + len(res[i])
print "%d tapes in %d caps"%(total, caps)
return ""
elif cmd == "current": # current task
result = []
for i in args[1:]:
result.append(show_current_task(i))
return result
elif cmd == "next": # next task
result = []
for i in args[1:]:
result.append(show_next_task(i))
return result
elif cmd == "start":
timestamp = None
arg = None
comment = None
if n_args < 2:
return "which job?"
job = args[1]
if n_args > 2:
if is_time(args[2]):
timestamp = args[2].replace('_', ' ')
if n_args > 3:
arg = args[3]
if n_args > 4:
comment = args[4]
else:
arg = args[2]
if n_args > 3:
comment = args[3]
res = start_next_task(args[1], arg, comment, timestamp)
res.append(show_current_task(args[1]))
return res
elif cmd == "finish":
timestamp = None
result = None
comment = None
if n_args < 2:
return "which job?"
if n_args > 2:
if is_time(args[2]):
timestamp = args[2].replace('_', ' ')
if n_args > 3:
result = args[3]
if n_args > 4:
comment = args[4]
else:
result = args[2]
if n_args > 3:
comment = args[3]
res = finish_current_task(args[1], result, comment, timestamp)
res.append(show_current_task(args[1]))
return res
elif cmd == "delete":
if args[-1] != "sincerely":
print "If you really want to delete the job(s), you have to say:"
for i in args:
print i,
print "sincerely"
else:
for i in args[1:-1]:
print "deleting job %s ..."%(i),
delete(i)
print "done"
elif cmd == "find" or cmd == "locate":
for i in args[1:]:
print "Jobs that %s is in:"%(i)
q = "select job.id, job.name, \
job_definition.name as job, start, \
finish, comment \
from job, job_definition, object \
where \
job.type = job_definition.id \
and \
object.job = job.id and \
object.object = '%s' \
order by job.start;"%(i)
if debug:
print q
print db.query(q)
elif cmd == "find+" or cmd == "locate+": # with details
for i in args[1:]:
print "Jobs that %s is in:"%(i)
q = "select job.name, job.start \
from job, object \
where \
object.job = job.id and \
object.object = '%s' \
order by job.start;"%(i)
if debug:
print q
res = db.query_getresult(q)
for j in res:
job = get_job_by_name(j[0])
show(job)
elif cmd == "relate":
for i in args[1:]:
print "Job(s) that is(are) related to %s"%(i)
q = "select job.id, job.name, \
job_definition.name as job, start, \
finish, comment \
from job, job_definition where \
job.type = job_definition.id and \
job.name in ( \
select distinct j2.name \
from job j1, job j2, object o1, object o2 \
where \
j1.id <= j2.id and \
j1.id = o1.job and \
j2.id = o2.job and \
o1.object = o2.object and \
j1.name = '%s') \
order by start;"%(i)
if debug:
print q
print db.query(q)
elif cmd == "auto_close_all":
auto_close_all()
elif cmd == "help":
if len(args) == 1:
help()
else:
help(args[1])
else:
return 'unknown command "%s"'%(cmd)
if __name__ == '__main__':
if len(sys.argv) < 2:
shell()
else:
res = execute(sys.argv[1:])
if res:
if type(res) == type([]):
for i in res:
print i
else:
print res
| 43,529 | 0 | 1,288 |
e05a65651cc796d35647fa8b9683ab79e7a876fe | 164 | py | Python | src/data/__init__.py | uiqkos/jobviz | d36d6476f8a56e306eccb25d467f05d924bf759f | [
"MIT"
] | null | null | null | src/data/__init__.py | uiqkos/jobviz | d36d6476f8a56e306eccb25d467f05d924bf759f | [
"MIT"
] | null | null | null | src/data/__init__.py | uiqkos/jobviz | d36d6476f8a56e306eccb25d467f05d924bf759f | [
"MIT"
] | null | null | null | from src.data.load_dictionaries import load_dictionaries, create_key_skills
from src.data.load_vacancies import load_vacancies
from src.data.vacancy import Vacancy
| 41 | 75 | 0.878049 | from src.data.load_dictionaries import load_dictionaries, create_key_skills
from src.data.load_vacancies import load_vacancies
from src.data.vacancy import Vacancy
| 0 | 0 | 0 |
993c6b3484d477798ccc852287106e2dcf851281 | 775 | py | Python | project/five_fold.py | jayson-garrison/ML-Naive-Bayes | 8026fae6413bc4f5b42d66eff8f1296a310b467c | [
"MIT"
] | 1 | 2022-01-18T01:58:24.000Z | 2022-01-18T01:58:24.000Z | project/five_fold.py | jayson-garrison/ML-Naive-Bayes | 8026fae6413bc4f5b42d66eff8f1296a310b467c | [
"MIT"
] | null | null | null | project/five_fold.py | jayson-garrison/ML-Naive-Bayes | 8026fae6413bc4f5b42d66eff8f1296a310b467c | [
"MIT"
] | null | null | null | """"
Author: Jayson C. Garrison
Dates: 01/19/2022
Course: CS-5333 (Machine Learning)
Description: Function that partitions data according to five fold cross validation
GitHub: https://github.com/jayson-garrison/ML-Naive-Bayes
"""
# partition a data set into a list of 5 tuples for training and testing
# five fold data partition | 28.703704 | 82 | 0.6 | """"
Author: Jayson C. Garrison
Dates: 01/19/2022
Course: CS-5333 (Machine Learning)
Description: Function that partitions data according to five fold cross validation
GitHub: https://github.com/jayson-garrison/ML-Naive-Bayes
"""
class FiveFold:
# partition a data set into a list of 5 tuples for training and testing
# five fold data partition
def five_fold(data_set):
partition_index = int( len(data_set) / 5 )
print('pdex: ', partition_index)
s = 0
fold = []
for i in range(5): #0-4
tr = data_set.copy()
n = s + partition_index # was -1
te = tr[s:n]
del tr[s:s + partition_index]
fold.append( (tr,te) )
s += partition_index
return fold | 396 | -6 | 48 |
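# Illustrative usage of FiveFold.five_fold (toy data):
#   folds = FiveFold.five_fold(list(range(10)))
#   len(folds) == 5; each element is a (train, test) pair with 8 and 2 items,
#   e.g. folds[0] == ([2, 3, 4, 5, 6, 7, 8, 9], [0, 1])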
57896a723fbc502aa7a96e61088aa5e895edfcd3 | 1,186 | py | Python | src/faber/config/try_compile.py | drmoose/faber | f447d58f2b42ad496c155cc8b3491379ac97c6f8 | [
"BSL-1.0"
] | 14 | 2017-05-27T00:18:24.000Z | 2021-09-11T03:51:02.000Z | src/faber/config/try_compile.py | drmoose/faber | f447d58f2b42ad496c155cc8b3491379ac97c6f8 | [
"BSL-1.0"
] | 26 | 2017-07-16T17:20:57.000Z | 2021-02-08T02:49:53.000Z | src/faber/config/try_compile.py | stefanseefeld/constructor | 0f369a8a9e4de305e5379d9662b2e79bffd43910 | [
"BSL-1.0"
] | 3 | 2018-05-24T13:52:40.000Z | 2020-06-30T16:46:26.000Z | #
# Copyright (c) 2016 Stefan Seefeld
# All rights reserved.
#
# This file is part of Faber. It is made available under the
# Boost Software License, Version 1.0.
# (Consult LICENSE or http://www.boost.org/LICENSE_1_0.txt)
from .check import check
from ..artefact import intermediate, always
from ..tools.compiler import compiler
from ..rule import rule, alias
from ..artefacts.object import object
class try_compile(check):
"""Try to compile a chunk of source code."""
| 33.885714 | 74 | 0.632378 | #
# Copyright (c) 2016 Stefan Seefeld
# All rights reserved.
#
# This file is part of Faber. It is made available under the
# Boost Software License, Version 1.0.
# (Consult LICENSE or http://www.boost.org/LICENSE_1_0.txt)
from .check import check
from ..artefact import intermediate, always
from ..tools.compiler import compiler
from ..rule import rule, alias
from ..artefacts.object import object
class try_compile(check):
"""Try to compile a chunk of source code."""
def __init__(self, name, source, type, features=(), if_=(), ifnot=()):
check.__init__(self, name, features, if_, ifnot)
compiler.check_instance_for_type(type, features)
if not self.cached:
# create source file
src = type.synthesize_name(self.name)
def generate(targets, _):
with open(targets[0]._filename, 'w') as os:
os.write(source)
src = rule(generate, src, attrs=intermediate|always,
logfile=self.logfile)
obj = object(self.name, src, attrs=intermediate,
features=self.features, logfile=self.logfile)
alias(self, obj)
| 682 | 0 | 27 |
400660bfe3de66710cc942a5b36698c94dc1b198 | 127 | py | Python | zenmailbox/apps.py | DevZenCat/django-zenmailbox | 3de636d39955130e7c129eaa31261ffc49f2f3fa | [
"MIT"
] | null | null | null | zenmailbox/apps.py | DevZenCat/django-zenmailbox | 3de636d39955130e7c129eaa31261ffc49f2f3fa | [
"MIT"
] | 4 | 2021-01-30T16:25:09.000Z | 2021-02-27T01:57:10.000Z | zenmailbox/apps.py | DevZenCat/django-zenmailbox | 3de636d39955130e7c129eaa31261ffc49f2f3fa | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 18.142857 | 34 | 0.748031 | from django.apps import AppConfig
class ZenmailboxConfig(AppConfig):
name = 'zenmailbox'
verbose_name = "Zenmailbox"
| 0 | 69 | 23 |
f18ac13ba65aed5e3dfba48ba08ef246fff78e5e | 3,354 | py | Python | pyscripts/analyse_swm_loc_trunc.py | luanfs/iModel | 8dd39e3564b6bc01df8dfe417493d9c7b22c40a8 | [
"MIT"
] | 10 | 2015-10-09T17:44:51.000Z | 2021-06-10T01:56:33.000Z | pyscripts/analyse_swm_loc_trunc.py | luanfs/iModel | 8dd39e3564b6bc01df8dfe417493d9c7b22c40a8 | [
"MIT"
] | 2 | 2018-08-29T11:49:32.000Z | 2021-03-18T18:07:53.000Z | pyscripts/analyse_swm_loc_trunc.py | luanfs/iModel | 8dd39e3564b6bc01df8dfe417493d9c7b22c40a8 | [
"MIT"
] | 14 | 2016-03-18T19:24:56.000Z | 2021-09-01T10:38:57.000Z | #! /usr/bin/env python3
#---------------------------------
# Plots errors of experiments
# obtained from iModel output
# Pedro Peixoto (ppeixoto@usp.br)
# Novembre 2018
#----------------------------------
import sys
import os
import re
import string
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
#Custom plotting setup
import imodel_plot
from imodel_plot import Plotter, PlotterPanel
import imodel_dict
from imodel_dict import Names, Filter
# input filename
input_filename = 'errors.txt'
if len(sys.argv) <= 1 :
print("I need 1 argument:")
print("A filename containing the errors generated by imodel")
sys.exit(1)
if len(sys.argv) > 1:
input_filename = sys.argv[1]
with open(input_filename) as f:
lines = f.readlines()
#get header
head = lines[0]
#print(head)
if head[-1] == '\n':
head = head[0:-1]
head = head.split()
print(head)
imethod = head.index("Methods")
ioperator = head.index("Operator")
igridname = head.index("Grid")
igridres = head.index("Mesh")
imaxerror = head.index("MaxError")
imaxerrorrel = head.index("MaxErrorRel")
irmserror = head.index("RMSError")
methods = []
operators = []
maxerrors = []
rmserrors = []
gridres = []
gridnames = []
for i, l in enumerate(lines[1:]):
if l[-1] == '\n': #get rid of \n
l = l[0:-1]
d = l.split() #line data
#print(d)
# skip invalid nan's
if d[imaxerror] == 'nan':
continue
operators.append(d[ioperator])
methods.append(d[imethod])
maxerrors.append(float(d[imaxerror]))
rmserrors.append(float(d[irmserror]))
gridres.append(int(d[igridres]))
gridnames.append((d[igridname].rstrip(string.digits)))
#Get unique operator names
operators_list=sorted(set(operators))
methods_list=sorted(set(methods))
grids_list=sorted(set(gridnames))
print("Full options available")
print(operators_list)
print(methods_list)
print(grids_list)
print("---------------------")
print()
dict=Names("naming_conv.csv") #Load naming convention
filter=Filter("filter.csv") #load filtering conditions
print(dict)
#Apply filters
operators_list=filter.select(operators_list, 'operator')
grids_list=filter.select(grids_list, 'grid')
methods_list=filter.select(methods_list, 'method')
#methods_list=filter.select(methods_list)
#grids_list=filter.select(grids_list)
print("Filtred lists")
print(operators_list)
print(methods_list)
print(grids_list)
print("---------------------")
print()
#Plot for each operator
for oper in operators_list:
outname=input_filename.replace('.txt', "_"+oper+".eps")
#outnamerms=input_filename.replace('.txt', "_"+oper+"_rms.eps")
title=dict.names.get(oper, oper)
figure = PlotterPanel( 2, title, ["grid points", "grid points"], ["max error", "rms error"])
#figurerms = Plotter(oper, "grid points", "rms error")
c = 0
for mtd in methods_list:
for grd in grids_list:
name=mtd+"_"+grd[0:-1]
name=dict.names.get(name, name)
x = []
ymax = []
yrms = []
for i, val in enumerate(maxerrors):
if operators[i] == oper and methods[i] == mtd and gridnames[i] == grd:
x.append(gridres[i])
ymax.append(maxerrors[i])
yrms.append(rmserrors[i])
figure.plot( 0, x, ymax, label=name, i=c)
figure.plot( 1, x, yrms, label=name, i=c)
c = c + 1
#plt.show()
figure.finish(outname)
#figurerms.finish(outnamerms)
plt.show()
| 23.131034 | 93 | 0.683363 | #! /usr/bin/env python3
#---------------------------------
# Plots errors of experiments
# obtained from iModel output
# Pedro Peixoto (ppeixoto@usp.br)
# Novembre 2018
#----------------------------------
import sys
import os
import re
import string
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
#Custom plotting setup
import imodel_plot
from imodel_plot import Plotter, PlotterPanel
import imodel_dict
from imodel_dict import Names, Filter
# input filename
input_filename = 'errors.txt'
if len(sys.argv) <= 1 :
print("I need 1 argument:")
print("A filename containing the errors generated by imodel")
sys.exit(1)
if len(sys.argv) > 1:
input_filename = sys.argv[1]
with open(input_filename) as f:
lines = f.readlines()
#get header
head = lines[0]
#print(head)
if head[-1] == '\n':
head = head[0:-1]
head = head.split()
print(head)
imethod = head.index("Methods")
ioperator = head.index("Operator")
igridname = head.index("Grid")
igridres = head.index("Mesh")
imaxerror = head.index("MaxError")
imaxerrorrel = head.index("MaxErrorRel")
irmserror = head.index("RMSError")
methods = []
operators = []
maxerrors = []
rmserrors = []
gridres = []
gridnames = []
for i, l in enumerate(lines[1:]):
if l[-1] == '\n': #get rid of \n
l = l[0:-1]
d = l.split() #line data
#print(d)
# skip invalid nan's
if d[imaxerror] == 'nan':
continue
operators.append(d[ioperator])
methods.append(d[imethod])
maxerrors.append(float(d[imaxerror]))
rmserrors.append(float(d[irmserror]))
gridres.append(int(d[igridres]))
gridnames.append((d[igridname].rstrip(string.digits)))
#Get unique operator names
operators_list=sorted(set(operators))
methods_list=sorted(set(methods))
grids_list=sorted(set(gridnames))
print("Full options available")
print(operators_list)
print(methods_list)
print(grids_list)
print("---------------------")
print()
dict=Names("naming_conv.csv") #Load naming convention
filter=Filter("filter.csv") #load filtering conditions
print(dict)
#Apply filters
operators_list=filter.select(operators_list, 'operator')
grids_list=filter.select(grids_list, 'grid')
methods_list=filter.select(methods_list, 'method')
#methods_list=filter.select(methods_list)
#grids_list=filter.select(grids_list)
print("Filtred lists")
print(operators_list)
print(methods_list)
print(grids_list)
print("---------------------")
print()
#Plot for each operator
for oper in operators_list:
outname=input_filename.replace('.txt', "_"+oper+".eps")
#outnamerms=input_filename.replace('.txt', "_"+oper+"_rms.eps")
title=dict.names.get(oper, oper)
figure = PlotterPanel( 2, title, ["grid points", "grid points"], ["max error", "rms error"])
#figurerms = Plotter(oper, "grid points", "rms error")
c = 0
for mtd in methods_list:
for grd in grids_list:
name=mtd+"_"+grd[0:-1]
name=dict.names.get(name, name)
x = []
ymax = []
yrms = []
for i, val in enumerate(maxerrors):
if operators[i] == oper and methods[i] == mtd and gridnames[i] == grd:
x.append(gridres[i])
ymax.append(maxerrors[i])
yrms.append(rmserrors[i])
figure.plot( 0, x, ymax, label=name, i=c)
figure.plot( 1, x, yrms, label=name, i=c)
c = c + 1
#plt.show()
figure.finish(outname)
#figurerms.finish(outnamerms)
plt.show()
| 0 | 0 | 0 |
59559039502c8f55ac9f29f68ebb611f4d96e2cd | 6,641 | py | Python | h/script.py | RichardLitt/h | f98d7e19a7913c82a3d3c43c226994366ebd3009 | [
"MIT"
] | null | null | null | h/script.py | RichardLitt/h | f98d7e19a7913c82a3d3c43c226994366ebd3009 | [
"MIT"
] | null | null | null | h/script.py | RichardLitt/h | f98d7e19a7913c82a3d3c43c226994366ebd3009 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
from os import chdir, getcwd, makedirs, mkdir, walk
from os.path import abspath, exists, join
from shutil import copyfile, rmtree
from urlparse import urljoin, urlparse, urlunparse, uses_netloc, uses_relative
from chameleon.zpt.template import PageTextTemplateFile
from clik import App
from pyramid.config import Configurator
from pyramid.events import BeforeRender, ContextFound
from pyramid.paster import get_appsettings
from pyramid.path import AssetResolver
from pyramid.request import Request
from pyramid.scripting import prepare
from pyramid.view import render_view
from pyramid_basemodel import bind_engine
from sqlalchemy import engine_from_config
from h import __version__, api
version = __version__
description = """\
The Hypothes.is Project Annotation System
"""
command = App(
'hypothesis',
version=version,
description=description,
args_callback=get_config,
)
# Teach urlparse about extension schemes
uses_netloc.append('chrome-extension')
uses_relative.append('chrome-extension')
# Fetch an asset spec resolver
resolve = AssetResolver().resolve
@command(usage='CONFIG_FILE')
def init_db(settings):
"""Create the database models."""
store = api.store_from_settings(settings)
api.create_db(store)
engine = engine_from_config(settings, 'sqlalchemy.')
bind_engine(engine, should_create=True)
@command(usage='config_file')
def assets(settings):
"""Build the static assets."""
config = Configurator(settings=settings)
config.include('h.assets')
for bundle in config.get_webassets_env():
bundle.urls()
@command(usage='config_file base_url [static_url]')
def extension(args, console, settings):
"""Build the browser extensions.
The first argument is the base URL of an h installation:
http://localhost:5000
An optional second argument can be used to specify the location for static
assets.
Examples:
http://static.example.com/
chrome-extension://extensionid/public
"""
if len(args) == 1:
console.error('You must supply a url to the hosted backend.')
return 2
elif len(args) == 2:
assets_url = settings['webassets.base_url']
else:
settings['webassets.base_url'] = args[2]
assets_url = args[2]
base_url = args[1]
# Fully-qualify the static asset url
parts = urlparse(assets_url)
if not parts.netloc:
base = urlparse(base_url)
parts = (base.scheme, base.netloc,
parts.path, parts.params,
parts.query, parts.fragment)
assets_url = urlunparse(parts)
# Set up the assets url and source path mapping
settings['webassets.base_dir'] = abspath('./build/chrome/public')
settings['webassets.base_url'] = assets_url
settings['webassets.paths'] = json.dumps({
resolve('h:static').abspath(): assets_url
})
# Turn off the webassets cache and manifest
settings['webassets.cache'] = None
settings['webassets.manifest'] = None
config = Configurator(settings=settings)
config.include('h')
config.add_subscriber(add_base_url, BeforeRender)
config.commit()
# Build it
request = Request.blank('/app', base_url=base_url)
chrome(prepare(registry=config.registry, request=request))
# XXX: Change when webassets allows setting the cache option
# As of 0.10 it's only possible to pass a sass config with string values
rmtree('./build/chrome/public/.sass-cache')
main = command.main
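# Illustrative invocations (config path and extension id are hypothetical, and
# assume the installed console script is named after the App, 'hypothesis'):
#   hypothesis extension conf/development.ini http://localhost:5000
#   hypothesis extension conf/development.ini http://localhost:5000 \
#       chrome-extension://abcdefgh/public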
| 29.780269 | 78 | 0.680922 | # -*- coding: utf-8 -*-
import json
from os import chdir, getcwd, makedirs, mkdir, walk
from os.path import abspath, exists, join
from shutil import copyfile, rmtree
from urlparse import urljoin, urlparse, urlunparse, uses_netloc, uses_relative
from chameleon.zpt.template import PageTextTemplateFile
from clik import App
from pyramid.config import Configurator
from pyramid.events import BeforeRender, ContextFound
from pyramid.paster import get_appsettings
from pyramid.path import AssetResolver
from pyramid.request import Request
from pyramid.scripting import prepare
from pyramid.view import render_view
from pyramid_basemodel import bind_engine
from sqlalchemy import engine_from_config
from h import __version__, api
def get_config(args):
settings = get_appsettings(args[0])
settings['basemodel.should_create_all'] = False
settings['basemodel.should_drop_all'] = False
settings['pyramid.includes'] = []
return dict(settings=settings)
version = __version__
description = """\
The Hypothes.is Project Annotation System
"""
command = App(
'hypothesis',
version=version,
description=description,
args_callback=get_config,
)
# Teach urlparse about extension schemes
uses_netloc.append('chrome-extension')
uses_relative.append('chrome-extension')
# Fetch an asset spec resolver
resolve = AssetResolver().resolve
def add_base_url(event):
request = event['request']
assets_env = request.webassets_env
view_name = getattr(request, 'view_name', None)
if view_name == 'embed.js' and not assets_env.url.startswith('http'):
base_url = join(request.webassets_env.url, '')
else:
base_url = request.resource_url(request.context, '')
event['base_url'] = base_url
def app(context, request):
with open('public/app.html', 'w') as f:
f.write(render_view(context, request, name='app.html'))
def embed(context, request):
setattr(request, 'view_name', 'embed.js')
with open('public/embed.js', 'w') as f:
f.write(render_view(context, request, name='embed.js'))
delattr(request, 'view_name')
def manifest(context, request):
# Chrome is strict about the format of the version string
ext_version = '.'.join(version.replace('-', '.').split('.')[:4])
assets_url = request.webassets_env.url
manifest_file = resolve('h:browser/chrome/manifest.json').abspath()
manifest_renderer = PageTextTemplateFile(manifest_file)
with open('manifest.json', 'w') as f:
src = urljoin(request.resource_url(context), assets_url)
f.write(manifest_renderer(src=src, version=ext_version))
def chrome(env):
registry = env['registry']
request = env['request']
context = request.context
registry.notify(ContextFound(request)) # pyramid_layout attrs
request.layout_manager.layout.csp = ''
# Remove any existing build
if exists('./build/chrome'):
rmtree('./build/chrome')
# Create the new build directory
makedirs('./build/chrome/public')
# Change to the output directory
old_dir = getcwd()
chdir('./build/chrome')
# Copy the extension code
merge('../../h/browser/chrome', './')
# Build the app html and copy assets if they are being bundled
if request.webassets_env.url.startswith('chrome-extension://'):
makedirs('./public/styles/images')
merge('../../h/static/styles/images', './public/styles/images')
merge('../../h/static/images', './public/images')
merge('../../h/static/fonts', './public/fonts')
# Copy over the vendor assets since they won't be processed otherwise
if request.webassets_env.debug:
makedirs('./public/scripts/vendor')
merge('../../h/static/scripts/vendor', './public/scripts/vendor')
app(context, request)
manifest(context, request)
embed(context, request)
# Reset the directory
chdir(old_dir)
def merge(src, dst):
for src_dir, _, files in walk(src):
dst_dir = src_dir.replace(src, dst)
if not exists(dst_dir):
mkdir(dst_dir)
for f in files:
src_file = join(src_dir, f)
dst_file = join(dst_dir, f)
copyfile(src_file, dst_file)
@command(usage='CONFIG_FILE')
def init_db(settings):
"""Create the database models."""
store = api.store_from_settings(settings)
api.create_db(store)
engine = engine_from_config(settings, 'sqlalchemy.')
bind_engine(engine, should_create=True)
@command(usage='config_file')
def assets(settings):
"""Build the static assets."""
config = Configurator(settings=settings)
config.include('h.assets')
for bundle in config.get_webassets_env():
bundle.urls()
@command(usage='config_file base_url [static_url]')
def extension(args, console, settings):
"""Build the browser extensions.
The first argument is the base URL of an h installation:
http://localhost:5000
An optional second argument can be used to specify the location for static
assets.
Examples:
http://static.example.com/
chrome-extension://extensionid/public
"""
if len(args) == 1:
console.error('You must supply a url to the hosted backend.')
return 2
elif len(args) == 2:
assets_url = settings['webassets.base_url']
else:
settings['webassets.base_url'] = args[2]
assets_url = args[2]
base_url = args[1]
# Fully-qualify the static asset url
parts = urlparse(assets_url)
if not parts.netloc:
base = urlparse(base_url)
parts = (base.scheme, base.netloc,
parts.path, parts.params,
parts.query, parts.fragment)
assets_url = urlunparse(parts)
# Set up the assets url and source path mapping
settings['webassets.base_dir'] = abspath('./build/chrome/public')
settings['webassets.base_url'] = assets_url
settings['webassets.paths'] = json.dumps({
resolve('h:static').abspath(): assets_url
})
# Turn off the webassets cache and manifest
settings['webassets.cache'] = None
settings['webassets.manifest'] = None
config = Configurator(settings=settings)
config.include('h')
config.add_subscriber(add_base_url, BeforeRender)
config.commit()
# Build it
request = Request.blank('/app', base_url=base_url)
chrome(prepare(registry=config.registry, request=request))
# XXX: Change when webassets allows setting the cache option
# As of 0.10 it's only possible to pass a sass config with string values
rmtree('./build/chrome/public/.sass-cache')
main = command.main
| 2,940 | 0 | 161 |
059cb2b4e53b363ecd3dc02f379199332064334e | 3,212 | py | Python | src/bnn_priors/bnn_priors/mcmc/hmc.py | activatedgeek/uncertainty-da-bayesian-classification | a270fb095f4790dea15327145897d09d0ba9c80b | [
"Apache-2.0"
] | 31 | 2021-02-16T09:35:03.000Z | 2022-03-31T17:18:54.000Z | src/bnn_priors/bnn_priors/mcmc/hmc.py | activatedgeek/understanding-bayesian-classification | a270fb095f4790dea15327145897d09d0ba9c80b | [
"Apache-2.0"
] | 1 | 2021-05-10T15:25:48.000Z | 2021-05-10T15:25:48.000Z | src/bnn_priors/bnn_priors/mcmc/hmc.py | activatedgeek/understanding-bayesian-classification | a270fb095f4790dea15327145897d09d0ba9c80b | [
"Apache-2.0"
] | 4 | 2021-02-21T03:38:00.000Z | 2021-12-24T15:13:29.000Z | import torch
import math
from typing import Sequence, Optional, Callable, Tuple, Dict, Union
import typing
from .sgld import dot
from .verlet_sgld import VerletSGLD
class HMC(VerletSGLD):
"""HMC with Verlet integration. Really `VerletSGLD` but with momentum=1 and
temperature=1, and a different M-H acceptance probability.
The user should call `sample_momentum` regularly to refresh the HMC momentum.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
num_data (int): the number of data points in this learning task
raise_on_no_grad (bool): whether to complain if a parameter does not
have a gradient
raise_on_nan: whether to complain if a gradient is not all finite.
"""
| 40.15 | 85 | 0.612702 | import torch
import math
from typing import Sequence, Optional, Callable, Tuple, Dict, Union
import typing
from .sgld import dot
from .verlet_sgld import VerletSGLD
class HMC(VerletSGLD):
"""HMC with Verlet integration. Really `VerletSGLD` but with momentum=1 and
temperature=1, and a different M-H acceptance probability.
The user should call `sample_momentum` regularly to refresh the HMC momentum.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
num_data (int): the number of data points in this learning task
raise_on_no_grad (bool): whether to complain if a parameter does not
have a gradient
raise_on_nan: whether to complain if a gradient is not all finite.
"""
def __init__(self, params: Sequence[Union[torch.nn.Parameter, Dict]],
lr: float, num_data: int,
raise_on_no_grad: bool=True, raise_on_nan: bool=True):
super().__init__(params, lr, num_data, 1., 1.,
raise_on_no_grad=raise_on_no_grad,
raise_on_nan=raise_on_nan)
def _point_energy(self, group, p, state):
return .5 * dot(state['momentum_buffer'], state['momentum_buffer'])
def _update_group_fn(self, g):
# Ensure momentum and temperature are correct at every step
# No matter what modifications are done before `self.step`.
super()._update_group_fn(g)
assert g['momentum'] == 1. and g['temperature'] == 1.
def _step_fn(self, group, p, state, is_initial=False, is_final=False,
save_state=False, calc_metrics=True):
if save_state:
self._save_state(group, p, state)
M_rsqrt = self._preconditioner_default(state, p)
momentum = state['momentum_buffer']
if is_initial:
mom_dot = dot(momentum, momentum)
# Subtract initial kinetic energy from delta_energy
state['delta_energy'] = -.5 * mom_dot
if calc_metrics:
state['est_temperature'] = mom_dot / p.numel()
if calc_metrics:
# Temperature diagnostics
d = p.numel()
if not is_final and not is_initial:
state['est_temperature'] = dot(momentum, momentum) / d
# NOTE: p and p.grad are from the same time step
state['est_config_temp'] = dot(p, p.grad) * (group['num_data']/d)
# Gradient step on the momentum
grad_lr = -.5 * group['grad_v'] * group['bhn'] * M_rsqrt
momentum.add_(p.grad, alpha=grad_lr)
if is_final:
if calc_metrics:
# If it is the final step, p and p.grad correspond to the same time
# step as the updated momentum
state['est_temperature'] = dot(momentum, momentum) / p.numel()
else:
# Update the parameters:
p.add_(momentum, alpha=group['bh']*M_rsqrt)
# RMSProp moving average
alpha = group['rmsprop_alpha']
state['square_avg'].mul_(alpha).addcmul_(p.grad, p.grad, value=1 - alpha)
| 2,253 | 0 | 107 |
4dfcd178b71eaa0a60e7f9386fde5faf6871fbbb | 3,874 | py | Python | diff_representation/vocab.py | microsoft/iclr2019-learning-to-represent-edits | e5777d6aa6cdeda500cf076646177c48d1cb4622 | [
"MIT"
] | 8 | 2021-03-15T18:57:18.000Z | 2021-08-23T11:28:22.000Z | diff_representation/vocab.py | microsoft/iclr2019-learning-to-represent-edits | e5777d6aa6cdeda500cf076646177c48d1cb4622 | [
"MIT"
] | null | null | null | diff_representation/vocab.py | microsoft/iclr2019-learning-to-represent-edits | e5777d6aa6cdeda500cf076646177c48d1cb4622 | [
"MIT"
] | 4 | 2021-03-27T14:19:09.000Z | 2021-09-13T12:35:31.000Z | #!/usr/bin/env python
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Usage:
vocab.py [options] TRAIN_FILE VOCAB_FILE
Options:
-h --help Show this screen.
--size=<int> vocab size [default: 10000]
--freq_cutoff=<int> frequency cutoff [default: 2]
"""
from collections import Counter
from itertools import chain
from docopt import docopt
import json
if __name__ == '__main__':
from diff_representation.dataset import DataSet
args = docopt(__doc__)
train_set = DataSet.load_from_jsonl(args['TRAIN_FILE'])
corpus = [change.previous_code_chunk + change.updated_code_chunk + change.context for change in train_set]
vocab_entry = VocabEntry.from_corpus(corpus, size=int(args['--size']), freq_cutoff=int(args['--freq_cutoff']))
print('built vocabulary %s' % vocab_entry)
# torch.save(vocab_entry, open(args['VOCAB_FILE'], 'wb'))
| 30.503937 | 117 | 0.593443 | #!/usr/bin/env python
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Usage:
vocab.py [options] TRAIN_FILE VOCAB_FILE
Options:
-h --help Show this screen.
--size=<int> vocab size [default: 10000]
--freq_cutoff=<int> frequency cutoff [default: 2]
"""
from collections import Counter
from itertools import chain
from docopt import docopt
import json
class VocabEntry:
def __init__(self):
self.word2id = dict()
self.unk_id = 3
self.word2id['<pad>'] = 0
self.word2id['<s>'] = 1
self.word2id['</s>'] = 2
self.word2id['<unk>'] = 3
self.id2word = {v: k for k, v in self.word2id.items()}
# insert 100 indexed unks
for i in range(100):
self.add('UNK_%d' % i)
def __getitem__(self, word):
return self.word2id.get(word, self.unk_id)
def is_unk(self, word):
return word not in self.word2id
def __contains__(self, word):
return word in self.word2id
def __setitem__(self, key, value):
raise ValueError('vocabulary is readonly')
def __len__(self):
return len(self.word2id)
def __repr__(self):
return 'Vocabulary[size=%d]' % len(self)
def id2word(self, wid):
return self.id2word[wid]
def add(self, word):
if word not in self:
wid = self.word2id[word] = len(self)
self.id2word[wid] = word
return wid
else:
return self[word]
def save(self, path):
params = dict(unk_id=self.unk_id, word2id=self.word2id, word_freq=self.word_freq)
json.dump(params, open(path, 'w'), indent=2)
@staticmethod
def load(path):
entry = VocabEntry()
params = json.load(open(path, 'r'))
setattr(entry, 'unk_id', params['unk_id'])
setattr(entry, 'word2id', params['word2id'])
setattr(entry, 'word_freq', params['word_freq'])
setattr(entry, 'id2word', {v: k for k, v in params['word2id'].items()})
return entry
@staticmethod
def from_corpus(corpus, size, freq_cutoff=0):
vocab_entry = VocabEntry()
word_freq = Counter(chain(*corpus))
freq_words = [w for w in word_freq if word_freq[w] >= freq_cutoff]
print('number of word types: %d, number of word types w/ frequency >= %d: %d' % (len(word_freq), freq_cutoff,
len(freq_words)))
top_k_words = sorted(word_freq, key=lambda x: (-word_freq[x], x))[:size]
print('top 10 words: %s' % ', '.join(top_k_words[:10]))
for word in top_k_words:
if len(vocab_entry) < size:
if word_freq[word] >= freq_cutoff:
vocab_entry.add(word)
# store the work frequency table in the
setattr(vocab_entry, 'word_freq', word_freq)
return vocab_entry
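# Illustrative call of the builder above (the token lists are made up):
#   vocab = VocabEntry.from_corpus([['int', 'x', '='], ['int', 'y', '=']], size=10000, freq_cutoff=2)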
class Vocab(object):
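    """Bundle of named VocabEntry objects; each keyword argument becomes an attribute."""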
def __init__(self, **kwargs):
self.entries = []
for key, item in kwargs.items():
assert isinstance(item, VocabEntry)
self.__setattr__(key, item)
self.entries.append(key)
def __repr__(self):
return 'Vocab(%s)' % (', '.join('%s %swords' % (entry, getattr(self, entry)) for entry in self.entries))
if __name__ == '__main__':
from diff_representation.dataset import DataSet
args = docopt(__doc__)
train_set = DataSet.load_from_jsonl(args['TRAIN_FILE'])
corpus = [change.previous_code_chunk + change.updated_code_chunk + change.context for change in train_set]
vocab_entry = VocabEntry.from_corpus(corpus, size=int(args['--size']), freq_cutoff=int(args['--freq_cutoff']))
print('built vocabulary %s' % vocab_entry)
# torch.save(vocab_entry, open(args['VOCAB_FILE'], 'wb'))
| 2,480 | 354 | 99 |
691d093f8e188fd2228b63061c9ed8aeb574df99 | 1,620 | py | Python | graphs/partitions_graph.py | jnfran92/adaptive-boxes | bcf03a91d48877b3a24125b74a233bda5bd8e044 | [
"MIT"
] | 7 | 2020-06-05T23:18:14.000Z | 2021-12-27T01:27:06.000Z | graphs/partitions_graph.py | jnfran92/adaptive-boxes | bcf03a91d48877b3a24125b74a233bda5bd8e044 | [
"MIT"
] | 3 | 2019-09-15T15:43:29.000Z | 2020-11-19T16:27:22.000Z | graphs/partitions_graph.py | jnfran92/adaptive-boxes | bcf03a91d48877b3a24125b74a233bda5bd8e044 | [
"MIT"
] | 1 | 2020-09-24T08:01:39.000Z | 2020-09-24T08:01:39.000Z |
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
from networkx.readwrite import json_graph, write_gexf
from matplotlib import pylab
summary_groups_data_path = '/Users/Juan/django_projects/adaptive-boxes/graphs/partitions_data/hall/summary_groups.csv'
x_units_path = '/Users/Juan/django_projects/adaptive-boxes/graphs/partitions_data/hall/x_units.csv'
y_units_path = '/Users/Juan/django_projects/adaptive-boxes/graphs/partitions_data/hall/y_units.csv'
summary_groups = pd.read_csv(summary_groups_data_path)
x_units = pd.read_csv(x_units_path)
y_units = pd.read_csv(y_units_path)
# Creating Graphs
G = nx.Graph()
# n_total_nodes = summary_groups['n_partitions'].sum()
n_total_nodes = summary_groups.shape[0]
H = nx.path_graph(n_total_nodes)
G.add_nodes_from(H)
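# Each row of x_units/y_units references two partitions (group_*, partition_*); only the group ids
# are used here, so every shared unit contributes an edge between the two groups in G.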
for idx, row in x_units.iterrows():
# print(row)
gi_0 = row['group_0']
gj_0 = row['partition_0']
gi_1 = row['group_1']
gj_1 = row['partition_1']
G.add_edge(gi_0, gi_1)
for idx, row in y_units.iterrows():
# print(row)
gi_0 = row['group_0']
gj_0 = row['partition_0']
gi_1 = row['group_1']
gj_1 = row['partition_1']
G.add_edge(gi_0, gi_1)
print(G.number_of_nodes())
print(G.number_of_edges())
options = {
'node_color': 'yellow',
'node_size': 80,
'edge_color': 'red',
'width': 0.5,
'font_size': 8,
'font_color': 'black',
}
# save_graph(G, "./my_graph.pdf")
# nx.draw(G, **options)
nx.draw(G, with_labels=True, **options)
plt.show()
nx.write_gexf(G, "/Users/Juan/django_projects/adaptive-boxes/graphs/gexf/hall.gexf")
# Info
print(nx.info(G))
| 23.478261 | 118 | 0.71358 |
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
from networkx.readwrite import json_graph, write_gexf
from matplotlib import pylab
summary_groups_data_path = '/Users/Juan/django_projects/adaptive-boxes/graphs/partitions_data/hall/summary_groups.csv'
x_units_path = '/Users/Juan/django_projects/adaptive-boxes/graphs/partitions_data/hall/x_units.csv'
y_units_path = '/Users/Juan/django_projects/adaptive-boxes/graphs/partitions_data/hall/y_units.csv'
summary_groups = pd.read_csv(summary_groups_data_path)
x_units = pd.read_csv(x_units_path)
y_units = pd.read_csv(y_units_path)
# Creating Graphs
G = nx.Graph()
# n_total_nodes = summary_groups['n_partitions'].sum()
n_total_nodes = summary_groups.shape[0]
H = nx.path_graph(n_total_nodes)
G.add_nodes_from(H)
for idx, row in x_units.iterrows():
# print(row)
gi_0 = row['group_0']
gj_0 = row['partition_0']
gi_1 = row['group_1']
gj_1 = row['partition_1']
G.add_edge(gi_0, gi_1)
for idx, row in y_units.iterrows():
# print(row)
gi_0 = row['group_0']
gj_0 = row['partition_0']
gi_1 = row['group_1']
gj_1 = row['partition_1']
G.add_edge(gi_0, gi_1)
print(G.number_of_nodes())
print(G.number_of_edges())
options = {
'node_color': 'yellow',
'node_size': 80,
'edge_color': 'red',
'width': 0.5,
'font_size': 8,
'font_color': 'black',
}
# save_graph(G, "./my_graph.pdf")
# nx.draw(G, **options)
nx.draw(G, with_labels=True, **options)
plt.show()
nx.write_gexf(G, "/Users/Juan/django_projects/adaptive-boxes/graphs/gexf/hall.gexf")
# Info
print(nx.info(G))
| 0 | 0 | 0 |
8698e112f320339793a7016d5eccfaf6f1d310cc | 6,299 | py | Python | scripts/zMayaTools/bake_transform.py | fsanges/zMayaTools | 795168d497459b43439e03a55233320f90d8d11c | [
"MIT"
] | 1 | 2021-01-28T05:13:47.000Z | 2021-01-28T05:13:47.000Z | scripts/zMayaTools/bake_transform.py | fsanges/zMayaTools | 795168d497459b43439e03a55233320f90d8d11c | [
"MIT"
] | null | null | null | scripts/zMayaTools/bake_transform.py | fsanges/zMayaTools | 795168d497459b43439e03a55233320f90d8d11c | [
"MIT"
] | null | null | null | # Bake the transform on each frame from one transform to another.
#
# This is similar to the graph editor's keyframe bake (without any smart key reduction),
# but bakes one object to another. This is useful for baking a constraint to another
# transform.
#
# For example, you can constrain a locator to a character's hand, and then bake the
# locator to another locator. The second locator then has the position of the character's
# hand on each frame, so you can constrain other things to it without creating a DG
# dependency between the objects.
#
# This can also be used to bake history-dependent behavior like dynamics to keyframes.
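# A minimal usage sketch (hypothetical object names, not part of the original file):
# select the constrained source locator and then the destination locator, e.g.
#   pm.select(['hand_src_loc', 'hand_baked_loc'])
#   bake_transform(position=True, rotation=True, scale=False)
# and the destination receives one keyframe per frame of the playback range.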
import math, time
import pymel.core as pm
from maya import OpenMaya as om
from maya import OpenMayaAnim as oma
from zMayaTools import maya_helpers
# This isn't in the v1 API, but mixing it seems safe.
from maya.api.MDGContextGuard import MDGContextGuard
from zMayaTools import maya_logging
log = maya_logging.get_log()
| 42.560811 | 107 | 0.646611 | # Bake the transform on each frame from one transform to another.
#
# This is similar to the graph editor's keyframe bake (without any smart key reduction),
# but bakes one object to another. This is useful for baking a constraint to another
# transform.
#
# For example, you can constrain a locator to a character's hand, and then bake the
# locator to another locator. The second locator then has the position of the character's
# hand on each frame, so you can constrain other things to it without creating a DG
# dependency between the objects.
#
# This can also be used to bake history-dependent behavior like dynamics to keyframes.
import math, time
import pymel.core as pm
from maya import OpenMaya as om
from maya import OpenMayaAnim as oma
from zMayaTools import maya_helpers
# This isn't in the v1 API, but mixing it seems safe.
from maya.api.MDGContextGuard import MDGContextGuard
from zMayaTools import maya_logging
log = maya_logging.get_log()
def bake_transform(*args, **kwargs):
with maya_helpers.restores() as restores:
# Temporarily pause the viewport.
#
# This is only needed because the progress window needs to force a refresh, and refreshing
# makes this 4x slower if viewports are enabled.
restores.append(maya_helpers.SetAndRestorePauseViewport(True))
min_frame = int(pm.playbackOptions(q=True, min=True))
max_frame = int(pm.playbackOptions(q=True, max=True))
# This should be cancellable, but for some reason the cancel button callback
# never gets called.
with maya_helpers.ProgressWindowMaya(1, title='Baking keyframes',
with_secondary_progress=False, with_cancel=False) as progress:
return bake_transform_internal(min_frame, max_frame, progress=progress,
*args, **kwargs)
def bake_transform_internal(min_frame, max_frame,
position=True, rotation=True, scale=False, progress=None):
nodes = pm.ls(sl=True, type='transform')
if len(nodes) != 2:
log.info('Select a source and a destination transform')
return
src = nodes[0]
dst = nodes[1]
# Updating the progress window every frame is too slow, so we only update it
# every 10 frames.
update_progress_every = 10
total_frames = max_frame - min_frame + 1
progress.set_total_progress_value(total_frames*2 / update_progress_every)
# Make sure our target attributes aren't locked. (Can we check if they're writable,
# eg. disconnected or connected but writable?)
attributes_to_check = []
if position:
attributes_to_check.extend(('t', 'tx', 'ty', 'tz'))
if rotation:
attributes_to_check.extend(('r', 'rx', 'ry', 'rz'))
if scale:
attributes_to_check.extend(('s', 'sx', 'sy', 'sz'))
failed = False
for attr_name in attributes_to_check:
attr = dst.attr(attr_name)
if attr.get(lock=True):
log.error('Attribute %s is locked', attr)
failed = True
if failed:
return
# Match the transform to the target on each frame. Don't set keyframes while in
# an MDGContext (this confuses Maya badly). Just store the results.
mtime = om.MTime()
frame_range = range(min_frame, max_frame+1)
values = []
with maya_helpers.restores() as restores:
# Disable stepped preview while we do this.
restores.append(maya_helpers.SetAndRestoreCmd(pm.playbackOptions, key='blockingAnim', value=False))
# Temporarily disconnect any transform connections. If there are already keyframes
# connected, calling pm.matchTransform will have no effect. These connections will
# be restored when this restores() block exits.
def disconnect_attrs(attr):
for channel in ('x', 'y', 'z'):
restores.append(maya_helpers.SetAndRestoreAttr(dst.attr(attr + channel), 1))
restores.append(maya_helpers.SetAndRestoreAttr(dst.attr(attr), (1,1,1)))
if position: disconnect_attrs('t')
if rotation: disconnect_attrs('r')
if scale: disconnect_attrs('s')
# Read the position on each frame. We'll read all values, then write all results at once.
for frame in frame_range:
if (frame % update_progress_every) == 0:
progress.update()
mtime.setValue(frame)
with MDGContextGuard(om.MDGContext(mtime)) as guard:
pm.matchTransform(dst, src, pos=True, rot=True, scl=True)
# Store the resulting transform values.
values.append((dst.t.get(), dst.r.get(), dst.s.get()))
# Now that the above restores block has exited, any connections to the transform
# will be restored. Apply the transforms we stored now that we're no longer in
# an MDGContext.
with maya_helpers.restores() as restores:
# Disable auto-keyframe while we do this. Otherwise, a keyframe will also
# be added at the current frame.
restores.append(maya_helpers.SetAndRestoreCmd(pm.autoKeyframe, key='state', value=False))
current_frame = pm.currentTime(q=True)
# Set each destination node's transform on each frame.
for frame, (t, r, s) in zip(frame_range, values):
if (frame % update_progress_every) == 0:
progress.update()
# Work around some character set quirks. If we set a keyframe with
# pm.setKeyframe on the current frame, we need to also set it explicitly
# on the attribute too, or else the keyframe won't always have the
# correct value.
def set_keyframe(attr, value):
if frame == current_frame:
dst.attr(attr).set(value)
pm.setKeyframe(dst, at=attr, time=frame, value=value)
if position:
set_keyframe('tx', t[0])
set_keyframe('ty', t[1])
set_keyframe('tz', t[2])
if rotation:
set_keyframe('rx', r[0])
set_keyframe('ry', r[1])
set_keyframe('rz', r[2])
if scale:
set_keyframe('sx', s[0])
set_keyframe('sy', s[1])
set_keyframe('sz', s[2])
| 5,287 | 0 | 46 |
78f75ad9c04b23ec42185b40ad735003f1b7b9e5 | 4,233 | py | Python | deckbuilder/card/models.py | maxawolff/mtg-deckbuilder | 4ac7d0d7a0ed015dafd5143df0021182deb7cb01 | [
"MIT"
] | null | null | null | deckbuilder/card/models.py | maxawolff/mtg-deckbuilder | 4ac7d0d7a0ed015dafd5143df0021182deb7cb01 | [
"MIT"
] | null | null | null | deckbuilder/card/models.py | maxawolff/mtg-deckbuilder | 4ac7d0d7a0ed015dafd5143df0021182deb7cb01 | [
"MIT"
] | null | null | null | """Model of a single magic card."""
from django.db import models
from multiselectfield import MultiSelectField
from django.utils.text import slugify
class Set(models.Model):
"""Class for set model."""
name = models.CharField(max_length=50)
set_id = models.CharField(max_length=5, null=True, blank=True)
slug = models.SlugField(max_length=40, unique=True, blank=True, null=True)
# sealed_format = models.ManyToManyField('self')
big_set = models.ForeignKey('self', on_delete=models.CASCADE,
blank=True, null=True, related_name='big_sets')
small_set = models.ForeignKey('self', on_delete=models.CASCADE,
blank=True, null=True,
related_name='small_sets')
third_set = models.ForeignKey('self', on_delete=models.CASCADE,
blank=True, null=True,
related_name='third_sets')
def __str__(self):
"""Change how model is displayed when printed."""
return self.name
class Card(models.Model):
"""Class for card model."""
COLORS = (['W', 'White'], ['U', 'Blue'], ['B', 'Black'],
['R', 'Red'], ['G', 'Green'], ['C', 'Colorless'],
['N', 'Generic'])
RARITIES = (['M', 'Mythic-Rare'], ['R', 'Rare'],
['U', 'Uncommon'], ['C', 'Common'])
cmc = models.CharField(max_length=20)
colors = MultiSelectField(max_length=100, choices=COLORS)
image = models.ImageField(upload_to='images')
loyalty = models.IntegerField(null=True, blank=True)
mana_cost = models.CharField(max_length=20, null=True, blank=True)
name = models.CharField(max_length=30)
power = models.CharField(max_length=5, null=True, blank=True)
toughness = models.CharField(max_length=5, null=True, blank=True)
rarity = models.CharField(max_length=20, choices=RARITIES)
number = models.CharField(max_length=10)
card_text = models.CharField(max_length=600, blank=True, null=True)
card_type = models.CharField(max_length=50)
card_subtypes = models.CharField(max_length=50, blank=True, null=True)
from_set = models.ForeignKey(Set, on_delete=models.CASCADE,
blank=True, null=True)
number = models.CharField(max_length=10, null=True, blank=True)
slug = models.SlugField(max_length=40, unique=True, blank=True, null=True)
rares = models.ForeignKey(Set, on_delete=models.CASCADE,
blank=True, null=True, related_name='rares')
uncommons = models.ForeignKey(Set, on_delete=models.CASCADE,
blank=True, null=True,
related_name='uncommons')
commons = models.ForeignKey(Set, on_delete=models.CASCADE,
blank=True, null=True, related_name='commons')
mythics = models.ForeignKey(Set, on_delete=models.CASCADE,
blank=True, null=True, related_name='mythics')
back_side = models.OneToOneField('self', on_delete=models.CASCADE,
blank=True, null=True,
related_name='front_side')
in_pack = models.BooleanField(default=True)
def save(self, *args, **kwargs):
"""Overwrite save to generate slug."""
if not self.slug:
self.slug = self._get_unique_slug()
super().save()
def __str__(self):
"""Change how model is displayed when printed."""
return self.name
class Deck(models.Model):
"""Class for a deck model."""
FORMATS = (['ST', 'Standard'], ['SE', 'Sealed'], ['DR', 'Draft'])
name = models.CharField(max_length=50)
deck_format = models.CharField(max_length=50, choices=FORMATS)
card_list = models.ManyToManyField(Card)
# class Transform(models.Model):
# """Model for the fliped side of a trasnform card."""
# front =
| 41.910891 | 79 | 0.605245 | """Model of a single magic card."""
from django.db import models
from multiselectfield import MultiSelectField
from django.utils.text import slugify
class Set(models.Model):
"""Class for set model."""
name = models.CharField(max_length=50)
set_id = models.CharField(max_length=5, null=True, blank=True)
slug = models.SlugField(max_length=40, unique=True, blank=True, null=True)
# sealed_format = models.ManyToManyField('self')
big_set = models.ForeignKey('self', on_delete=models.CASCADE,
blank=True, null=True, related_name='big_sets')
small_set = models.ForeignKey('self', on_delete=models.CASCADE,
blank=True, null=True,
related_name='small_sets')
third_set = models.ForeignKey('self', on_delete=models.CASCADE,
blank=True, null=True,
related_name='third_sets')
def __str__(self):
"""Change how model is displayed when printed."""
return self.name
class Card(models.Model):
"""Class for card model."""
COLORS = (['W', 'White'], ['U', 'Blue'], ['B', 'Black'],
['R', 'Red'], ['G', 'Green'], ['C', 'Colorless'],
['N', 'Generic'])
RARITIES = (['M', 'Mythic-Rare'], ['R', 'Rare'],
['U', 'Uncommon'], ['C', 'Common'])
cmc = models.CharField(max_length=20)
colors = MultiSelectField(max_length=100, choices=COLORS)
image = models.ImageField(upload_to='images')
loyalty = models.IntegerField(null=True, blank=True)
mana_cost = models.CharField(max_length=20, null=True, blank=True)
name = models.CharField(max_length=30)
power = models.CharField(max_length=5, null=True, blank=True)
toughness = models.CharField(max_length=5, null=True, blank=True)
rarity = models.CharField(max_length=20, choices=RARITIES)
number = models.CharField(max_length=10)
card_text = models.CharField(max_length=600, blank=True, null=True)
card_type = models.CharField(max_length=50)
card_subtypes = models.CharField(max_length=50, blank=True, null=True)
from_set = models.ForeignKey(Set, on_delete=models.CASCADE,
blank=True, null=True)
number = models.CharField(max_length=10, null=True, blank=True)
slug = models.SlugField(max_length=40, unique=True, blank=True, null=True)
rares = models.ForeignKey(Set, on_delete=models.CASCADE,
blank=True, null=True, related_name='rares')
uncommons = models.ForeignKey(Set, on_delete=models.CASCADE,
blank=True, null=True,
related_name='uncommons')
commons = models.ForeignKey(Set, on_delete=models.CASCADE,
blank=True, null=True, related_name='commons')
mythics = models.ForeignKey(Set, on_delete=models.CASCADE,
blank=True, null=True, related_name='mythics')
back_side = models.OneToOneField('self', on_delete=models.CASCADE,
blank=True, null=True,
related_name='front_side')
in_pack = models.BooleanField(default=True)
def _get_unique_slug(self):
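        """Slugify the card name, appending '-<n>' until the slug is unique among Card objects."""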
slug = slugify(self.name)
unique_slug = slug
num = 1
while Card.objects.filter(slug=unique_slug).exists():
unique_slug = '{}-{}'.format(slug, num)
num += 1
return unique_slug
def save(self, *args, **kwargs):
"""Overwrite save to generate slug."""
if not self.slug:
self.slug = self._get_unique_slug()
super().save()
def __str__(self):
"""Change how model is displayed when printed."""
return self.name
class Deck(models.Model):
"""Class for a deck model."""
FORMATS = (['ST', 'Standard'], ['SE', 'Sealed'], ['DR', 'Draft'])
name = models.CharField(max_length=50)
deck_format = models.CharField(max_length=50, choices=FORMATS)
card_list = models.ManyToManyField(Card)
# class Transform(models.Model):
# """Model for the fliped side of a trasnform card."""
# front =
| 245 | 0 | 27 |
0aebbaf8b359a3b4a74ff1c6e8498b0e0f6277fc | 1,431 | py | Python | .github/workflows/ensure_clean_notebooks.py | ICESAT-2HackWeek/website2022 | 10229090a94edb3e5734dbc00149b0502ad5396a | [
"MIT"
] | 8 | 2022-02-01T16:54:29.000Z | 2022-03-22T18:09:31.000Z | .github/workflows/ensure_clean_notebooks.py | ICESAT-2HackWeek/website2022 | 10229090a94edb3e5734dbc00149b0502ad5396a | [
"MIT"
] | 99 | 2022-01-27T22:01:05.000Z | 2022-03-31T19:42:28.000Z | .github/workflows/ensure_clean_notebooks.py | ICESAT-2HackWeek/website2022 | 10229090a94edb3e5734dbc00149b0502ad5396a | [
"MIT"
] | 25 | 2022-02-02T00:58:27.000Z | 2022-03-24T20:59:57.000Z | import yaml
import nb_clean as nbc
from pathlib import Path
import nbformat
import sys
with open('./book/_config.yml') as f:
data = yaml.safe_load(f)
# Sometimes we use rendered notebooks instead of executing them
exclude_paths = []
for pattern in data['execute']['exclude_patterns']:
exclude_paths += list(Path('book/tutorials').glob(pattern))
exclude_notebooks = [path.as_posix() for path in exclude_paths]
print('Excluded from execution:\n', '\n'.join(exclude_notebooks))
# Scrub outputs for spellcheck and linkcheck
for notebook in exclude_notebooks:
print(f'Scrubbing outputs: {notebook}...')
nb = nbformat.read(notebook, as_version=nbformat.NO_CONVERT)
cleaned = nbc.clean_notebook(nb,
remove_empty_cells=True,
preserve_cell_metadata=True)
nbformat.write(cleaned, notebook)
all_ipynbs = [path.as_posix() for path in Path('book/tutorials').rglob('*.ipynb')]
ipynbs = [p for p in all_ipynbs if not '.ipynb_checkpoints' in p]
results = []
for notebook in ipynbs:
#if not notebook in exclude_notebooks:
print(f'Checking {notebook}...')
nb = nbformat.read(notebook, as_version=nbformat.NO_CONVERT)
result = nbc.check_notebook(nb,
remove_empty_cells=True,
preserve_cell_metadata=True)
results.append(result)
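# A False entry means nb_clean found cells/outputs that still need cleaning; exit non-zero so the workflow fails.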
if False in results:
sys.exit(1) | 34.902439 | 82 | 0.675751 | import yaml
import nb_clean as nbc
from pathlib import Path
import nbformat
import sys
with open('./book/_config.yml') as f:
data = yaml.safe_load(f)
# Sometimes we use rendered notebooks instead of executing them
exclude_paths = []
for pattern in data['execute']['exclude_patterns']:
exclude_paths += list(Path('book/tutorials').glob(pattern))
exclude_notebooks = [path.as_posix() for path in exclude_paths]
print('Excluded from execution:\n', '\n'.join(exclude_notebooks))
# Scrub outputs for spellcheck and linkcheck
for notebook in exclude_notebooks:
print(f'Scrubbing outputs: {notebook}...')
nb = nbformat.read(notebook, as_version=nbformat.NO_CONVERT)
cleaned = nbc.clean_notebook(nb,
remove_empty_cells=True,
preserve_cell_metadata=True)
nbformat.write(cleaned, notebook)
all_ipynbs = [path.as_posix() for path in Path('book/tutorials').rglob('*.ipynb')]
ipynbs = [p for p in all_ipynbs if not '.ipynb_checkpoints' in p]
results = []
for notebook in ipynbs:
#if not notebook in exclude_notebooks:
print(f'Checking {notebook}...')
nb = nbformat.read(notebook, as_version=nbformat.NO_CONVERT)
result = nbc.check_notebook(nb,
remove_empty_cells=True,
preserve_cell_metadata=True)
results.append(result)
if False in results:
sys.exit(1) | 0 | 0 | 0 |
30f3acb68606b47810d80f6613338364bf126a6e | 75,314 | py | Python | gerenciador_operacoes.py | David-Machado-Git/LABS---PROGRAMA-HAVAN | c084d8d1b3ba8cca726663cc1400149c3d34a64f | [
"MIT"
] | null | null | null | gerenciador_operacoes.py | David-Machado-Git/LABS---PROGRAMA-HAVAN | c084d8d1b3ba8cca726663cc1400149c3d34a64f | [
"MIT"
] | null | null | null | gerenciador_operacoes.py | David-Machado-Git/LABS---PROGRAMA-HAVAN | c084d8d1b3ba8cca726663cc1400149c3d34a64f | [
"MIT"
] | 1 | 2021-06-24T20:39:03.000Z | 2021-06-24T20:39:03.000Z | from time import sleep
dados = {} ### --> DICTIONARY THAT RECEIVES ALL THE DATA WITH THEIR RESPECTIVE VALUES; ID, NAME, ETC...
lista_de_dados = []
lista_principal = []
copia_dados = []  ### --> LIST THAT RECEIVES A COPY OF EVERY OPERATION'S DATA
codigo_cliente = 0 ### --> TRANSACTION COUNTER
moeda_origem_sigla = 'R$:'
moeda_destino_sigla = 'U$$:'
valor_tot_operacoes = float(0)
valor_tot_operacoes_a = float(0)
tot_taxas = float(0)
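# Running totals per origin -> destination currency pair (each accumulates the original value of every matching operation).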
tot_movimento_brasil_dolar_eua = float(0)
tot_movimento_brasil_euro = float(0)
tot_movimento_brasil_dolar_canada = float(0)
tot_movimento_dolar_eua_brasil = float(0)
tot_movimento_dolar_eua_euro = float(0)
tot_movimento_dolar_eua_dolar_canada = float(0)
tot_movimento_euro_brasil = float(0)
tot_movimento_euro_dolar_eua = float(0)
tot_movimento_euro_dolar_canada = float(0)
tot_movimento_dolar_canada_brasil = float(0)
tot_movimento_dolar_canada_dolar_eua = float(0)
tot_movimento_dolar_canada_euro = float(0)
print('--' * 35)
print(f'\033[7;40m{" GERENCIADOR DE OPERAÇÕES ":*^70}\033[0;0m')
print('--' * 35)
while True:
print(f' |-- {"OPÇÃO ":-<2}{" MENU ":-^38} |')
print(f' |\033[1;90m---------------------------------------------------------- \033[0;0m|')
print(f' |\033[7;40m{" -> 1 <-":.<3}|{" - CADASTROS - CLIENTES - OPERAÇÕES ":^38} \033[0;0m|')
print(f' |\033[1;90m---------------------------------------------------------- \033[0;0m|')
print(f' |{" -> 2 <-":.<3}|{" - LISTAR OPERAÇÕES - ":^38} |')
print(f' |\033[1;90m---------------------------------------------------------- \033[0;0m|')
print(f' |\033[7;40m{" -> 3 <-":.<3}|{" - VALOR TOTAL DAS OPERAÇÕES - ":^38} \033[0;0m|')
print(f' |\033[1;90m---------------------------------------------------------- \033[0;0m|')
print(f' |{" -> 4 <-":.<3}|{" - VALOR TOTAL DAS TAXAS COBRADAS - ":^38} |')
print(f' |\033[1;90m---------------------------------------------------------- \033[0;0m|')
print('--' * 35)
print(f'\033[7;40m{" ESCOLHA UMA DAS OPÇÕES ACIMA: ":*^70}\033[0;0m')
print('--' * 35)
opcao_menu = str(input('Digite a opção desejada:?'))
if opcao_menu.isnumeric():
if opcao_menu == '1':
while True:
print('--' * 35)
print(f'\033[7;40m{" CADASTRAR CLIENTES ":*^70}\033[0;0m')
print('--' * 35)
codigo_cliente += 1
codigo_cliente_convertida = str(codigo_cliente)
print(f' --> {codigo_cliente}º CLIENTE - ORDEM DE SERVIÇO DE Nº [ {codigo_cliente_convertida} ]')
dados['Cód'] = [codigo_cliente]
lista_de_dados.append(codigo_cliente_convertida)
print('--' * 35)
nome = str(input('Digite o nome do cliente:?')).strip().upper()
dados['Nome'] = [nome]
lista_de_dados.append(nome)
# print(f'TESTE DADOS: {dados}')
print('--' * 30)
print('------------------- MOEDAS CADASTRADAS -------------------')
print(' | Digite --> (1) para MOEDA REAL - BRASIL |')
print(' | Digite --> (2) para MOEDA DÓLAR - EUA |')
print(' | Digite --> (3) para MOEDA EURO - EUROPA |')
print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |')
print(' -------------------------------------------')
moeda_origem = str(input('Moeda de origem?: [somente números acima]:?'))
dados['Moeda origem'] = [moeda_origem]
print('--' * 30)
if moeda_origem == '1':
print('MOEDA DE ORIGEM: - REAL - BRASIL')
dados['Moeda origem'] = ['REAL - BRL']
moeda_origem_sigla = 'R$:'
lista_de_dados.append('REAL - BRL')
elif moeda_origem == '2':
print('MOEDA DE ORIGEM: - DÓLAR - EUA')
dados['Moeda origem'] = ['DÓLAR - EUA']
moeda_origem_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - EUA')
elif moeda_origem == '3':
print('MOEDA DE ORIGEM: - EURO - EUROPA')
dados['Moeda origem'] = ['EURO']
moeda_origem_sigla = '€:'
lista_de_dados.append('EURO')
elif moeda_origem == '4':
print('MOEDA DE ORIGEM: - DÓLAR - CANADÁ')
dados['Moeda origem'] = ['DÓLAR - CAD']
moeda_origem_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - CAD')
else:
while True:
print(f'\033[1;41mVALOR INVÁLIDO - SOMENTE NÚMEROS DE 1 A 4 QUE CORRESPONDEM AS MOEDAS CADASTRADAS:\033[0;0m')
print('--' * 30)
print('------------------- MOEDAS CADASTRADAS -------------------')
print(' | Digite --> (1) para MOEDA REAL - BRASIL |')
print(' | Digite --> (2) para MOEDA DÓLAR - EUA |')
print(' | Digite --> (3) para MOEDA EURO - EUROPA |')
print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |')
print(' -------------------------------------------')
moeda_origem = str(input('Moeda de origem?: [somente números acima]:?'))
print('--' * 30)
if moeda_origem == '1':
print('MOEDA DE ORIGEM: - REAL - BRASIL')
dados['Moeda origem'] = ['REAL - BRL']
moeda_origem_sigla = 'R$:'
lista_de_dados.append('REAL - BRL')
break
elif moeda_origem == '2':
print('MOEDA DE ORIGEM: - DÓLAR - EUA')
dados['Moeda origem'] = ['DÓLAR - EUA']
moeda_origem_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - EUA')
break
elif moeda_origem == '3':
print('MOEDA DE ORIGEM: - EURO - EUROPA')
dados['Moeda origem'] = ['EURO']
moeda_origem_sigla = '€:'
lista_de_dados.append('EURO')
break
elif moeda_origem == '4':
print('MOEDA DE ORIGEM: - DÓLAR - CANADÁ')
dados['Moeda origem'] = ['DÓLAR - CAD']
moeda_origem_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - CAD')
break
print('--' * 30)
print('------------------- MOEDAS CADASTRADAS -------------------')
print(' | Digite --> (1) para MOEDA REAL - BRASIL |')
print(' | Digite --> (2) para MOEDA DÓLAR - EUA |')
print(' | Digite --> (3) para MOEDA EURO - EUROPA |')
print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |')
print(' -------------------------------------------')
moeda_destino = str(input('Moeda de destino?: [somente números acima]:?'))
print('--' * 30)
if moeda_destino == '1':
print('MOEDA DE DESTINO: - REAL - BRASIL')
dados['Moeda destino'] = ['REAL - BRASIL']
moeda_destino_sigla = 'R$:'
lista_de_dados.append('REAL - BRASIL')
elif moeda_destino == '2':
print('MOEDA DE DESTINO: - DÓLAR - EUA')
dados['Moeda destino'] = ['DÓLAR - EUA']
moeda_destino_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - EUA')
elif moeda_destino == '3':
print('MOEDA DE DESTINO: - EURO - EUROPA')
dados['Moeda destino'] = ['EURO']
moeda_destino_sigla = '€:'
lista_de_dados.append('EURO')
elif moeda_destino == '4':
print('MOEDA DE DESTINO: - DÓLAR - CANADÁ')
dados['Moeda destino'] = ['DÓLAR CAD']
moeda_destino_sigla = 'U$$:'
lista_de_dados.append('DÓLAR CAD')
else:
while True:
print(f'\033[1;41mVALOR INVÁLIDO - SOMENTE NÚMEROS DE 1 A 4 QUE CORRESPONDEM AS MOEDAS CADASTRADAS:\033[0;0m')
print('--' * 30)
print('------------------- MOEDAS CADASTRADAS -------------------')
print(' | Digite --> (1) para MOEDA REAL - BRASIL |')
print(' | Digite --> (2) para MOEDA DÓLAR - EUA |')
print(' | Digite --> (3) para MOEDA EURO - EUROPA |')
print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |')
print(' -------------------------------------------')
moeda_destino = str(input('Moeda de destino?: [somente números acima]:?'))
print('--' * 30)
if moeda_destino == '1':
print('MOEDA DE DESTINO: - REAL - BRASIL')
dados['Moeda destino'] = ['REAL - BRASIL']
moeda_destino_sigla = 'R$:'
lista_de_dados.append('REAL - BRASIL')
break
elif moeda_destino == '2':
print('MOEDA DE DESTINO: - DÓLAR - EUA')
dados['Moeda destino'] = ['DÓLAR - EUA']
moeda_destino_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - EUA')
break
elif moeda_destino == '3':
print('MOEDA DE DESTINO: - EURO - EUROPA')
dados['Moeda destino'] = ['EURO']
moeda_destino_sigla = '€:'
lista_de_dados.append('EURO')
break
elif moeda_destino == '4':
print('MOEDA DE DESTINO: - DÓLAR - CANADÁ')
dados['Moeda destino'] = ['DÓLAR CAD']
moeda_destino_sigla = 'U$$:'
lista_de_dados.append('DÓLAR CAD')
break
print('--' * 30)
data_operacao = str(input('Data da operação:\033[1;90m[NO FORMATO: __/__/____ ]\033[0;0m:?'))
dados['Data Operação'] = [data_operacao]
lista_de_dados.append(data_operacao)
print('--' * 30)
valor_original = str(input(f'Valor original:? {moeda_origem_sigla}'))
dados['Valor Original'] = [valor_original]
lista_de_dados.append(valor_original)
convertendo_valor = float(valor_original)
valor_tot_operacoes += convertendo_valor
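                # Route this operation's original value into the accumulator for its origin/destination pair.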
if moeda_origem == '1' and moeda_destino == '2':
convertendo_valor_brasil = float(valor_original)
tot_movimento_brasil_dolar_eua += convertendo_valor_brasil
elif moeda_origem == '1' and moeda_destino == '3':
convertendo_valor_brasil_euro = float(valor_original)
tot_movimento_brasil_euro += convertendo_valor_brasil_euro
elif moeda_origem == '1' and moeda_destino == '4':
convertendo_valor_brasil_dolar_canada = float(valor_original)
tot_movimento_brasil_dolar_canada += convertendo_valor_brasil_dolar_canada
if moeda_origem == '2' and moeda_destino == '1':
convertendo_valor_eua_brasil = float(valor_original)
tot_movimento_dolar_eua_brasil += convertendo_valor_eua_brasil
elif moeda_origem == '2' and moeda_destino == '3':
convertendo_valor_eua_euro = float(valor_original)
tot_movimento_dolar_eua_euro += convertendo_valor_eua_euro
elif moeda_origem == '2' and moeda_destino == '4':
convertendo_valor_eua_canada = float(valor_original)
tot_movimento_dolar_eua_dolar_canada += convertendo_valor_eua_canada
if moeda_origem == '3' and moeda_destino == '1':
convertendo_valor_euro_brasil = float(valor_original)
tot_movimento_euro_brasil += convertendo_valor_euro_brasil
elif moeda_origem == '3' and moeda_destino == '2':
convertendo_valor_euro_eua = float(valor_original)
tot_movimento_euro_dolar_eua += convertendo_valor_euro_eua
elif moeda_origem == '3' and moeda_destino == '4':
convertendo_valor_euro_canada = float(valor_original)
tot_movimento_euro_dolar_canada += convertendo_valor_euro_canada
if moeda_origem == '4' and moeda_destino == '1':
convertendo_valor_canada_brasil = float(valor_original)
tot_movimento_dolar_canada_brasil += convertendo_valor_canada_brasil
elif moeda_origem == '4' and moeda_destino == '2':
convertendo_valor_canada_eua = float(valor_original)
tot_movimento_dolar_canada_dolar_eua += convertendo_valor_canada_eua
elif moeda_origem == '4' and moeda_destino == '3':
convertendo_valor_canada_euro = float(valor_original)
tot_movimento_dolar_canada_euro += convertendo_valor_canada_euro
print('--' * 30)
valor_convertido = str(input(f'Valor convertido:? {moeda_destino_sigla}'))
dados['Valor Convertido'] = [valor_convertido]
lista_de_dados.append(valor_convertido)
print('--' * 30)
taxa_cobrada = str(input(f'Taxa cobrada:? R$:'))
conversao_taxa = float(taxa_cobrada)
tot_taxas += conversao_taxa
dados['Taxa Cobrada'] = [taxa_cobrada]
lista_de_dados.append(taxa_cobrada)
print('--' * 30)
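                # Snapshot this operation: copia_dados is what the option-2 listing report iterates over.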
copia_dados.append(dados.copy())
lista_principal.append(lista_de_dados)
print('--' * 30)
print('\033[1;90m----------------------- FIM CADASTRO -----------------------\033[0;0m')
print('--' * 30)
while True:
continua_cadastro = str(input('\nCadastrar mais clientes:? [C - P/ CONTINUAR OU S - P/ SAIR]:?')).strip().upper()[0]
print('')
if continua_cadastro == 'C':
break
elif continua_cadastro == 'S':
break
else:
print(f'\033[1;41m- SOMENTE UMA DAS OPÇÕES ACIMA: [C - CONTINUAR/ S - SAIR]:\033[0;0m')
if continua_cadastro == 'S':
print('--' * 35)
print(' \033[1;30m\033[1;43m CARREGANDO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
break
elif opcao_menu == '2':
print('--' * 35)
print(f'\033[7;40m{" RELATÓRIOS --> OPERAÇÕES REALIZADAS ":*^70}\033[0;0m')
print('--' * 35)
if codigo_cliente == 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO DADOS\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print()
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print('\033[1;41mATÉ O PRESENTE MOMENTO NÃO HÁ NENHUMA OPERAÇÃO REALIZADA !\033[0;0m')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print()
print('--' * 35)
print('--' * 35)
print(' \033[1;30m\033[1;43m VOLTANDO AO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif codigo_cliente > 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO DADOS\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print(f'\033[7;40m{" RELATÓRIOS --> OPERAÇÕES REALIZADAS: ":*^70}\033[0;0m')
print('--' * 35)
print()
print(f'\033[1;42mATÉ O PRESENTE MOMENTO VOCÊ TEM UM TOTAL DE: {codigo_cliente} \033[0;0m',end='')
if codigo_cliente == 1:
print('\033[1;42mOPERAÇÃO !\033[0;0m')
else:
print('\033[1;42mOPERAÇÕES !\033[0;0m')
print('--' * 35)
print(' -- | ABAIXO SEGUE A LISTA COMPLETA DE TODOS OS REGISTROS ATÉ O MOMENTO! | -- ')
print('--' * 35)
print(' -------------------- |SEQUÊNCIA E ORDEM DE COLUNAS| -------------- VALORES -------------')
print('1ºCÓD:|2ºNOME: |3ºMOED ORIG. |4º MOED DEST. |5º DATA: |6ºORIGI:|7ºCONV:|8ºTAXA. ')
print('---' * 30)
for c, v in enumerate(copia_dados):
for d in v.values():
print(f" {str(d).replace('[', '').replace(']', '').replace('', '')}", end=' ')
print()
print()
print()
print('--' * 35)
print('--' * 35)
while True:
voltar_menu_principal = str(input('Digite:[S]-SAIR:')).strip().upper()[0]
if voltar_menu_principal == 'S':
break
else:
print(f'\033[1;41mSOMENTE DIGITE A LETRA [S]-PARA SAIR! :\033[0;0m')
print('--' * 35)
print(' \033[1;30m\033[1;43m CARREGANDO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif opcao_menu == '3':
if codigo_cliente == 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO OPERAÇÕES\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print()
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print('\033[1;41mATÉ O PRESENTE MOMENTO NÃO HÁ NENHUMA OPERAÇÃO REALIZADA !\033[0;0m')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print()
print('--' * 35)
print('--' * 35)
print(' \033[1;30m\033[1;43m VOLTANDO AO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif codigo_cliente > 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO OPERAÇÕES\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print(f'\033[7;40m{" RELATÓRIOS --> VALOR TOTAL DAS OPERAÇÕES: ":*^70}\033[0;0m')
print('--' * 35)
print()
print(f'\033[1;42mATÉ O PRESENTE MOMENTO VOCÊ TEM UM TOTAL DE: {codigo_cliente} \033[0;0m', end='')
if codigo_cliente == 1:
print('\033[1;42mOPERAÇÃO !\033[0;0m')
else:
print('\033[1;42mOPERAÇÕES !\033[0;0m')
if tot_movimento_brasil_dolar_eua > 0:
print(f'DE: BRL - BRASIL / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_dolar_eua:.2f} REAIS.')
if tot_movimento_brasil_euro > 0:
                    print(f'DE: BRL - BRASIL / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_euro:.2f} REAIS.')
if tot_movimento_brasil_dolar_canada > 0:
print(f'DE: BRL - BRASIL / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_dolar_canada:.2f} REAIS.')
if tot_movimento_dolar_eua_brasil > 0:
print(f'DE: DÓLAR - EUA / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_brasil:.2f} DÓLAR')
if tot_movimento_dolar_eua_euro > 0:
print(f'DE: DÓLAR - EUA / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_euro:.2f} DÓLAR')
if tot_movimento_dolar_eua_dolar_canada > 0:
print(f'DE: DÓLAR - EUA / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_dolar_canada:.2f} DÓLAR')
if tot_movimento_euro_brasil > 0:
print(f'DE: EURO - EUROPA / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_brasil:.2f} EURO.')
if tot_movimento_euro_dolar_eua > 0:
print(f'DE: EURO - EUROPA / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_dolar_eua:.2f} EURO.')
if tot_movimento_euro_dolar_canada > 0:
print(f'DE: EURO - EUROPA / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_dolar_canada:.2f} EURO.')
if tot_movimento_dolar_canada_brasil > 0:
print(f'DE: DÓLAR - CANADÁ / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_brasil:.2f} DÓLAR')
if tot_movimento_dolar_canada_dolar_eua > 0:
print(f'DE: DÓLAR - CANADÁ / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_dolar_eua:.2f} DÓLAR')
if tot_movimento_dolar_canada_euro > 0:
print(f'DE: DÓLAR - CANADÁ / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_euro:.2f} DÓLAR')
print()
print()
print()
print('--' * 35)
print('--' * 35)
while True:
voltar_menu_principal = str(input('Digite:[S]-SAIR:')).strip().upper()[0]
if voltar_menu_principal == 'S':
break
else:
print(f'\033[1;41mSOMENTE DIGITE A LETRA [S]-PARA SAIR! :\033[0;0m')
print('--' * 35)
print(' \033[1;30m\033[1;43m CARREGANDO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif opcao_menu == '4':
if codigo_cliente == 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO SISTEMA\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print()
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print('\033[1;41mATÉ O PRESENTE MOMENTO NÃO HÁ NENHUMA OPERAÇÃO REALIZADA !\033[0;0m')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print()
print('--' * 35)
print('--' * 35)
print(' \033[1;30m\033[1;43m VOLTANDO AO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif codigo_cliente > 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO SISTEMA\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print(f'\033[7;40m{" RELATÓRIOS --> VALOR TOTAL DE TAXAS: ":*^70}\033[0;0m')
print('--' * 35)
print()
print(f'\033[1;42mATÉ O PRESENTE MOMENTO VOCÊ TEM: {codigo_cliente} \033[0;0m', end='')
if codigo_cliente == 1:
print('\033[1;42mOPERAÇÃO REGISTRADA!\033[0;0m')
else:
print('\033[1;42mOPERAÇÕES REGISTRADAS!\033[0;0m')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print(f'SOMANDO TODAS AS ENTRADAS EM TAXAS: TEMOS UM TOTAL DÊ R$: {tot_taxas:.2f} REAIS.')
print(f'AS MESMAS SÃO REFERENTE AS DEVIDAS CONVERSÕES ABAIXO:')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
if tot_movimento_brasil_dolar_eua > 0:
print(f'DE: BRL - BRASIL / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_dolar_eua:.2f} REAIS.')
if tot_movimento_brasil_euro > 0:
                    print(f'DE: BRL - BRASIL / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_euro:.2f} REAIS.')
if tot_movimento_brasil_dolar_canada > 0:
print(f'DE: BRL - BRASIL / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_dolar_canada:.2f} REAIS.')
if tot_movimento_dolar_eua_brasil > 0:
print(f'DE: DÓLAR - EUA / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_brasil:.2f} DÓLAR')
if tot_movimento_dolar_eua_euro > 0:
print(f'DE: DÓLAR - EUA / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_euro:.2f} DÓLAR')
if tot_movimento_dolar_eua_dolar_canada > 0:
print(f'DE: DÓLAR - EUA / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_dolar_canada:.2f} DÓLAR')
if tot_movimento_euro_brasil > 0:
print(f'DE: EURO - EUROPA / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_brasil:.2f} EURO.')
if tot_movimento_euro_dolar_eua > 0:
print(f'DE: EURO - EUROPA / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_dolar_eua:.2f} EURO.')
if tot_movimento_euro_dolar_canada > 0:
print(f'DE: EURO - EUROPA / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_dolar_canada:.2f} EURO.')
if tot_movimento_dolar_canada_brasil > 0:
print(f'DE: DÓLAR - CANADÁ / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_brasil:.2f} DÓLAR')
if tot_movimento_dolar_canada_dolar_eua > 0:
print(f'DE: DÓLAR - CANADÁ / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_dolar_eua:.2f} DÓLAR')
if tot_movimento_dolar_canada_euro > 0:
print(f'DE: DÓLAR - CANADÁ / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_euro:.2f} DÓLAR')
print()
print()
print()
print('--' * 35)
print('--' * 35)
while True:
voltar_menu_principal = str(input('Digite:[S]-SAIR:')).strip().upper()[0]
if voltar_menu_principal == 'S':
break
print('--' * 35)
print(' \033[1;30m\033[1;43m CARREGANDO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
        else:
            print('--' * 30)
            print(f'\033[1;41m- SOMENTE NÚMEROS DE 1 A 4 QUE CORRESPONDEM AS OPÇÕES DO MENU:\033[0;0m')
            print('--' * 30)
else:
while True:
print('--' * 30)
print(f'\033[1;41m- SOMENTE NÚMEROS DE 1 A 4 QUE CORRESPONDEM AS OPÇÕES DO MENU:\033[0;0m')
print('--' * 30)
opcao_menu = str(input('Digite a opção desejada:?'))
print('--' * 30)
if opcao_menu == '1':
while True:
print('--' * 35)
print(f'\033[7;40m{" CADASTRAR CLIENTES ":*^70}\033[0;0m')
print('--' * 35)
codigo_cliente += 1
codigo_cliente_convertida = str(codigo_cliente)
print(f' --> {codigo_cliente}º CLIENTE - ORDEM DE SERVIÇO DE Nº [ {codigo_cliente_convertida} ]')
dados['Cód'] = [codigo_cliente]
lista_de_dados.append(codigo_cliente_convertida)
print('--' * 35)
nome = str(input('Digite o nome do cliente:?')).strip().upper()
dados['Nome'] = [nome]
lista_de_dados.append(nome)
# print(f'TESTE DADOS: {dados}')
print('--' * 30)
print('------------------- MOEDAS CADASTRADAS -------------------')
print(' | Digite --> (1) para MOEDA REAL - BRASIL |')
print(' | Digite --> (2) para MOEDA DÓLAR - EUA |')
print(' | Digite --> (3) para MOEDA EURO - EUROPA |')
print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |')
print(' -------------------------------------------')
moeda_origem = str(input('Moeda de origem?: [somente números acima]:?'))
dados['Moeda origem'] = [moeda_origem]
print('--' * 30)
if moeda_origem == '1':
print('MOEDA DE ORIGEM: - REAL - BRASIL')
dados['Moeda origem'] = ['REAL - BRL']
moeda_origem_sigla = 'R$:'
lista_de_dados.append('REAL - BRL')
elif moeda_origem == '2':
print('MOEDA DE ORIGEM: - DÓLAR - EUA')
dados['Moeda origem'] = ['DÓLAR - EUA']
moeda_origem_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - EUA')
elif moeda_origem == '3':
print('MOEDA DE ORIGEM: - EURO - EUROPA')
dados['Moeda origem'] = ['EURO']
moeda_origem_sigla = '€:'
lista_de_dados.append('EURO')
elif moeda_origem == '4':
print('MOEDA DE ORIGEM: - DÓLAR - CANADÁ')
dados['Moeda origem'] = ['DÓLAR - CAD']
moeda_origem_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - CAD')
else:
while True:
print(
f'\033[1;41mVALOR INVÁLIDO - SOMENTE NÚMEROS DE 1 A 4 QUE CORRESPONDEM AS MOEDAS CADASTRADAS:\033[0;0m')
print('--' * 30)
print('------------------- MOEDAS CADASTRADAS -------------------')
print(' | Digite --> (1) para MOEDA REAL - BRASIL |')
print(' | Digite --> (2) para MOEDA DÓLAR - EUA |')
print(' | Digite --> (3) para MOEDA EURO - EUROPA |')
print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |')
print(' -------------------------------------------')
moeda_origem = str(input('Moeda de origem?: [somente números acima]:?'))
print('--' * 30)
if moeda_origem == '1':
print('MOEDA DE ORIGEM: - REAL - BRASIL')
dados['Moeda origem'] = ['REAL - BRL']
moeda_origem_sigla = 'R$:'
lista_de_dados.append('REAL - BRL')
break
elif moeda_origem == '2':
print('MOEDA DE ORIGEM: - DÓLAR - EUA')
dados['Moeda origem'] = ['DÓLAR - EUA']
moeda_origem_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - EUA')
break
elif moeda_origem == '3':
print('MOEDA DE ORIGEM: - EURO - EUROPA')
dados['Moeda origem'] = ['EURO']
moeda_origem_sigla = '€:'
lista_de_dados.append('EURO')
break
elif moeda_origem == '4':
print('MOEDA DE ORIGEM: - DÓLAR - CANADÁ')
dados['Moeda origem'] = ['DÓLAR - CAD']
moeda_origem_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - CAD')
break
print('--' * 30)
print('------------------- MOEDAS CADASTRADAS -------------------')
print(' | Digite --> (1) para MOEDA REAL - BRASIL |')
print(' | Digite --> (2) para MOEDA DÓLAR - EUA |')
print(' | Digite --> (3) para MOEDA EURO - EUROPA |')
print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |')
print(' -------------------------------------------')
moeda_destino = str(input('Moeda de destino?: [somente números acima]:?'))
print('--' * 30)
if moeda_destino == '1':
print('MOEDA DE DESTINO: - REAL - BRASIL')
dados['Moeda destino'] = ['REAL - BRASIL']
moeda_destino_sigla = 'R$:'
lista_de_dados.append('REAL - BRASIL')
elif moeda_destino == '2':
print('MOEDA DE DESTINO: - DÓLAR - EUA')
dados['Moeda destino'] = ['DÓLAR - EUA']
moeda_destino_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - EUA')
elif moeda_destino == '3':
print('MOEDA DE DESTINO: - EURO - EUROPA')
dados['Moeda destino'] = ['EURO']
moeda_destino_sigla = '€:'
lista_de_dados.append('EURO')
elif moeda_destino == '4':
print('MOEDA DE DESTINO: - DÓLAR - CANADÁ')
dados['Moeda destino'] = ['DÓLAR CAD']
moeda_destino_sigla = 'U$$:'
lista_de_dados.append('DÓLAR CAD')
else:
while True:
print(
f'\033[1;41mVALOR INVÁLIDO - SOMENTE NÚMEROS DE 1 A 4 QUE CORRESPONDEM AS MOEDAS CADASTRADAS:\033[0;0m')
print('--' * 30)
print('------------------- MOEDAS CADASTRADAS -------------------')
print(' | Digite --> (1) para MOEDA REAL - BRASIL |')
print(' | Digite --> (2) para MOEDA DÓLAR - EUA |')
print(' | Digite --> (3) para MOEDA EURO - EUROPA |')
print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |')
print(' -------------------------------------------')
moeda_destino = str(input('Moeda de destino?: [somente números acima]:?'))
print('--' * 30)
if moeda_destino == '1':
print('MOEDA DE DESTINO: - REAL - BRASIL')
dados['Moeda destino'] = ['REAL - BRASIL']
moeda_destino_sigla = 'R$:'
lista_de_dados.append('REAL - BRASIL')
break
elif moeda_destino == '2':
print('MOEDA DE DESTINO: - DÓLAR - EUA')
dados['Moeda destino'] = ['DÓLAR - EUA']
moeda_destino_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - EUA')
break
elif moeda_destino == '3':
print('MOEDA DE DESTINO: - EURO - EUROPA')
dados['Moeda destino'] = ['EURO']
moeda_destino_sigla = '€:'
lista_de_dados.append('EURO')
break
elif moeda_destino == '4':
print('MOEDA DE DESTINO: - DÓLAR - CANADÁ')
dados['Moeda destino'] = ['DÓLAR CAD']
moeda_destino_sigla = 'U$$:'
lista_de_dados.append('DÓLAR CAD')
break
print('--' * 30)
data_operacao = str(input('Data da operação:\033[1;90m[NO FORMATO: __/__/____ ]\033[0;0m:?'))
dados['Data Operação'] = [data_operacao]
lista_de_dados.append(data_operacao)
print('--' * 30)
valor_original = str(input(f'Valor original:? {moeda_origem_sigla}'))
dados['Valor Original'] = [valor_original]
lista_de_dados.append(valor_original)
convertendo_valor = float(valor_original)
valor_tot_operacoes += convertendo_valor
if moeda_origem == '1' and moeda_destino == '2':
convertendo_valor_brasil = float(valor_original)
tot_movimento_brasil_dolar_eua += convertendo_valor_brasil
elif moeda_origem == '1' and moeda_destino == '3':
convertendo_valor_brasil_euro = float(valor_original)
tot_movimento_brasil_euro += convertendo_valor_brasil_euro
elif moeda_origem == '1' and moeda_destino == '4':
convertendo_valor_brasil_dolar_canada = float(valor_original)
tot_movimento_brasil_dolar_canada += convertendo_valor_brasil_dolar_canada
if moeda_origem == '2' and moeda_destino == '1':
convertendo_valor_eua_brasil = float(valor_original)
tot_movimento_dolar_eua_brasil += convertendo_valor_eua_brasil
elif moeda_origem == '2' and moeda_destino == '3':
convertendo_valor_eua_euro = float(valor_original)
tot_movimento_dolar_eua_euro += convertendo_valor_eua_euro
elif moeda_origem == '2' and moeda_destino == '4':
convertendo_valor_eua_canada = float(valor_original)
tot_movimento_dolar_eua_dolar_canada += convertendo_valor_eua_canada
if moeda_origem == '3' and moeda_destino == '1':
convertendo_valor_euro_brasil = float(valor_original)
tot_movimento_euro_brasil += convertendo_valor_euro_brasil
elif moeda_origem == '3' and moeda_destino == '2':
convertendo_valor_euro_eua = float(valor_original)
tot_movimento_euro_dolar_eua += convertendo_valor_euro_eua
elif moeda_origem == '3' and moeda_destino == '4':
convertendo_valor_euro_canada = float(valor_original)
tot_movimento_euro_dolar_canada += convertendo_valor_euro_canada
if moeda_origem == '4' and moeda_destino == '1':
convertendo_valor_canada_brasil = float(valor_original)
tot_movimento_dolar_canada_brasil += convertendo_valor_canada_brasil
elif moeda_origem == '4' and moeda_destino == '2':
convertendo_valor_canada_eua = float(valor_original)
tot_movimento_dolar_canada_dolar_eua += convertendo_valor_canada_eua
elif moeda_origem == '4' and moeda_destino == '3':
convertendo_valor_canada_euro = float(valor_original)
tot_movimento_dolar_canada_euro += convertendo_valor_canada_euro
print('--' * 30)
valor_convertido = str(input(f'Valor convertido:? {moeda_destino_sigla}'))
dados['Valor Convertido'] = [valor_convertido]
lista_de_dados.append(valor_convertido)
print('--' * 30)
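# Read the fee charged for this operation and add it to the global fee total (tot_taxas), which feeds the option-4 report.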
taxa_cobrada = str(input(f'Taxa cobrada:? R$:'))
conversao_taxa = float(taxa_cobrada)
tot_taxas += conversao_taxa
dados['Taxa Cobrada'] = [taxa_cobrada]
lista_de_dados.append(taxa_cobrada)
print('--' * 30)
copia_dados.append(dados.copy())
lista_principal.append(lista_de_dados)
print('--' * 30)
print('\033[1;90m----------------------- FIM CADASTRO -----------------------\033[0;0m')
print('--' * 30)
while True:
continua_cadastro = \
str(input('\nCadastrar mais clientes:? [C - P/ CONTINUAR OU S - P/ SAIR]:?')).strip().upper()[0]
print('')
if continua_cadastro == 'C':
break
elif continua_cadastro == 'S':
break
else:
print(f'\033[1;41m- SOMENTE UMA DAS OPÇÕES ACIMA: [C - CONTINUAR/ S - SAIR]:\033[0;0m')
if continua_cadastro == 'S':
print('--' * 35)
print(' \033[1;30m\033[1;43m CARREGANDO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
break
elif opcao_menu == '2':
print('--' * 35)
print(f'\033[7;40m{" RELATÓRIOS --> OPERAÇÕES REALIZADAS ":*^70}\033[0;0m')
print('--' * 35)
if codigo_cliente == 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO DADOS\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print()
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print('\033[1;41mATÉ O PRESENTE MOMENTO NÃO HÁ NENHUMA OPERAÇÃO REALIZADA !\033[0;0m')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print()
print('--' * 35)
print('--' * 35)
print(' \033[1;30m\033[1;43m VOLTANDO AO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif codigo_cliente > 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO DADOS\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print(f'\033[7;40m{" RELATÓRIOS --> OPERAÇÕES REALIZADAS: ":*^70}\033[0;0m')
print('--' * 35)
print()
print(f'\033[1;42mATÉ O PRESENTE MOMENTO VOCÊ TEM UM TOTAL DE: {codigo_cliente} \033[0;0m', end='')
if codigo_cliente == 1:
print('\033[1;42mOPERAÇÃO !\033[0;0m')
else:
print('\033[1;42mOPERAÇÕES !\033[0;0m')
print('--' * 35)
print(' -- | ABAIXO SEGUE A LISTA COMPLETA DE TODOS OS REGISTROS ATÉ O MOMENTO! | -- ')
print('--' * 35)
print(' -------------------- |SEQUÊNCIA E ORDEM DE COLUNAS| -------------- VALORES -------------')
print('1ºCÓD:|2ºNOME: |3ºMOED ORIG. |4º MOED DEST. |5º DATA: |6ºORIGI:|7ºCONV:|8ºTAXA. ')
print('---' * 30)
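# copia_dados holds one dict per registered operation; print every stored field value of each operation on a single line.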
for c, v in enumerate(copia_dados):
for d in v.values():
print(f" {str(d).replace('[', '').replace(']', '')}", end=' ')
print()
print()
print()
print('--' * 35)
print('--' * 35)
while True:
voltar_menu_principal = str(input('Digite:[S]-SAIR:')).strip().upper()[0]
if voltar_menu_principal == 'S':
break
else:
print(f'\033[1;41mSOMENTE DIGITE A LETRA [S]-PARA SAIR! :\033[0;0m')
print('--' * 35)
print(' \033[1;30m\033[1;43m CARREGANDO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif opcao_menu == '3':
if codigo_cliente == 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO OPERAÇÕES\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print()
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print('\033[1;41mATÉ O PRESENTE MOMENTO NÃO HÁ NENHUMA OPERAÇÃO REALIZADA !\033[0;0m')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print()
print('--' * 35)
print('--' * 35)
print(' \033[1;30m\033[1;43m VOLTANDO AO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif codigo_cliente > 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO OPERAÇÕES\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print(f'\033[7;40m{" RELATÓRIOS --> VALOR TOTAL DAS OPERAÇÕES: ":*^70}\033[0;0m')
print('--' * 35)
print()
print(f'\033[1;42mATÉ O PRESENTE MOMENTO VOCÊ TEM UM TOTAL DE: {codigo_cliente} \033[0;0m', end='')
if codigo_cliente == 1:
print('\033[1;42mOPERAÇÃO !\033[0;0m')
else:
print('\033[1;42mOPERAÇÕES !\033[0;0m')
if tot_movimento_brasil_dolar_eua > 0:
print(
f'DE: BRL - BRASIL / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_dolar_eua:.2f} REAIS.')
if tot_movimento_brasil_euro > 0:
print(
f'DE: BRL - BRASIL / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_euro:.2f} REAIS.')
if tot_movimento_brasil_dolar_canada > 0:
print(
f'DE: BRL - BRASIL / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_dolar_canada:.2f} REAIS.')
if tot_movimento_dolar_eua_brasil > 0:
print(
f'DE: DÓLAR - EUA / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_brasil:.2f} DÓLAR')
if tot_movimento_dolar_eua_euro > 0:
print(
f'DE: DÓLAR - EUA / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_euro:.2f} DÓLAR')
if tot_movimento_dolar_eua_dolar_canada > 0:
print(
f'DE: DÓLAR - EUA / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_dolar_canada:.2f} DÓLAR')
if tot_movimento_euro_brasil > 0:
print(
f'DE: EURO - EUROPA / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_brasil:.2f} EURO.')
if tot_movimento_euro_dolar_eua > 0:
print(
f'DE: EURO - EUROPA / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_dolar_eua:.2f} EURO.')
if tot_movimento_euro_dolar_canada > 0:
print(
f'DE: EURO - EUROPA / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_dolar_canada:.2f} EURO.')
if tot_movimento_dolar_canada_brasil > 0:
print(
f'DE: DÓLAR - CANADÁ / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_brasil:.2f} DÓLAR')
if tot_movimento_dolar_canada_dolar_eua > 0:
print(
f'DE: DÓLAR - CANADÁ / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_dolar_eua:.2f} DÓLAR')
if tot_movimento_dolar_canada_euro > 0:
print(
f'DE: DÓLAR - CANADÁ / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_euro:.2f} DÓLAR')
print()
print()
print()
print('--' * 35)
print('--' * 35)
while True:
voltar_menu_principal = str(input('Digite:[S]-SAIR:')).strip().upper()[0]
if voltar_menu_principal == 'S':
break
else:
print(f'\033[1;41mSOMENTE DIGITE A LETRA [S]-PARA SAIR! :\033[0;0m')
print('--' * 35)
print(' \033[1;30m\033[1;43m CARREGANDO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif opcao_menu == '4':
if codigo_cliente == 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO SISTEMA\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print()
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print('\033[1;41mATÉ O PRESENTE MOMENTO NÃO HÁ NENHUMA OPERAÇÃO REALIZADA !\033[0;0m')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print()
print('--' * 35)
print('--' * 35)
print(' \033[1;30m\033[1;43m VOLTANDO AO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif codigo_cliente > 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO SISTEMA\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print(f'\033[7;40m{" RELATÓRIOS --> VALOR TOTAL DE TAXAS: ":*^70}\033[0;0m')
print('--' * 35)
print()
print(f'\033[1;42mATÉ O PRESENTE MOMENTO VOCÊ TEM: {codigo_cliente} \033[0;0m', end='')
if codigo_cliente == 1:
print('\033[1;42mOPERAÇÃO REGISTRADA!\033[0;0m')
else:
print('\033[1;42mOPERAÇÕES REGISTRADAS!\033[0;0m')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print(f'SOMANDO TODAS AS ENTRADAS EM TAXAS: TEMOS UM TOTAL DÊ R$: {tot_taxas:.2f} REAIS.')
print(f'AS MESMAS SÃO REFERENTE AS DEVIDAS CONVERSÕES ABAIXO:')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
if tot_movimento_brasil_dolar_eua > 0:
print(
f'DE: BRL - BRASIL / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_dolar_eua:.2f} REAIS.')
if tot_movimento_brasil_euro > 0:
print(
f'DE: BRL - BRASIL / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_euro:.2f} REAIS.')
if tot_movimento_brasil_dolar_canada > 0:
print(
f'DE: BRL - BRASIL / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_dolar_canada:.2f} REAIS.')
if tot_movimento_dolar_eua_brasil > 0:
print(
f'DE: DÓLAR - EUA / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_brasil:.2f} DÓLAR')
if tot_movimento_dolar_eua_euro > 0:
print(
f'DE: DÓLAR - EUA / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_euro:.2f} DÓLAR')
if tot_movimento_dolar_eua_dolar_canada > 0:
print(
f'DE: DÓLAR - EUA / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_dolar_canada:.2f} DÓLAR')
if tot_movimento_euro_brasil > 0:
print(
f'DE: EURO - EUROPA / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_brasil:.2f} EURO.')
if tot_movimento_euro_dolar_eua > 0:
print(
f'DE: EURO - EUROPA / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_dolar_eua:.2f} EURO.')
if tot_movimento_euro_dolar_canada > 0:
print(
f'DE: EURO - EUROPA / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_dolar_canada:.2f} EURO.')
if tot_movimento_dolar_canada_brasil > 0:
print(
f'DE: DÓLAR - CANADÁ / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_brasil:.2f} DÓLAR')
if tot_movimento_dolar_canada_dolar_eua > 0:
print(
f'DE: DÓLAR - CANADÁ / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_dolar_eua:.2f} DÓLAR')
if tot_movimento_dolar_canada_euro > 0:
print(
f'DE: DÓLAR - CANADÁ / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_euro:.2f} DÓLAR')
print()
print()
print()
print('--' * 35)
print('--' * 35)
while True:
voltar_menu_principal = str(input('Digite:[S]-SAIR:')).strip().upper()[0]
if voltar_menu_principal == 'S':
break
print('--' * 35)
print(' \033[1;30m\033[1;43m CARREGANDO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
| 50.580255 | 148 | 0.42036 | from time import sleep
dados = {} ### --> DICIONÁRIO RECEBE TODOS OS DADOS COM SEUS RESPECTIVOS VALORES; ID, NOME ETC...
lista_de_dados = []
lista_principal = []
copia_dados = []### --> DICIONÁRIO RECEBE TODOS OS DADOS
codigo_cliente = 0 ### --> CONTADOR CONTAGEM TRANSAÇÕES
moeda_origem_sigla = 'R$:'
moeda_destino_sigla = 'U$$:'
valor_tot_operacoes = float(0)
valor_tot_operacoes_a = float(0)
tot_taxas = float(0)
tot_movimento_brasil_dolar_eua = float(0)
tot_movimento_brasil_euro = float(0)
tot_movimento_brasil_dolar_canada = float(0)
tot_movimento_dolar_eua_brasil = float(0)
tot_movimento_dolar_eua_euro = float(0)
tot_movimento_dolar_eua_dolar_canada = float(0)
tot_movimento_euro_brasil = float(0)
tot_movimento_euro_dolar_eua = float(0)
tot_movimento_euro_dolar_canada = float(0)
tot_movimento_dolar_canada_brasil = float(0)
tot_movimento_dolar_canada_dolar_eua = float(0)
tot_movimento_dolar_canada_euro = float(0)
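# Running totals: one accumulator per origin/destination currency pair, plus the overall amount moved (valor_tot_operacoes) and the total fees charged (tot_taxas).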
print('--' * 35)
print(f'\033[7;40m{" GERENCIADOR DE OPERAÇÕES ":*^70}\033[0;0m')
print('--' * 35)
while True:
print(f' |-- {"OPÇÃO ":-<2}{" MENU ":-^38} |')
print(f' |\033[1;90m---------------------------------------------------------- \033[0;0m|')
print(f' |\033[7;40m{" -> 1 <-":.<3}|{" - CADASTROS - CLIENTES - OPERAÇÕES ":^38} \033[0;0m|')
print(f' |\033[1;90m---------------------------------------------------------- \033[0;0m|')
print(f' |{" -> 2 <-":.<3}|{" - LISTAR OPERAÇÕES - ":^38} |')
print(f' |\033[1;90m---------------------------------------------------------- \033[0;0m|')
print(f' |\033[7;40m{" -> 3 <-":.<3}|{" - VALOR TOTAL DAS OPERAÇÕES - ":^38} \033[0;0m|')
print(f' |\033[1;90m---------------------------------------------------------- \033[0;0m|')
print(f' |{" -> 4 <-":.<3}|{" - VALOR TOTAL DAS TAXAS COBRADAS - ":^38} |')
print(f' |\033[1;90m---------------------------------------------------------- \033[0;0m|')
print('--' * 35)
print(f'\033[7;40m{" ESCOLHA UMA DAS OPÇÕES ACIMA: ":*^70}\033[0;0m')
print('--' * 35)
opcao_menu = str(input('Digite a opção desejada:?'))
if opcao_menu.isnumeric():
if opcao_menu == '1':
while True:
print('--' * 35)
print(f'\033[7;40m{" CADASTRAR CLIENTES ":*^70}\033[0;0m')
print('--' * 35)
codigo_cliente += 1
codigo_cliente_convertida = str(codigo_cliente)
print(f' --> {codigo_cliente}º CLIENTE - ORDEM DE SERVIÇO DE Nº [ {codigo_cliente_convertida} ]')
dados['Cód'] = [codigo_cliente]
lista_de_dados.append(codigo_cliente_convertida)
print('--' * 35)
nome = str(input('Digite o nome do cliente:?')).strip().upper()
dados['Nome'] = [nome]
lista_de_dados.append(nome)
# print(f'TESTE DADOS: {dados}')
print('--' * 30)
print('------------------- MOEDAS CADASTRADAS -------------------')
print(' | Digite --> (1) para MOEDA REAL - BRASIL |')
print(' | Digite --> (2) para MOEDA DÓLAR - EUA |')
print(' | Digite --> (3) para MOEDA EURO - EUROPA |')
print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |')
print(' -------------------------------------------')
moeda_origem = str(input('Moeda de origem?: [somente números acima]:?'))
dados['Moeda origem'] = [moeda_origem]
print('--' * 30)
if moeda_origem == '1':
print('MOEDA DE ORIGEM: - REAL - BRASIL')
dados['Moeda origem'] = ['REAL - BRL']
moeda_origem_sigla = 'R$:'
lista_de_dados.append('REAL - BRL')
elif moeda_origem == '2':
print('MOEDA DE ORIGEM: - DÓLAR - EUA')
dados['Moeda origem'] = ['DÓLAR - EUA']
moeda_origem_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - EUA')
elif moeda_origem == '3':
print('MOEDA DE ORIGEM: - EURO - EUROPA')
dados['Moeda origem'] = ['EURO']
moeda_origem_sigla = '€:'
lista_de_dados.append('EURO')
elif moeda_origem == '4':
print('MOEDA DE ORIGEM: - DÓLAR - CANADÁ')
dados['Moeda origem'] = ['DÓLAR - CAD']
moeda_origem_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - CAD')
else:
while True:
print(f'\033[1;41mVALOR INVÁLIDO - SOMENTE NÚMEROS DE 1 A 4 QUE CORRESPONDEM AS MOEDAS CADASTRADAS:\033[0;0m')
print('--' * 30)
print('------------------- MOEDAS CADASTRADAS -------------------')
print(' | Digite --> (1) para MOEDA REAL - BRASIL |')
print(' | Digite --> (2) para MOEDA DÓLAR - EUA |')
print(' | Digite --> (3) para MOEDA EURO - EUROPA |')
print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |')
print(' -------------------------------------------')
moeda_origem = str(input('Moeda de origem?: [somente números acima]:?'))
print('--' * 30)
if moeda_origem == '1':
print('MOEDA DE ORIGEM: - REAL - BRASIL')
dados['Moeda origem'] = ['REAL - BRL']
moeda_origem_sigla = 'R$:'
lista_de_dados.append('REAL - BRL')
break
elif moeda_origem == '2':
print('MOEDA DE ORIGEM: - DÓLAR - EUA')
dados['Moeda origem'] = ['DÓLAR - EUA']
moeda_origem_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - EUA')
break
elif moeda_origem == '3':
print('MOEDA DE ORIGEM: - EURO - EUROPA')
dados['Moeda origem'] = ['EURO']
moeda_origem_sigla = '€:'
lista_de_dados.append('EURO')
break
elif moeda_origem == '4':
print('MOEDA DE ORIGEM: - DÓLAR - CANADÁ')
dados['Moeda origem'] = ['DÓLAR - CAD']
moeda_origem_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - CAD')
break
print('--' * 30)
print('------------------- MOEDAS CADASTRADAS -------------------')
print(' | Digite --> (1) para MOEDA REAL - BRASIL |')
print(' | Digite --> (2) para MOEDA DÓLAR - EUA |')
print(' | Digite --> (3) para MOEDA EURO - EUROPA |')
print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |')
print(' -------------------------------------------')
moeda_destino = str(input('Moeda de destino?: [somente números acima]:?'))
print('--' * 30)
if moeda_destino == '1':
print('MOEDA DE DESTINO: - REAL - BRASIL')
dados['Moeda destino'] = ['REAL - BRASIL']
moeda_destino_sigla = 'R$:'
lista_de_dados.append('REAL - BRASIL')
elif moeda_destino == '2':
print('MOEDA DE DESTINO: - DÓLAR - EUA')
dados['Moeda destino'] = ['DÓLAR - EUA']
moeda_destino_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - EUA')
elif moeda_destino == '3':
print('MOEDA DE DESTINO: - EURO - EUROPA')
dados['Moeda destino'] = ['EURO']
moeda_destino_sigla = '€:'
lista_de_dados.append('EURO')
elif moeda_destino == '4':
print('MOEDA DE DESTINO: - DÓLAR - CANADÁ')
dados['Moeda destino'] = ['DÓLAR CAD']
moeda_destino_sigla = 'U$$:'
lista_de_dados.append('DÓLAR CAD')
else:
while True:
print(f'\033[1;41mVALOR INVÁLIDO - SOMENTE NÚMEROS DE 1 A 4 QUE CORRESPONDEM AS MOEDAS CADASTRADAS:\033[0;0m')
print('--' * 30)
print('------------------- MOEDAS CADASTRADAS -------------------')
print(' | Digite --> (1) para MOEDA REAL - BRASIL |')
print(' | Digite --> (2) para MOEDA DÓLAR - EUA |')
print(' | Digite --> (3) para MOEDA EURO - EUROPA |')
print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |')
print(' -------------------------------------------')
moeda_destino = str(input('Moeda de destino?: [somente números acima]:?'))
print('--' * 30)
if moeda_destino == '1':
print('MOEDA DE DESTINO: - REAL - BRASIL')
dados['Moeda destino'] = ['REAL - BRASIL']
moeda_destino_sigla = 'R$:'
lista_de_dados.append('REAL - BRASIL')
break
elif moeda_destino == '2':
print('MOEDA DE DESTINO: - DÓLAR - EUA')
dados['Moeda destino'] = ['DÓLAR - EUA']
moeda_destino_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - EUA')
break
elif moeda_destino == '3':
print('MOEDA DE DESTINO: - EURO - EUROPA')
dados['Moeda destino'] = ['EURO']
moeda_destino_sigla = '€:'
lista_de_dados.append('EURO')
break
elif moeda_destino == '4':
print('MOEDA DE DESTINO: - DÓLAR - CANADÁ')
dados['Moeda destino'] = ['DÓLAR CAD']
moeda_destino_sigla = 'U$$:'
lista_de_dados.append('DÓLAR CAD')
break
print('--' * 30)
data_operacao = str(input('Data da operação:\033[1;90m[NO FORMATO: __/__/____ ]\033[0;0m:?'))
dados['Data Operação'] = [data_operacao]
lista_de_dados.append(data_operacao)
print('--' * 30)
valor_original = str(input(f'Valor original:? {moeda_origem_sigla}'))
dados['Valor Original'] = [valor_original]
lista_de_dados.append(valor_original)
convertendo_valor = float(valor_original)
valor_tot_operacoes += convertendo_valor
if moeda_origem == '1' and moeda_destino == '2':
convertendo_valor_brasil = float(valor_original)
tot_movimento_brasil_dolar_eua += convertendo_valor_brasil
elif moeda_origem == '1' and moeda_destino == '3':
convertendo_valor_brasil_euro = float(valor_original)
tot_movimento_brasil_euro += convertendo_valor_brasil_euro
elif moeda_origem == '1' and moeda_destino == '4':
convertendo_valor_brasil_dolar_canada = float(valor_original)
tot_movimento_brasil_dolar_canada += convertendo_valor_brasil_dolar_canada
if moeda_origem == '2' and moeda_destino == '1':
convertendo_valor_eua_brasil = float(valor_original)
tot_movimento_dolar_eua_brasil += convertendo_valor_eua_brasil
elif moeda_origem == '2' and moeda_destino == '3':
convertendo_valor_eua_euro = float(valor_original)
tot_movimento_dolar_eua_euro += convertendo_valor_eua_euro
elif moeda_origem == '2' and moeda_destino == '4':
convertendo_valor_eua_canada = float(valor_original)
tot_movimento_dolar_eua_dolar_canada += convertendo_valor_eua_canada
if moeda_origem == '3' and moeda_destino == '1':
convertendo_valor_euro_brasil = float(valor_original)
tot_movimento_euro_brasil += convertendo_valor_euro_brasil
elif moeda_origem == '3' and moeda_destino == '2':
convertendo_valor_euro_eua = float(valor_original)
tot_movimento_euro_dolar_eua += convertendo_valor_euro_eua
elif moeda_origem == '3' and moeda_destino == '4':
convertendo_valor_euro_canada = float(valor_original)
tot_movimento_euro_dolar_canada += convertendo_valor_euro_canada
if moeda_origem == '4' and moeda_destino == '1':
convertendo_valor_canada_brasil = float(valor_original)
tot_movimento_dolar_canada_brasil += convertendo_valor_canada_brasil
elif moeda_origem == '4' and moeda_destino == '2':
convertendo_valor_canada_eua = float(valor_original)
tot_movimento_dolar_canada_dolar_eua += convertendo_valor_canada_eua
elif moeda_origem == '4' and moeda_destino == '3':
convertendo_valor_canada_euro = float(valor_original)
tot_movimento_dolar_canada_euro += convertendo_valor_canada_euro
print('--' * 30)
valor_convertido = str(input(f'Valor convertido:? {moeda_destino_sigla}'))
dados['Valor Convertido'] = [valor_convertido]
lista_de_dados.append(valor_convertido)
print('--' * 30)
taxa_cobrada = str(input(f'Taxa cobrada:? R$:'))
conversao_taxa = float(taxa_cobrada)
tot_taxas += conversao_taxa
dados['Taxa Cobrada'] = [taxa_cobrada]
lista_de_dados.append(taxa_cobrada)
print('--' * 30)
copia_dados.append(dados.copy())
lista_principal.append(lista_de_dados)
print('--' * 30)
print('\033[1;90m----------------------- FIM CADASTRO -----------------------\033[0;0m')
print('--' * 30)
while True:
continua_cadastro = str(input('\nCadastrar mais clientes:? [C - P/ CONTINUAR OU S - P/ SAIR]:?')).strip().upper()[0]
print('')
if continua_cadastro == 'C':
break
elif continua_cadastro == 'S':
break
else:
print(f'\033[1;41m- SOMENTE UMA DAS OPÇÕES ACIMA: [C - CONTINUAR/ S - SAIR]:\033[0;0m')
if continua_cadastro == 'S':
print('--' * 35)
print(' \033[1;30m\033[1;43m CARREGANDO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
break
elif opcao_menu == '2':
print('--' * 35)
print(f'\033[7;40m{" RELATÓRIOS --> OPERAÇÕES REALIZADAS ":*^70}\033[0;0m')
print('--' * 35)
if codigo_cliente == 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO DADOS\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print()
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print('\033[1;41mATÉ O PRESENTE MOMENTO NÃO HÁ NENHUMA OPERAÇÃO REALIZADA !\033[0;0m')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print()
print('--' * 35)
print('--' * 35)
print(' \033[1;30m\033[1;43m VOLTANDO AO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif codigo_cliente > 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO DADOS\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print(f'\033[7;40m{" RELATÓRIOS --> OPERAÇÕES REALIZADAS: ":*^70}\033[0;0m')
print('--' * 35)
print()
print(f'\033[1;42mATÉ O PRESENTE MOMENTO VOCÊ TEM UM TOTAL DE: {codigo_cliente} \033[0;0m',end='')
if codigo_cliente == 1:
print('\033[1;42mOPERAÇÃO !\033[0;0m')
else:
print('\033[1;42mOPERAÇÕES !\033[0;0m')
print('--' * 35)
print(' -- | ABAIXO SEGUE A LISTA COMPLETA DE TODOS OS REGISTROS ATÉ O MOMENTO! | -- ')
print('--' * 35)
print(' -------------------- |SEQUÊNCIA E ORDEM DE COLUNAS| -------------- VALORES -------------')
print('1ºCÓD:|2ºNOME: |3ºMOED ORIG. |4º MOED DEST. |5º DATA: |6ºORIGI:|7ºCONV:|8ºTAXA. ')
print('---' * 30)
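# Walk every saved operation in copia_dados and print its values in the column order shown in the header above.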
for c, v in enumerate(copia_dados):
for d in v.values():
print(f" {str(d).replace('[', '').replace(']', '')}", end=' ')
print()
print()
print()
print('--' * 35)
print('--' * 35)
while True:
voltar_menu_principal = str(input('Digite:[S]-SAIR:')).strip().upper()[0]
if voltar_menu_principal == 'S':
break
else:
print(f'\033[1;41mSOMENTE DIGITE A LETRA [S]-PARA SAIR! :\033[0;0m')
print('--' * 35)
print(' \033[1;30m\033[1;43m CARREGANDO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif opcao_menu == '3':
if codigo_cliente == 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO OPERAÇÕES\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print()
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print('\033[1;41mATÉ O PRESENTE MOMENTO NÃO HÁ NENHUMA OPERAÇÃO REALIZADA !\033[0;0m')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print()
print('--' * 35)
print('--' * 35)
print(' \033[1;30m\033[1;43m VOLTANDO AO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif codigo_cliente > 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO OPERAÇÕES\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print(f'\033[7;40m{" RELATÓRIOS --> VALOR TOTAL DAS OPERAÇÕES: ":*^70}\033[0;0m')
print('--' * 35)
print()
print(f'\033[1;42mATÉ O PRESENTE MOMENTO VOCÊ TEM UM TOTAL DE: {codigo_cliente} \033[0;0m', end='')
if codigo_cliente == 1:
print('\033[1;42mOPERAÇÃO !\033[0;0m')
else:
print('\033[1;42mOPERAÇÕES !\033[0;0m')
if tot_movimento_brasil_dolar_eua > 0:
print(f'DE: BRL - BRASIL / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_dolar_eua:.2f} REAIS.')
if tot_movimento_brasil_euro > 0:
print(f'DE: BRL - BRASIL / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_euro:.2f} REAIS.')
if tot_movimento_brasil_dolar_canada > 0:
print(f'DE: BRL - BRASIL / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_dolar_canada:.2f} REAIS.')
if tot_movimento_dolar_eua_brasil > 0:
print(f'DE: DÓLAR - EUA / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_brasil:.2f} DÓLAR')
if tot_movimento_dolar_eua_euro > 0:
print(f'DE: DÓLAR - EUA / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_euro:.2f} DÓLAR')
if tot_movimento_dolar_eua_dolar_canada > 0:
print(f'DE: DÓLAR - EUA / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_dolar_canada:.2f} DÓLAR')
if tot_movimento_euro_brasil > 0:
print(f'DE: EURO - EUROPA / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_brasil:.2f} EURO.')
if tot_movimento_euro_dolar_eua > 0:
print(f'DE: EURO - EUROPA / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_dolar_eua:.2f} EURO.')
if tot_movimento_euro_dolar_canada > 0:
print(f'DE: EURO - EUROPA / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_dolar_canada:.2f} EURO.')
if tot_movimento_dolar_canada_brasil > 0:
print(f'DE: DÓLAR - CANADÁ / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_brasil:.2f} DÓLAR')
if tot_movimento_dolar_canada_dolar_eua > 0:
print(f'DE: DÓLAR - CANADÁ / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_dolar_eua:.2f} DÓLAR')
if tot_movimento_dolar_canada_euro > 0:
print(f'DE: DÓLAR - CANADÁ / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_euro:.2f} DÓLAR')
print()
print()
print()
print('--' * 35)
print('--' * 35)
while True:
voltar_menu_principal = str(input('Digite:[S]-SAIR:')).strip().upper()[0]
if voltar_menu_principal == 'S':
break
else:
print(f'\033[1;41mSOMENTE DIGITE A LETRA [S]-PARA SAIR! :\033[0;0m')
print('--' * 35)
print(' \033[1;30m\033[1;43m CARREGANDO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif opcao_menu == '4':
if codigo_cliente == 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO SISTEMA\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print()
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print('\033[1;41mATÉ O PRESENTE MOMENTO NÃO HÁ NENHUMA OPERAÇÃO REALIZADA !\033[0;0m')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print()
print('--' * 35)
print('--' * 35)
print(' \033[1;30m\033[1;43m VOLTANDO AO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif codigo_cliente > 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO SISTEMA\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print(f'\033[7;40m{" RELATÓRIOS --> VALOR TOTAL DE TAXAS: ":*^70}\033[0;0m')
print('--' * 35)
print()
print(f'\033[1;42mATÉ O PRESENTE MOMENTO VOCÊ TEM: {codigo_cliente} \033[0;0m', end='')
if codigo_cliente == 1:
print('\033[1;42mOPERAÇÃO REGISTRADA!\033[0;0m')
else:
print('\033[1;42mOPERAÇÕES REGISTRADAS!\033[0;0m')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print(f'SOMANDO TODAS AS ENTRADAS EM TAXAS: TEMOS UM TOTAL DÊ R$: {tot_taxas:.2f} REAIS.')
print(f'AS MESMAS SÃO REFERENTE AS DEVIDAS CONVERSÕES ABAIXO:')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
if tot_movimento_brasil_dolar_eua > 0:
print(f'DE: BRL - BRASIL / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_dolar_eua:.2f} REAIS.')
if tot_movimento_brasil_euro > 0:
print(f'DE: BRL - BRASIL / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_euro:.2f} REAIS.')
if tot_movimento_brasil_dolar_canada > 0:
print(f'DE: BRL - BRASIL / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_dolar_canada:.2f} REAIS.')
if tot_movimento_dolar_eua_brasil > 0:
print(f'DE: DÓLAR - EUA / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_brasil:.2f} DÓLAR')
if tot_movimento_dolar_eua_euro > 0:
print(f'DE: DÓLAR - EUA / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_euro:.2f} DÓLAR')
if tot_movimento_dolar_eua_dolar_canada > 0:
print(f'DE: DÓLAR - EUA / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_dolar_canada:.2f} DÓLAR')
if tot_movimento_euro_brasil > 0:
print(f'DE: EURO - EUROPA / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_brasil:.2f} EURO.')
if tot_movimento_euro_dolar_eua > 0:
print(f'DE: EURO - EUROPA / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_dolar_eua:.2f} EURO.')
if tot_movimento_euro_dolar_canada > 0:
print(f'DE: EURO - EUROPA / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_dolar_canada:.2f} EURO.')
if tot_movimento_dolar_canada_brasil > 0:
print(f'DE: DÓLAR - CANADÁ / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_brasil:.2f} DÓLAR')
if tot_movimento_dolar_canada_dolar_eua > 0:
print(f'DE: DÓLAR - CANADÁ / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_dolar_eua:.2f} DÓLAR')
if tot_movimento_dolar_canada_euro > 0:
print(f'DE: DÓLAR - CANADÁ / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_euro:.2f} DÓLAR')
print()
print()
print()
print('--' * 35)
print('--' * 35)
while True:
voltar_menu_principal = str(input('Digite:[S]-SAIR:')).strip().upper()[0]
if voltar_menu_principal == 'S':
break
print('--' * 35)
print(' \033[1;30m\033[1;43m CARREGANDO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
else:
while True:
print('--' * 30)
print(f'\033[1;41m- SOMENTE NÚMEROS DE 1 A 4 QUE CORRESPONDEM AS OPÇÕES DO MENU:\033[0;0m')
print('--' * 30)
opcao_menu = str(input('Digite a opção desejada:?'))
print('--' * 30)
else:
while True:
print('--' * 30)
print(f'\033[1;41m- SOMENTE NÚMEROS DE 1 A 4 QUE CORRESPONDEM AS OPÇÕES DO MENU:\033[0;0m')
print('--' * 30)
opcao_menu = str(input('Digite a opção desejada:?'))
print('--' * 30)
if opcao_menu == '1':
while True:
print('--' * 35)
print(f'\033[7;40m{" CADASTRAR CLIENTES ":*^70}\033[0;0m')
print('--' * 35)
codigo_cliente += 1
codigo_cliente_convertida = str(codigo_cliente)
print(f' --> {codigo_cliente}º CLIENTE - ORDEM DE SERVIÇO DE Nº [ {codigo_cliente_convertida} ]')
dados['Cód'] = [codigo_cliente]
lista_de_dados.append(codigo_cliente_convertida)
print('--' * 35)
nome = str(input('Digite o nome do cliente:?')).strip().upper()
dados['Nome'] = [nome]
lista_de_dados.append(nome)
# print(f'TESTE DADOS: {dados}')
print('--' * 30)
print('------------------- MOEDAS CADASTRADAS -------------------')
print(' | Digite --> (1) para MOEDA REAL - BRASIL |')
print(' | Digite --> (2) para MOEDA DÓLAR - EUA |')
print(' | Digite --> (3) para MOEDA EURO - EUROPA |')
print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |')
print(' -------------------------------------------')
moeda_origem = str(input('Moeda de origem?: [somente números acima]:?'))
dados['Moeda origem'] = [moeda_origem]
print('--' * 30)
if moeda_origem == '1':
print('MOEDA DE ORIGEM: - REAL - BRASIL')
dados['Moeda origem'] = ['REAL - BRL']
moeda_origem_sigla = 'R$:'
lista_de_dados.append('REAL - BRL')
elif moeda_origem == '2':
print('MOEDA DE ORIGEM: - DÓLAR - EUA')
dados['Moeda origem'] = ['DÓLAR - EUA']
moeda_origem_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - EUA')
elif moeda_origem == '3':
print('MOEDA DE ORIGEM: - EURO - EUROPA')
dados['Moeda origem'] = ['EURO']
moeda_origem_sigla = '€:'
lista_de_dados.append('EURO')
elif moeda_origem == '4':
print('MOEDA DE ORIGEM: - DÓLAR - CANADÁ')
dados['Moeda origem'] = ['DÓLAR - CAD']
moeda_origem_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - CAD')
else:
while True:
print(
f'\033[1;41mVALOR INVÁLIDO - SOMENTE NÚMEROS DE 1 A 4 QUE CORRESPONDEM AS MOEDAS CADASTRADAS:\033[0;0m')
print('--' * 30)
print('------------------- MOEDAS CADASTRADAS -------------------')
print(' | Digite --> (1) para MOEDA REAL - BRASIL |')
print(' | Digite --> (2) para MOEDA DÓLAR - EUA |')
print(' | Digite --> (3) para MOEDA EURO - EUROPA |')
print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |')
print(' -------------------------------------------')
moeda_origem = str(input('Moeda de origem?: [somente números acima]:?'))
print('--' * 30)
if moeda_origem == '1':
print('MOEDA DE ORIGEM: - REAL - BRASIL')
dados['Moeda origem'] = ['REAL - BRL']
moeda_origem_sigla = 'R$:'
lista_de_dados.append('REAL - BRL')
break
elif moeda_origem == '2':
print('MOEDA DE ORIGEM: - DÓLAR - EUA')
dados['Moeda origem'] = ['DÓLAR - EUA']
moeda_origem_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - EUA')
break
elif moeda_origem == '3':
print('MOEDA DE ORIGEM: - EURO - EUROPA')
dados['Moeda origem'] = ['EURO']
moeda_origem_sigla = '€:'
lista_de_dados.append('EURO')
break
elif moeda_origem == '4':
print('MOEDA DE ORIGEM: - DÓLAR - CANADÁ')
dados['Moeda origem'] = ['DÓLAR - CAD']
moeda_origem_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - CAD')
break
print('--' * 30)
print('------------------- MOEDAS CADASTRADAS -------------------')
print(' | Digite --> (1) para MOEDA REAL - BRASIL |')
print(' | Digite --> (2) para MOEDA DÓLAR - EUA |')
print(' | Digite --> (3) para MOEDA EURO - EUROPA |')
print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |')
print(' -------------------------------------------')
moeda_destino = str(input('Moeda de destino?: [somente números acima]:?'))
print('--' * 30)
if moeda_destino == '1':
print('MOEDA DE DESTINO: - REAL - BRASIL')
dados['Moeda destino'] = ['REAL - BRASIL']
moeda_destino_sigla = 'R$:'
lista_de_dados.append('REAL - BRASIL')
elif moeda_destino == '2':
print('MOEDA DE DESTINO: - DÓLAR - EUA')
dados['Moeda destino'] = ['DÓLAR - EUA']
moeda_destino_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - EUA')
elif moeda_destino == '3':
print('MOEDA DE DESTINO: - EURO - EUROPA')
dados['Moeda destino'] = ['EURO']
moeda_destino_sigla = '€:'
lista_de_dados.append('EURO')
elif moeda_destino == '4':
print('MOEDA DE DESTINO: - DÓLAR - CANADÁ')
dados['Moeda destino'] = ['DÓLAR CAD']
moeda_destino_sigla = 'U$$:'
lista_de_dados.append('DÓLAR CAD')
else:
while True:
print(
f'\033[1;41mVALOR INVÁLIDO - SOMENTE NÚMEROS DE 1 A 4 QUE CORRESPONDEM AS MOEDAS CADASTRADAS:\033[0;0m')
print('--' * 30)
print('------------------- MOEDAS CADASTRADAS -------------------')
print(' | Digite --> (1) para MOEDA REAL - BRASIL |')
print(' | Digite --> (2) para MOEDA DÓLAR - EUA |')
print(' | Digite --> (3) para MOEDA EURO - EUROPA |')
print(' | Digite --> (4) para MOEDA DÓLAR - CANADÁ |')
print(' -------------------------------------------')
moeda_destino = str(input('Moeda de destino?: [somente números acima]:?'))
print('--' * 30)
if moeda_destino == '1':
print('MOEDA DE DESTINO: - REAL - BRASIL')
dados['Moeda destino'] = ['REAL - BRASIL']
moeda_destino_sigla = 'R$:'
lista_de_dados.append('REAL - BRASIL')
break
elif moeda_destino == '2':
print('MOEDA DE DESTINO: - DÓLAR - EUA')
dados['Moeda destino'] = ['DÓLAR - EUA']
moeda_destino_sigla = 'U$$:'
lista_de_dados.append('DÓLAR - EUA')
break
elif moeda_destino == '3':
print('MOEDA DE DESTINO: - EURO - EUROPA')
dados['Moeda destino'] = ['EURO']
moeda_destino_sigla = '€:'
lista_de_dados.append('EURO')
break
elif moeda_destino == '4':
print('MOEDA DE DESTINO: - DÓLAR - CANADÁ')
dados['Moeda destino'] = ['DÓLAR CAD']
moeda_destino_sigla = 'U$$:'
lista_de_dados.append('DÓLAR CAD')
break
print('--' * 30)
data_operacao = str(input('Data da operação:\033[1;90m[NO FORMATO: __/__/____ ]\033[0;0m:?'))
dados['Data Operação'] = [data_operacao]
lista_de_dados.append(data_operacao)
print('--' * 30)
valor_original = str(input(f'Valor original:? {moeda_origem_sigla}'))
dados['Valor Original'] = [valor_original]
lista_de_dados.append(valor_original)
convertendo_valor = float(valor_original)
valor_tot_operacoes += convertendo_valor
if moeda_origem == '1' and moeda_destino == '2':
convertendo_valor_brasil = float(valor_original)
tot_movimento_brasil_dolar_eua += convertendo_valor_brasil
elif moeda_origem == '1' and moeda_destino == '3':
convertendo_valor_brasil_euro = float(valor_original)
tot_movimento_brasil_euro += convertendo_valor_brasil_euro
elif moeda_origem == '1' and moeda_destino == '4':
convertendo_valor_brasil_dolar_canada = float(valor_original)
tot_movimento_brasil_dolar_canada += convertendo_valor_brasil_dolar_canada
if moeda_origem == '2' and moeda_destino == '1':
convertendo_valor_eua_brasil = float(valor_original)
tot_movimento_dolar_eua_brasil += convertendo_valor_eua_brasil
elif moeda_origem == '2' and moeda_destino == '3':
convertendo_valor_eua_euro = float(valor_original)
tot_movimento_dolar_eua_euro += convertendo_valor_eua_euro
elif moeda_origem == '2' and moeda_destino == '4':
convertendo_valor_eua_canada = float(valor_original)
tot_movimento_dolar_eua_dolar_canada += convertendo_valor_eua_canada
if moeda_origem == '3' and moeda_destino == '1':
convertendo_valor_euro_brasil = float(valor_original)
tot_movimento_euro_brasil += convertendo_valor_euro_brasil
elif moeda_origem == '3' and moeda_destino == '2':
convertendo_valor_euro_eua = float(valor_original)
tot_movimento_euro_dolar_eua += convertendo_valor_euro_eua
elif moeda_origem == '3' and moeda_destino == '4':
convertendo_valor_euro_canada = float(valor_original)
tot_movimento_euro_dolar_canada += convertendo_valor_euro_canada
if moeda_origem == '4' and moeda_destino == '1':
convertendo_valor_canada_brasil = float(valor_original)
tot_movimento_dolar_canada_brasil += convertendo_valor_canada_brasil
elif moeda_origem == '4' and moeda_destino == '2':
convertendo_valor_canada_eua = float(valor_original)
tot_movimento_dolar_canada_dolar_eua += convertendo_valor_canada_eua
elif moeda_origem == '4' and moeda_destino == '3':
convertendo_valor_canada_euro = float(valor_original)
tot_movimento_dolar_canada_euro += convertendo_valor_canada_euro
print('--' * 30)
valor_convertido = str(input(f'Valor convertido:? {moeda_destino_sigla}'))
dados['Valor Convertido'] = [valor_convertido]
lista_de_dados.append(valor_convertido)
print('--' * 30)
taxa_cobrada = str(input(f'Taxa cobrada:? R$:'))
conversao_taxa = float(taxa_cobrada)
tot_taxas += conversao_taxa
dados['Taxa Cobrada'] = [taxa_cobrada]
lista_de_dados.append(taxa_cobrada)
print('--' * 30)
copia_dados.append(dados.copy())
lista_principal.append(lista_de_dados)
print('--' * 30)
print('\033[1;90m----------------------- FIM CADASTRO -----------------------\033[0;0m')
print('--' * 30)
while True:
continua_cadastro = \
str(input('\nCadastrar mais clientes:? [C - P/ CONTINUAR OU S - P/ SAIR]:?')).strip().upper()[0]
print('')
if continua_cadastro == 'C':
break
elif continua_cadastro == 'S':
break
else:
print(f'\033[1;41m- SOMENTE UMA DAS OPÇÕES ACIMA: [C - CONTINUAR/ S - SAIR]:\033[0;0m')
if continua_cadastro == 'S':
print('--' * 35)
print(' \033[1;30m\033[1;43m CARREGANDO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
break
elif opcao_menu == '2':
print('--' * 35)
print(f'\033[7;40m{" RELATÓRIOS --> OPERAÇÕES REALIZADAS ":*^70}\033[0;0m')
print('--' * 35)
if codigo_cliente == 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO DADOS\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print()
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print('\033[1;41mATÉ O PRESENTE MOMENTO NÃO HÁ NENHUMA OPERAÇÃO REALIZADA !\033[0;0m')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print()
print('--' * 35)
print('--' * 35)
print(' \033[1;30m\033[1;43m VOLTANDO AO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif codigo_cliente > 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO DADOS\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print(f'\033[7;40m{" RELATÓRIOS --> OPERAÇÕES REALIZADAS: ":*^70}\033[0;0m')
print('--' * 35)
print()
print(f'\033[1;42mATÉ O PRESENTE MOMENTO VOCÊ TEM UM TOTAL DE: {codigo_cliente} \033[0;0m', end='')
if codigo_cliente == 1:
print('\033[1;42mOPERAÇÃO !\033[0;0m')
else:
print('\033[1;42mOPERAÇÕES !\033[0;0m')
print('--' * 35)
print(' -- | ABAIXO SEGUE A LISTA COMPLETA DE TODOS OS REGISTROS ATÉ O MOMENTO! | -- ')
print('--' * 35)
print(' -------------------- |SEQUÊNCIA E ORDEM DE COLUNAS| -------------- VALORES -------------')
print('1ºCÓD:|2ºNOME: |3ºMOED ORIG. |4º MOED DEST. |5º DATA: |6ºORIGI:|7ºCONV:|8ºTAXA. ')
print('---' * 30)
for c, v in enumerate(copia_dados):
for d in v.values():
print(f" {str(d).replace('[', '').replace(']', '')}", end=' ')
print()
print()
print()
print('--' * 35)
print('--' * 35)
while True:
voltar_menu_principal = str(input('Digite:[S]-SAIR:')).strip().upper()[0]
if voltar_menu_principal == 'S':
break
else:
print(f'\033[1;41mSOMENTE DIGITE A LETRA [S]-PARA SAIR! :\033[0;0m')
print('--' * 35)
print(' \033[1;30m\033[1;43m CARREGANDO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif opcao_menu == '3':
if codigo_cliente == 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO OPERAÇÕES\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print()
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print('\033[1;41mATÉ O PRESENTE MOMENTO NÃO HÁ NENHUMA OPERAÇÃO REALIZADA !\033[0;0m')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print()
print('--' * 35)
print('--' * 35)
print(' \033[1;30m\033[1;43m VOLTANDO AO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif codigo_cliente > 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO OPERAÇÕES\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print(f'\033[7;40m{" RELATÓRIOS --> VALOR TOTAL DAS OPERAÇÕES: ":*^70}\033[0;0m')
print('--' * 35)
print()
print(f'\033[1;42mATÉ O PRESENTE MOMENTO VOCÊ TEM UM TOTAL DE: {codigo_cliente} \033[0;0m', end='')
if codigo_cliente == 1:
print('\033[1;42mOPERAÇÃO !\033[0;0m')
else:
print('\033[1;42mOPERAÇÕES !\033[0;0m')
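                # Report the accumulated total for every currency pair that has at least one conversion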
if tot_movimento_brasil_dolar_eua > 0:
print(
f'DE: BRL - BRASIL / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_dolar_eua:.2f} REAIS.')
if tot_movimento_brasil_euro > 0:
print(
                        f'DE: BRL - BRASIL / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_euro:.2f} REAIS.')
if tot_movimento_brasil_dolar_canada > 0:
print(
f'DE: BRL - BRASIL / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_dolar_canada:.2f} REAIS.')
if tot_movimento_dolar_eua_brasil > 0:
print(
f'DE: DÓLAR - EUA / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_brasil:.2f} DÓLAR')
if tot_movimento_dolar_eua_euro > 0:
print(
f'DE: DÓLAR - EUA / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_euro:.2f} DÓLAR')
if tot_movimento_dolar_eua_dolar_canada > 0:
print(
f'DE: DÓLAR - EUA / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_dolar_canada:.2f} DÓLAR')
if tot_movimento_euro_brasil > 0:
print(
f'DE: EURO - EUROPA / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_brasil:.2f} EURO.')
if tot_movimento_euro_dolar_eua > 0:
print(
f'DE: EURO - EUROPA / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_dolar_eua:.2f} EURO.')
if tot_movimento_euro_dolar_canada > 0:
print(
f'DE: EURO - EUROPA / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_dolar_canada:.2f} EURO.')
if tot_movimento_dolar_canada_brasil > 0:
print(
f'DE: DÓLAR - CANADÁ / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_brasil:.2f} DÓLAR')
if tot_movimento_dolar_canada_dolar_eua > 0:
print(
f'DE: DÓLAR - CANADÁ / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_dolar_eua:.2f} DÓLAR')
if tot_movimento_dolar_canada_euro > 0:
print(
f'DE: DÓLAR - CANADÁ / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_euro:.2f} DÓLAR')
print()
print()
print()
print('--' * 35)
print('--' * 35)
while True:
voltar_menu_principal = str(input('Digite:[S]-SAIR:')).strip().upper()[0]
if voltar_menu_principal == 'S':
break
else:
print(f'\033[1;41mSOMENTE DIGITE A LETRA [S]-PARA SAIR! :\033[0;0m')
print('--' * 35)
print(' \033[1;30m\033[1;43m CARREGANDO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif opcao_menu == '4':
if codigo_cliente == 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO SISTEMA\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print()
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print('\033[1;41mATÉ O PRESENTE MOMENTO NÃO HÁ NENHUMA OPERAÇÃO REALIZADA !\033[0;0m')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print()
print('--' * 35)
print('--' * 35)
print(' \033[1;30m\033[1;43m VOLTANDO AO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
elif codigo_cliente > 0:
print('--' * 35)
print(' \033[1;40m Aguarde ! AVERIGUANDO SISTEMA\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;40m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;40m100%\033[0;0m', end='')
print('\033[1;40m \033[0;0m', end='')
print('\n')
print('--' * 35)
print(f'\033[7;40m{" RELATÓRIOS --> VALOR TOTAL DE TAXAS: ":*^70}\033[0;0m')
print('--' * 35)
print()
print(f'\033[1;42mATÉ O PRESENTE MOMENTO VOCÊ TEM: {codigo_cliente} \033[0;0m', end='')
if codigo_cliente == 1:
print('\033[1;42mOPERAÇÃO REGISTRADA!\033[0;0m')
else:
print('\033[1;42mOPERAÇÕES REGISTRADAS!\033[0;0m')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
print(f'SOMANDO TODAS AS ENTRADAS EM TAXAS: TEMOS UM TOTAL DÊ R$: {tot_taxas:.2f} REAIS.')
print(f'AS MESMAS SÃO REFERENTE AS DEVIDAS CONVERSÕES ABAIXO:')
print('\033[1;90m------------------------------------------------------------\033[0;0m')
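                # Break the fee total down by the currency pairs that produced it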
if tot_movimento_brasil_dolar_eua > 0:
print(
f'DE: BRL - BRASIL / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_dolar_eua:.2f} REAIS.')
if tot_movimento_brasil_euro > 0:
print(
                        f'DE: BRL - BRASIL / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_euro:.2f} REAIS.')
if tot_movimento_brasil_dolar_canada > 0:
print(
f'DE: BRL - BRASIL / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ R$: {tot_movimento_brasil_dolar_canada:.2f} REAIS.')
if tot_movimento_dolar_eua_brasil > 0:
print(
f'DE: DÓLAR - EUA / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_brasil:.2f} DÓLAR')
if tot_movimento_dolar_eua_euro > 0:
print(
f'DE: DÓLAR - EUA / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_euro:.2f} DÓLAR')
if tot_movimento_dolar_eua_dolar_canada > 0:
print(
f'DE: DÓLAR - EUA / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_eua_dolar_canada:.2f} DÓLAR')
if tot_movimento_euro_brasil > 0:
print(
f'DE: EURO - EUROPA / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_brasil:.2f} EURO.')
if tot_movimento_euro_dolar_eua > 0:
print(
f'DE: EURO - EUROPA / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_dolar_eua:.2f} EURO.')
if tot_movimento_euro_dolar_canada > 0:
print(
f'DE: EURO - EUROPA / PARA: DÓLAR - CANADÁ / MOVIMENTAÇÃO TOTAL DÊ €: {tot_movimento_euro_dolar_canada:.2f} EURO.')
if tot_movimento_dolar_canada_brasil > 0:
print(
f'DE: DÓLAR - CANADÁ / PARA: BRL - BRASIL / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_brasil:.2f} DÓLAR')
if tot_movimento_dolar_canada_dolar_eua > 0:
print(
f'DE: DÓLAR - CANADÁ / PARA: DÓLAR - EUA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_dolar_eua:.2f} DÓLAR')
if tot_movimento_dolar_canada_euro > 0:
print(
f'DE: DÓLAR - CANADÁ / PARA: EURO - EUROPA / MOVIMENTAÇÃO TOTAL DÊ U$$: {tot_movimento_dolar_canada_euro:.2f} DÓLAR')
print()
print()
print()
print('--' * 35)
print('--' * 35)
while True:
voltar_menu_principal = str(input('Digite:[S]-SAIR:')).strip().upper()[0]
if voltar_menu_principal == 'S':
break
print('--' * 35)
print(' \033[1;30m\033[1;43m CARREGANDO MENU PRINCIPAL\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.3)
print('\033[1;30m\033[1;43m50%\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m.\033[0;0m', end='')
sleep(0.8)
print('\033[1;30m\033[1;43m100%\033[0;0m', end='')
print('\033[1;30m\033[1;43m \033[0;0m', end='')
print('\n')
| 0 | 0 | 0 |
721a71db00d39666bd8dc4ed09ae4dc0ad626673 | 910 | py | Python | Cogs/TestCog.py | BloomAutist47/bloom-bo | e3b298dc7ba27b8e526b18c2750b494b8a66ab3b | [
"CC0-1.0"
] | 1 | 2021-09-07T09:51:16.000Z | 2021-09-07T09:51:16.000Z | Cogs/TestCog.py | BloomAutist47/bloom-bo | e3b298dc7ba27b8e526b18c2750b494b8a66ab3b | [
"CC0-1.0"
] | null | null | null | Cogs/TestCog.py | BloomAutist47/bloom-bo | e3b298dc7ba27b8e526b18c2750b494b8a66ab3b | [
"CC0-1.0"
] | 3 | 2021-02-19T20:13:21.000Z | 2022-02-04T03:56:43.000Z | from .Base import *
from discord.ext import commands
from discord.utils import get | 30.333333 | 66 | 0.607692 | from .Base import *
from discord.ext import commands
from discord.utils import get
class TestCog(commands.Cog, BaseTools):
def __init__(self, bot):
self.setup()
self.bot = bot
self.list_links = {}
self.compare = {}
@commands.command()
async def test(self, ctx, *, value=""):
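        # Manual check: send a raw role mention to a hard-coded channel to verify the ping goes through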
# searched_role = get(ctx.guild.roles, name='Daily Gifts')
channel = await self.bot.fetch_channel(801384957364142101)
# await channel.send(searched_role.mention)
await channel.send("<@&814054683651342366>")
# await channel.send("<&@814054683651342366>")
# await channel.send("<@!>")
@commands.command()
async def ee(self, ctx):
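        # Manual check: fetch a wiki page with contentcreator() and print the parsed result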
url="http://aqwwiki.wikidot.com/ewfwefwefwf"
soup = await self.contentcreator(url)
print("Retards: ", soup)
        if soup == "None":
print("yes")
print(soup) | 658 | 147 | 23 |
c02d6565bfebaec5de189a39aa7ec6f21d1d6700 | 382 | py | Python | sweep_test.py | winksaville/cadquery-wing1 | 43da6a179e1a527401a4328764f3726048d66339 | [
"MIT"
] | null | null | null | sweep_test.py | winksaville/cadquery-wing1 | 43da6a179e1a527401a4328764f3726048d66339 | [
"MIT"
] | null | null | null | sweep_test.py | winksaville/cadquery-wing1 | 43da6a179e1a527401a4328764f3726048d66339 | [
"MIT"
] | null | null | null | a = 1
b = 5
h = b*5
d = a*5
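# Sweep an a-by-b ellipse along a spline path that rises to height h and ends offset by d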
s = (
cq.Workplane("XY")
.ellipse(a,b)
.sweep(
cq.Workplane("XZ")
# Start 0deg tangent, Tip finishes with 90deg tangent
.spline([(0, i) for i in range(h)] + [(d,h)],[(0,1),(1,0)])
# Start 0deg tangent, Tip finishes with 45deg tangent
#.spline([(0,0),(0,h),(d,h+5*d)],[(0,1),(1,1)]) #original
)
)
| 21.222222 | 67 | 0.494764 | a = 1
b = 5
h = b*5
d = a*5
s = (
cq.Workplane("XY")
.ellipse(a,b)
.sweep(
cq.Workplane("XZ")
# Start 0deg tangent, Tip finishes with 90deg tangent
.spline([(0, i) for i in range(h)] + [(d,h)],[(0,1),(1,0)])
# Start 0deg tangent, Tip finishes with 45deg tangent
#.spline([(0,0),(0,h),(d,h+5*d)],[(0,1),(1,1)]) #original
)
)
| 0 | 0 | 0 |
67ca4046b26fc6f2949edcf97d480f0651eba7a6 | 7,442 | py | Python | designateclient/functionaltests/v2/fixtures.py | mail2nsrajesh/python-designateclient | 3bb401758c00a9d66383484c60933421d9a21d63 | [
"Apache-2.0"
] | null | null | null | designateclient/functionaltests/v2/fixtures.py | mail2nsrajesh/python-designateclient | 3bb401758c00a9d66383484c60933421d9a21d63 | [
"Apache-2.0"
] | null | null | null | designateclient/functionaltests/v2/fixtures.py | mail2nsrajesh/python-designateclient | 3bb401758c00a9d66383484c60933421d9a21d63 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import tempfile
import traceback
import fixtures
from tempest.lib.exceptions import CommandFailed
from testtools.runtest import MultipleExceptions
from designateclient.functionaltests.client import DesignateCLI
class ZoneFixture(BaseFixture):
"""See DesignateCLI.zone_create for __init__ args"""
@classmethod
class TransferRequestFixture(BaseFixture):
"""See DesignateCLI.zone_transfer_request_create for __init__ args"""
@classmethod
class ExportFixture(BaseFixture):
"""See DesignateCLI.zone_export_create for __init__ args"""
@classmethod
class ImportFixture(BaseFixture):
"""See DesignateCLI.zone_import_create for __init__ args"""
@classmethod
class RecordsetFixture(BaseFixture):
"""See DesignateCLI.recordset_create for __init__ args"""
@classmethod
class TLDFixture(BaseFixture):
"""See DesignateCLI.tld_create for __init__ args"""
@classmethod
class BlacklistFixture(BaseFixture):
"""See DesignateCLI.zone_blacklist_create for __init__ args"""
@classmethod
| 34.775701 | 79 | 0.658963 | """
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import tempfile
import traceback
import fixtures
from tempest.lib.exceptions import CommandFailed
from testtools.runtest import MultipleExceptions
from designateclient.functionaltests.client import DesignateCLI
class BaseFixture(fixtures.Fixture):
def __init__(self, user='default', *args, **kwargs):
"""args/kwargs are forwarded to a create method on DesignateCLI"""
super(BaseFixture, self).__init__()
self.args = args
self.kwargs = kwargs
self.client = DesignateCLI.as_user(user)
def setUp(self):
# Sometimes, exceptions are raised in _setUp methods on fixtures.
# testtools pushes the exception into a MultipleExceptions object along
# with an artificial SetupError, which produces bad error messages.
# This just logs those stack traces to stderr for easier debugging.
try:
super(BaseFixture, self).setUp()
except MultipleExceptions as e:
for i, exc_info in enumerate(e.args):
print('--- printing MultipleExceptions traceback {} of {} ---'
.format(i + 1, len(e.args)), file=sys.stderr)
traceback.print_exception(*exc_info)
raise
class ZoneFixture(BaseFixture):
"""See DesignateCLI.zone_create for __init__ args"""
def _setUp(self):
super(ZoneFixture, self)._setUp()
self.zone = self.client.zone_create(*self.args, **self.kwargs)
self.addCleanup(self.cleanup_zone, self.client, self.zone.id)
@classmethod
def cleanup_zone(cls, client, zone_id):
try:
client.zone_delete(zone_id)
except CommandFailed:
pass
class TransferRequestFixture(BaseFixture):
"""See DesignateCLI.zone_transfer_request_create for __init__ args"""
def __init__(self, zone, user='default', target_user='alt', *args,
**kwargs):
super(TransferRequestFixture, self).__init__(user, *args, **kwargs)
self.zone = zone
self.target_client = DesignateCLI.as_user(target_user)
# the client has a bug such that it requires --target-project-id.
# when this bug is fixed, please remove this
self.kwargs['target_project_id'] = self.target_client.project_id
def _setUp(self):
super(TransferRequestFixture, self)._setUp()
self.transfer_request = self.client.zone_transfer_request_create(
zone_id=self.zone.id,
*self.args, **self.kwargs
)
self.addCleanup(self.cleanup_transfer_request, self.client,
self.transfer_request.id)
self.addCleanup(ZoneFixture.cleanup_zone, self.client, self.zone.id)
self.addCleanup(ZoneFixture.cleanup_zone, self.target_client,
self.zone.id)
@classmethod
def cleanup_transfer_request(cls, client, transfer_request_id):
try:
client.zone_transfer_request_delete(transfer_request_id)
except CommandFailed:
pass
class ExportFixture(BaseFixture):
"""See DesignateCLI.zone_export_create for __init__ args"""
def __init__(self, zone, user='default', *args, **kwargs):
super(ExportFixture, self).__init__(user, *args, **kwargs)
self.zone = zone
def _setUp(self):
super(ExportFixture, self)._setUp()
self.zone_export = self.client.zone_export_create(
zone_id=self.zone.id,
*self.args, **self.kwargs
)
self.addCleanup(self.cleanup_zone_export, self.client,
self.zone_export.id)
self.addCleanup(ZoneFixture.cleanup_zone, self.client, self.zone.id)
@classmethod
def cleanup_zone_export(cls, client, zone_export_id):
try:
client.zone_export_delete(zone_export_id)
except CommandFailed:
pass
class ImportFixture(BaseFixture):
"""See DesignateCLI.zone_import_create for __init__ args"""
def __init__(self, zone_file_contents, user='default', *args, **kwargs):
super(ImportFixture, self).__init__(user, *args, **kwargs)
self.zone_file_contents = zone_file_contents
def _setUp(self):
super(ImportFixture, self)._setUp()
with tempfile.NamedTemporaryFile() as f:
f.write(self.zone_file_contents)
f.flush()
self.zone_import = self.client.zone_import_create(
zone_file_path=f.name,
*self.args, **self.kwargs
)
self.addCleanup(self.cleanup_zone_import, self.client,
self.zone_import.id)
self.addCleanup(ZoneFixture.cleanup_zone, self.client,
self.zone_import.zone_id)
@classmethod
def cleanup_zone_import(cls, client, zone_import_id):
try:
client.zone_import_delete(zone_import_id)
except CommandFailed:
pass
class RecordsetFixture(BaseFixture):
"""See DesignateCLI.recordset_create for __init__ args"""
def _setUp(self):
super(RecordsetFixture, self)._setUp()
self.recordset = self.client.recordset_create(
*self.args, **self.kwargs)
self.addCleanup(self.cleanup_recordset, self.client,
self.recordset.zone_id, self.recordset.id)
@classmethod
def cleanup_recordset(cls, client, zone_id, recordset_id):
try:
client.recordset_delete(zone_id, recordset_id)
except CommandFailed:
pass
class TLDFixture(BaseFixture):
"""See DesignateCLI.tld_create for __init__ args"""
def __init__(self, user='admin', *args, **kwargs):
super(TLDFixture, self).__init__(user=user, *args, **kwargs)
def _setUp(self):
super(TLDFixture, self)._setUp()
self.tld = self.client.tld_create(*self.args, **self.kwargs)
self.addCleanup(self.cleanup_tld, self.client, self.tld.id)
@classmethod
def cleanup_tld(cls, client, tld_id):
try:
client.tld_delete(tld_id)
except CommandFailed:
pass
class BlacklistFixture(BaseFixture):
"""See DesignateCLI.zone_blacklist_create for __init__ args"""
def __init__(self, user='admin', *args, **kwargs):
super(BlacklistFixture, self).__init__(user=user, *args, **kwargs)
def _setUp(self):
super(BlacklistFixture, self)._setUp()
self.blacklist = self.client.zone_blacklist_create(*self.args,
**self.kwargs)
self.addCleanup(self.cleanup_blacklist, self.client, self.blacklist.id)
@classmethod
def cleanup_blacklist(cls, client, blacklist_id):
try:
client.zone_blacklist_delete(blacklist_id)
except CommandFailed:
pass
| 4,899 | 322 | 529 |
2dfdec1d2a034d95564f8f983b6af317c3bf00d9 | 135 | py | Python | app/recipe/admin.py | fdomingues-travelperk/recipes-exercise | fbc00f98ba93043850e8c2ec0512f18bff274551 | [
"MIT"
] | null | null | null | app/recipe/admin.py | fdomingues-travelperk/recipes-exercise | fbc00f98ba93043850e8c2ec0512f18bff274551 | [
"MIT"
] | null | null | null | app/recipe/admin.py | fdomingues-travelperk/recipes-exercise | fbc00f98ba93043850e8c2ec0512f18bff274551 | [
"MIT"
] | null | null | null | from django.contrib import admin
from recipe import models
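# Make the Recipe and Ingredient models manageable from the Django admin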
admin.site.register(models.Recipe)
admin.site.register(models.Ingredient)
| 19.285714 | 38 | 0.82963 | from django.contrib import admin
from recipe import models
admin.site.register(models.Recipe)
admin.site.register(models.Ingredient)
| 0 | 0 | 0 |
6f66a39ae443cdb3ff11e4b0bec5ccb6b7a6512e | 24,373 | py | Python | nipype/interfaces/semtools/filtering/featuredetection.py | demianw/nipype | 52d64c30d96ecd94f1833156e28dce32c4f05ebe | [
"BSD-3-Clause"
] | null | null | null | nipype/interfaces/semtools/filtering/featuredetection.py | demianw/nipype | 52d64c30d96ecd94f1833156e28dce32c4f05ebe | [
"BSD-3-Clause"
] | 2 | 2017-10-05T21:08:38.000Z | 2018-10-09T23:01:23.000Z | nipype/interfaces/semtools/filtering/featuredetection.py | effigies/nipype | 18fe222557cf3b9627e06b2a66fba589feaca581 | [
"Apache-2.0"
] | 1 | 2016-10-11T19:18:53.000Z | 2016-10-11T19:18:53.000Z | # -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
import os
from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine,
TraitedSpec, File, Directory, traits, isdefined,
InputMultiPath, OutputMultiPath)
class GenerateSummedGradientImage(SEMLikeCommandLine):
"""title: GenerateSummedGradient
category: Filtering.FeatureDetection
description: Automatic FeatureImages using neural networks
version: 1.0
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: Greg Harris, Eun Young Kim
"""
input_spec = GenerateSummedGradientImageInputSpec
output_spec = GenerateSummedGradientImageOutputSpec
_cmd = " GenerateSummedGradientImage "
_outputs_filenames = {'outputFileName': 'outputFileName'}
_redirect_x = False
class CannySegmentationLevelSetImageFilter(SEMLikeCommandLine):
"""title: Canny Level Set Image Filter
category: Filtering.FeatureDetection
description: The CannySegmentationLevelSet is commonly used to refine a manually generated manual mask.
version: 0.3.0
license: CC
contributor: Regina Kim
acknowledgements: This command module was derived from Insight/Examples/Segmentation/CannySegmentationLevelSetImageFilter.cxx (copyright) Insight Software Consortium. See http://wiki.na-mic.org/Wiki/index.php/Slicer3:Execution_Model_Documentation for more detailed descriptions.
"""
input_spec = CannySegmentationLevelSetImageFilterInputSpec
output_spec = CannySegmentationLevelSetImageFilterOutputSpec
_cmd = " CannySegmentationLevelSetImageFilter "
_outputs_filenames = {'outputVolume': 'outputVolume.nii', 'outputSpeedVolume': 'outputSpeedVolume.nii'}
_redirect_x = False
class DilateImage(SEMLikeCommandLine):
"""title: Dilate Image
category: Filtering.FeatureDetection
description: Uses mathematical morphology to dilate the input images.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = DilateImageInputSpec
output_spec = DilateImageOutputSpec
_cmd = " DilateImage "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class TextureFromNoiseImageFilter(SEMLikeCommandLine):
"""title: TextureFromNoiseImageFilter
category: Filtering.FeatureDetection
description: Calculate the local noise in an image.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Eunyoung Regina Kim
"""
input_spec = TextureFromNoiseImageFilterInputSpec
output_spec = TextureFromNoiseImageFilterOutputSpec
_cmd = " TextureFromNoiseImageFilter "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class FlippedDifference(SEMLikeCommandLine):
"""title: Flip Image
category: Filtering.FeatureDetection
description: Difference between an image and the axially flipped version of that image.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = FlippedDifferenceInputSpec
output_spec = FlippedDifferenceOutputSpec
_cmd = " FlippedDifference "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class ErodeImage(SEMLikeCommandLine):
"""title: Erode Image
category: Filtering.FeatureDetection
description: Uses mathematical morphology to erode the input images.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = ErodeImageInputSpec
output_spec = ErodeImageOutputSpec
_cmd = " ErodeImage "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class GenerateBrainClippedImage(SEMLikeCommandLine):
"""title: GenerateBrainClippedImage
category: Filtering.FeatureDetection
description: Automatic FeatureImages using neural networks
version: 1.0
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: Eun Young Kim
"""
input_spec = GenerateBrainClippedImageInputSpec
output_spec = GenerateBrainClippedImageOutputSpec
_cmd = " GenerateBrainClippedImage "
_outputs_filenames = {'outputFileName': 'outputFileName'}
_redirect_x = False
class NeighborhoodMedian(SEMLikeCommandLine):
"""title: Neighborhood Median
category: Filtering.FeatureDetection
description: Calculates the median, for the given neighborhood size, at each voxel of the input image.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = NeighborhoodMedianInputSpec
output_spec = NeighborhoodMedianOutputSpec
_cmd = " NeighborhoodMedian "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class GenerateTestImage(SEMLikeCommandLine):
"""title: DownSampleImage
category: Filtering.FeatureDetection
description: Down sample image for testing
version: 1.0
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: Eun Young Kim
"""
input_spec = GenerateTestImageInputSpec
output_spec = GenerateTestImageOutputSpec
_cmd = " GenerateTestImage "
_outputs_filenames = {'outputVolume': 'outputVolume'}
_redirect_x = False
class NeighborhoodMean(SEMLikeCommandLine):
"""title: Neighborhood Mean
category: Filtering.FeatureDetection
description: Calculates the mean, for the given neighborhood size, at each voxel of the T1, T2, and FLAIR.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = NeighborhoodMeanInputSpec
output_spec = NeighborhoodMeanOutputSpec
_cmd = " NeighborhoodMean "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class HammerAttributeCreator(SEMLikeCommandLine):
"""title: HAMMER Feature Vectors
category: Filtering.FeatureDetection
description: Create the feature vectors used by HAMMER.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This was extracted from the Hammer Registration source code, and wrapped up by Hans J. Johnson.
"""
input_spec = HammerAttributeCreatorInputSpec
output_spec = HammerAttributeCreatorOutputSpec
_cmd = " HammerAttributeCreator "
_outputs_filenames = {}
_redirect_x = False
class TextureMeasureFilter(SEMLikeCommandLine):
"""title: Canny Level Set Image Filter
category: Filtering.FeatureDetection
description: The CannySegmentationLevelSet is commonly used to refine a manually generated manual mask.
version: 0.3.0
license: CC
contributor: Regina Kim
acknowledgements: This command module was derived from Insight/Examples/Segmentation/CannySegmentationLevelSetImageFilter.cxx (copyright) Insight Software Consortium. See http://wiki.na-mic.org/Wiki/index.php/Slicer3:Execution_Model_Documentation for more detailed descriptions.
"""
input_spec = TextureMeasureFilterInputSpec
output_spec = TextureMeasureFilterOutputSpec
_cmd = " TextureMeasureFilter "
_outputs_filenames = {'outputFilename': 'outputFilename'}
_redirect_x = False
class DilateMask(SEMLikeCommandLine):
"""title: Dilate Image
category: Filtering.FeatureDetection
description: Uses mathematical morphology to dilate the input images.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = DilateMaskInputSpec
output_spec = DilateMaskOutputSpec
_cmd = " DilateMask "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class DumpBinaryTrainingVectors(SEMLikeCommandLine):
"""title: Erode Image
category: Filtering.FeatureDetection
description: Uses mathematical morphology to erode the input images.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = DumpBinaryTrainingVectorsInputSpec
output_spec = DumpBinaryTrainingVectorsOutputSpec
_cmd = " DumpBinaryTrainingVectors "
_outputs_filenames = {}
_redirect_x = False
class DistanceMaps(SEMLikeCommandLine):
"""title: Mauerer Distance
category: Filtering.FeatureDetection
description: Get the distance from a voxel to the nearest voxel of a given tissue type.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = DistanceMapsInputSpec
output_spec = DistanceMapsOutputSpec
_cmd = " DistanceMaps "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class STAPLEAnalysis(SEMLikeCommandLine):
"""title: Dilate Image
category: Filtering.FeatureDetection
description: Uses mathematical morphology to dilate the input images.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = STAPLEAnalysisInputSpec
output_spec = STAPLEAnalysisOutputSpec
_cmd = " STAPLEAnalysis "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class GradientAnisotropicDiffusionImageFilter(SEMLikeCommandLine):
"""title: GradientAnisopropicDiffusionFilter
category: Filtering.FeatureDetection
description: Image Smoothing using Gradient Anisotropic Diffuesion Filer
contributor: This tool was developed by Eun Young Kim by modifying ITK Example
"""
input_spec = GradientAnisotropicDiffusionImageFilterInputSpec
output_spec = GradientAnisotropicDiffusionImageFilterOutputSpec
_cmd = " GradientAnisotropicDiffusionImageFilter "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class CannyEdge(SEMLikeCommandLine):
"""title: Canny Edge Detection
category: Filtering.FeatureDetection
description: Get the distance from a voxel to the nearest voxel of a given tissue type.
version: 0.1.0.(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was written by Hans J. Johnson.
"""
input_spec = CannyEdgeInputSpec
output_spec = CannyEdgeOutputSpec
_cmd = " CannyEdge "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
| 37.097412 | 287 | 0.761129 | # -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
import os
from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine,
TraitedSpec, File, Directory, traits, isdefined,
InputMultiPath, OutputMultiPath)
class GenerateSummedGradientImageInputSpec(CommandLineInputSpec):
inputVolume1 = File(desc="input volume 1, usally t1 image", exists=True, argstr="--inputVolume1 %s")
inputVolume2 = File(desc="input volume 2, usally t2 image", exists=True, argstr="--inputVolume2 %s")
outputFileName = traits.Either(traits.Bool, File(), hash_files=False, desc="(required) output file name", argstr="--outputFileName %s")
MaximumGradient = traits.Bool(desc="If set this flag, it will compute maximum gradient between two input volumes instead of sum of it.", argstr="--MaximumGradient ")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class GenerateSummedGradientImageOutputSpec(TraitedSpec):
outputFileName = File(desc="(required) output file name", exists=True)
class GenerateSummedGradientImage(SEMLikeCommandLine):
"""title: GenerateSummedGradient
category: Filtering.FeatureDetection
description: Automatic FeatureImages using neural networks
version: 1.0
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: Greg Harris, Eun Young Kim
"""
input_spec = GenerateSummedGradientImageInputSpec
output_spec = GenerateSummedGradientImageOutputSpec
_cmd = " GenerateSummedGradientImage "
_outputs_filenames = {'outputFileName': 'outputFileName'}
_redirect_x = False
class CannySegmentationLevelSetImageFilterInputSpec(CommandLineInputSpec):
inputVolume = File(exists=True, argstr="--inputVolume %s")
initialModel = File(exists=True, argstr="--initialModel %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, argstr="--outputVolume %s")
outputSpeedVolume = traits.Either(traits.Bool, File(), hash_files=False, argstr="--outputSpeedVolume %s")
cannyThreshold = traits.Float(desc="Canny Threshold Value", argstr="--cannyThreshold %f")
cannyVariance = traits.Float(desc="Canny variance", argstr="--cannyVariance %f")
advectionWeight = traits.Float(desc="Controls the smoothness of the resulting mask, small number are more smooth, large numbers allow more sharp corners. ", argstr="--advectionWeight %f")
initialModelIsovalue = traits.Float(desc="The identification of the input model iso-surface. (for a binary image with 0s and 1s use 0.5) (for a binary image with 0s and 255's use 127.5).", argstr="--initialModelIsovalue %f")
maxIterations = traits.Int(desc="The", argstr="--maxIterations %d")
class CannySegmentationLevelSetImageFilterOutputSpec(TraitedSpec):
outputVolume = File(exists=True)
outputSpeedVolume = File(exists=True)
class CannySegmentationLevelSetImageFilter(SEMLikeCommandLine):
"""title: Canny Level Set Image Filter
category: Filtering.FeatureDetection
description: The CannySegmentationLevelSet is commonly used to refine a manually generated manual mask.
version: 0.3.0
license: CC
contributor: Regina Kim
acknowledgements: This command module was derived from Insight/Examples/Segmentation/CannySegmentationLevelSetImageFilter.cxx (copyright) Insight Software Consortium. See http://wiki.na-mic.org/Wiki/index.php/Slicer3:Execution_Model_Documentation for more detailed descriptions.
"""
input_spec = CannySegmentationLevelSetImageFilterInputSpec
output_spec = CannySegmentationLevelSetImageFilterOutputSpec
_cmd = " CannySegmentationLevelSetImageFilter "
_outputs_filenames = {'outputVolume': 'outputVolume.nii', 'outputSpeedVolume': 'outputSpeedVolume.nii'}
_redirect_x = False
class DilateImageInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input image", exists=True, argstr="--inputVolume %s")
inputMaskVolume = File(desc="Required: input brain mask image", exists=True, argstr="--inputMaskVolume %s")
inputRadius = traits.Int(desc="Required: input neighborhood radius", argstr="--inputRadius %d")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: output image", argstr="--outputVolume %s")
class DilateImageOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: output image", exists=True)
class DilateImage(SEMLikeCommandLine):
"""title: Dilate Image
category: Filtering.FeatureDetection
description: Uses mathematical morphology to dilate the input images.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = DilateImageInputSpec
output_spec = DilateImageOutputSpec
_cmd = " DilateImage "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class TextureFromNoiseImageFilterInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input image", exists=True, argstr="--inputVolume %s")
inputRadius = traits.Int(desc="Required: input neighborhood radius", argstr="--inputRadius %d")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: output image", argstr="--outputVolume %s")
class TextureFromNoiseImageFilterOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: output image", exists=True)
class TextureFromNoiseImageFilter(SEMLikeCommandLine):
"""title: TextureFromNoiseImageFilter
category: Filtering.FeatureDetection
description: Calculate the local noise in an image.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Eunyoung Regina Kim
"""
input_spec = TextureFromNoiseImageFilterInputSpec
output_spec = TextureFromNoiseImageFilterOutputSpec
_cmd = " TextureFromNoiseImageFilter "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class FlippedDifferenceInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input image", exists=True, argstr="--inputVolume %s")
inputMaskVolume = File(desc="Required: input brain mask image", exists=True, argstr="--inputMaskVolume %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: output image", argstr="--outputVolume %s")
class FlippedDifferenceOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: output image", exists=True)
class FlippedDifference(SEMLikeCommandLine):
"""title: Flip Image
category: Filtering.FeatureDetection
description: Difference between an image and the axially flipped version of that image.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = FlippedDifferenceInputSpec
output_spec = FlippedDifferenceOutputSpec
_cmd = " FlippedDifference "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class ErodeImageInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input image", exists=True, argstr="--inputVolume %s")
inputMaskVolume = File(desc="Required: input brain mask image", exists=True, argstr="--inputMaskVolume %s")
inputRadius = traits.Int(desc="Required: input neighborhood radius", argstr="--inputRadius %d")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: output image", argstr="--outputVolume %s")
class ErodeImageOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: output image", exists=True)
class ErodeImage(SEMLikeCommandLine):
"""title: Erode Image
category: Filtering.FeatureDetection
description: Uses mathematical morphology to erode the input images.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = ErodeImageInputSpec
output_spec = ErodeImageOutputSpec
_cmd = " ErodeImage "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class GenerateBrainClippedImageInputSpec(CommandLineInputSpec):
inputImg = File(desc="input volume 1, usally t1 image", exists=True, argstr="--inputImg %s")
inputMsk = File(desc="input volume 2, usally t2 image", exists=True, argstr="--inputMsk %s")
outputFileName = traits.Either(traits.Bool, File(), hash_files=False, desc="(required) output file name", argstr="--outputFileName %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class GenerateBrainClippedImageOutputSpec(TraitedSpec):
outputFileName = File(desc="(required) output file name", exists=True)
class GenerateBrainClippedImage(SEMLikeCommandLine):
"""title: GenerateBrainClippedImage
category: Filtering.FeatureDetection
description: Automatic FeatureImages using neural networks
version: 1.0
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: Eun Young Kim
"""
input_spec = GenerateBrainClippedImageInputSpec
output_spec = GenerateBrainClippedImageOutputSpec
_cmd = " GenerateBrainClippedImage "
_outputs_filenames = {'outputFileName': 'outputFileName'}
_redirect_x = False
class NeighborhoodMedianInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input image", exists=True, argstr="--inputVolume %s")
inputMaskVolume = File(desc="Required: input brain mask image", exists=True, argstr="--inputMaskVolume %s")
inputRadius = traits.Int(desc="Required: input neighborhood radius", argstr="--inputRadius %d")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: output image", argstr="--outputVolume %s")
class NeighborhoodMedianOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: output image", exists=True)
class NeighborhoodMedian(SEMLikeCommandLine):
"""title: Neighborhood Median
category: Filtering.FeatureDetection
description: Calculates the median, for the given neighborhood size, at each voxel of the input image.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = NeighborhoodMedianInputSpec
output_spec = NeighborhoodMedianOutputSpec
_cmd = " NeighborhoodMedian "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class GenerateTestImageInputSpec(CommandLineInputSpec):
inputVolume = File(desc="input volume 1, usally t1 image", exists=True, argstr="--inputVolume %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="(required) output file name", argstr="--outputVolume %s")
lowerBoundOfOutputVolume = traits.Float(argstr="--lowerBoundOfOutputVolume %f")
upperBoundOfOutputVolume = traits.Float(argstr="--upperBoundOfOutputVolume %f")
outputVolumeSize = traits.Float(desc="output Volume Size", argstr="--outputVolumeSize %f")
class GenerateTestImageOutputSpec(TraitedSpec):
outputVolume = File(desc="(required) output file name", exists=True)
class GenerateTestImage(SEMLikeCommandLine):
"""title: DownSampleImage
category: Filtering.FeatureDetection
description: Down sample image for testing
version: 1.0
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: Eun Young Kim
"""
input_spec = GenerateTestImageInputSpec
output_spec = GenerateTestImageOutputSpec
_cmd = " GenerateTestImage "
_outputs_filenames = {'outputVolume': 'outputVolume'}
_redirect_x = False
class NeighborhoodMeanInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input image", exists=True, argstr="--inputVolume %s")
inputMaskVolume = File(desc="Required: input brain mask image", exists=True, argstr="--inputMaskVolume %s")
inputRadius = traits.Int(desc="Required: input neighborhood radius", argstr="--inputRadius %d")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: output image", argstr="--outputVolume %s")
class NeighborhoodMeanOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: output image", exists=True)
class NeighborhoodMean(SEMLikeCommandLine):
"""title: Neighborhood Mean
category: Filtering.FeatureDetection
description: Calculates the mean, for the given neighborhood size, at each voxel of the T1, T2, and FLAIR.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = NeighborhoodMeanInputSpec
output_spec = NeighborhoodMeanOutputSpec
_cmd = " NeighborhoodMean "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class HammerAttributeCreatorInputSpec(CommandLineInputSpec):
Scale = traits.Int(desc="Determine Scale of Ball", argstr="--Scale %d")
Strength = traits.Float(desc="Determine Strength of Edges", argstr="--Strength %f")
inputGMVolume = File(desc="Required: input grey matter posterior image", exists=True, argstr="--inputGMVolume %s")
inputWMVolume = File(desc="Required: input white matter posterior image", exists=True, argstr="--inputWMVolume %s")
inputCSFVolume = File(desc="Required: input CSF posterior image", exists=True, argstr="--inputCSFVolume %s")
outputVolumeBase = traits.Str(desc="Required: output image base name to be appended for each feature vector.", argstr="--outputVolumeBase %s")
class HammerAttributeCreatorOutputSpec(TraitedSpec):
pass
class HammerAttributeCreator(SEMLikeCommandLine):
"""title: HAMMER Feature Vectors
category: Filtering.FeatureDetection
description: Create the feature vectors used by HAMMER.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This was extracted from the Hammer Registration source code, and wrapped up by Hans J. Johnson.
"""
input_spec = HammerAttributeCreatorInputSpec
output_spec = HammerAttributeCreatorOutputSpec
_cmd = " HammerAttributeCreator "
_outputs_filenames = {}
_redirect_x = False
class TextureMeasureFilterInputSpec(CommandLineInputSpec):
inputVolume = File(exists=True, argstr="--inputVolume %s")
inputMaskVolume = File(exists=True, argstr="--inputMaskVolume %s")
distance = traits.Int(argstr="--distance %d")
insideROIValue = traits.Float(argstr="--insideROIValue %f")
outputFilename = traits.Either(traits.Bool, File(), hash_files=False, argstr="--outputFilename %s")
class TextureMeasureFilterOutputSpec(TraitedSpec):
outputFilename = File(exists=True)
class TextureMeasureFilter(SEMLikeCommandLine):
"""title: Canny Level Set Image Filter
category: Filtering.FeatureDetection
description: The CannySegmentationLevelSet is commonly used to refine a manually generated manual mask.
version: 0.3.0
license: CC
contributor: Regina Kim
acknowledgements: This command module was derived from Insight/Examples/Segmentation/CannySegmentationLevelSetImageFilter.cxx (copyright) Insight Software Consortium. See http://wiki.na-mic.org/Wiki/index.php/Slicer3:Execution_Model_Documentation for more detailed descriptions.
"""
input_spec = TextureMeasureFilterInputSpec
output_spec = TextureMeasureFilterOutputSpec
_cmd = " TextureMeasureFilter "
_outputs_filenames = {'outputFilename': 'outputFilename'}
_redirect_x = False
class DilateMaskInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input image", exists=True, argstr="--inputVolume %s")
inputBinaryVolume = File(desc="Required: input brain mask image", exists=True, argstr="--inputBinaryVolume %s")
sizeStructuralElement = traits.Int(desc="size of structural element. sizeStructuralElement=1 means that 3x3x3 structuring element for 3D", argstr="--sizeStructuralElement %d")
lowerThreshold = traits.Float(desc="Required: lowerThreshold value", argstr="--lowerThreshold %f")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: output image", argstr="--outputVolume %s")
class DilateMaskOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: output image", exists=True)
class DilateMask(SEMLikeCommandLine):
"""title: Dilate Image
category: Filtering.FeatureDetection
description: Uses mathematical morphology to dilate the input images.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = DilateMaskInputSpec
output_spec = DilateMaskOutputSpec
_cmd = " DilateMask "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class DumpBinaryTrainingVectorsInputSpec(CommandLineInputSpec):
inputHeaderFilename = File(desc="Required: input header file name", exists=True, argstr="--inputHeaderFilename %s")
inputVectorFilename = File(desc="Required: input vector filename", exists=True, argstr="--inputVectorFilename %s")
class DumpBinaryTrainingVectorsOutputSpec(TraitedSpec):
pass
class DumpBinaryTrainingVectors(SEMLikeCommandLine):
"""title: Erode Image
category: Filtering.FeatureDetection
description: Uses mathematical morphology to erode the input images.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = DumpBinaryTrainingVectorsInputSpec
output_spec = DumpBinaryTrainingVectorsOutputSpec
_cmd = " DumpBinaryTrainingVectors "
_outputs_filenames = {}
_redirect_x = False
class DistanceMapsInputSpec(CommandLineInputSpec):
inputLabelVolume = File(desc="Required: input tissue label image", exists=True, argstr="--inputLabelVolume %s")
inputMaskVolume = File(desc="Required: input brain mask image", exists=True, argstr="--inputMaskVolume %s")
inputTissueLabel = traits.Int(desc="Required: input integer value of tissue type used to calculate distance", argstr="--inputTissueLabel %d")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: output image", argstr="--outputVolume %s")
class DistanceMapsOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: output image", exists=True)
class DistanceMaps(SEMLikeCommandLine):
"""title: Mauerer Distance
category: Filtering.FeatureDetection
description: Get the distance from a voxel to the nearest voxel of a given tissue type.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = DistanceMapsInputSpec
output_spec = DistanceMapsOutputSpec
_cmd = " DistanceMaps "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class STAPLEAnalysisInputSpec(CommandLineInputSpec):
inputDimension = traits.Int(desc="Required: input image Dimension 2 or 3", argstr="--inputDimension %d")
inputLabelVolume = InputMultiPath(File(exists=True), desc="Required: input label volume", argstr="--inputLabelVolume %s...")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: output image", argstr="--outputVolume %s")
class STAPLEAnalysisOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: output image", exists=True)
class STAPLEAnalysis(SEMLikeCommandLine):
"""title: Dilate Image
category: Filtering.FeatureDetection
description: Uses mathematical morphology to dilate the input images.
version: 0.1.0.$Revision: 1 $(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was developed by Mark Scully and Jeremy Bockholt.
"""
input_spec = STAPLEAnalysisInputSpec
output_spec = STAPLEAnalysisOutputSpec
_cmd = " STAPLEAnalysis "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class GradientAnisotropicDiffusionImageFilterInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input image", exists=True, argstr="--inputVolume %s")
numberOfIterations = traits.Int(desc="Optional value for number of Iterations", argstr="--numberOfIterations %d")
timeStep = traits.Float(desc="Time step for diffusion process", argstr="--timeStep %f")
conductance = traits.Float(desc="Conductance for diffusion process", argstr="--conductance %f")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: output image", argstr="--outputVolume %s")
class GradientAnisotropicDiffusionImageFilterOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: output image", exists=True)
class GradientAnisotropicDiffusionImageFilter(SEMLikeCommandLine):
"""title: GradientAnisopropicDiffusionFilter
category: Filtering.FeatureDetection
description: Image Smoothing using Gradient Anisotropic Diffuesion Filer
contributor: This tool was developed by Eun Young Kim by modifying ITK Example
"""
input_spec = GradientAnisotropicDiffusionImageFilterInputSpec
output_spec = GradientAnisotropicDiffusionImageFilterOutputSpec
_cmd = " GradientAnisotropicDiffusionImageFilter "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class CannyEdgeInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input tissue label image", exists=True, argstr="--inputVolume %s")
variance = traits.Float(desc="Variance and Maximum error are used in the Gaussian smoothing of the input image. See itkDiscreteGaussianImageFilter for information on these parameters.", argstr="--variance %f")
upperThreshold = traits.Float(
desc="Threshold is the lowest allowed value in the output image. Its data type is the same as the data type of the output image. Any values below the Threshold level will be replaced with the OutsideValue parameter value, whose default is zero. ", argstr="--upperThreshold %f")
lowerThreshold = traits.Float(
desc="Threshold is the lowest allowed value in the output image. Its data type is the same as the data type of the output image. Any values below the Threshold level will be replaced with the OutsideValue parameter value, whose default is zero. ", argstr="--lowerThreshold %f")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: output image", argstr="--outputVolume %s")
class CannyEdgeOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: output image", exists=True)
class CannyEdge(SEMLikeCommandLine):
"""title: Canny Edge Detection
category: Filtering.FeatureDetection
description: Get the distance from a voxel to the nearest voxel of a given tissue type.
version: 0.1.0.(alpha)
documentation-url: http:://www.na-mic.org/
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: This tool was written by Hans J. Johnson.
"""
input_spec = CannyEdgeInputSpec
output_spec = CannyEdgeOutputSpec
_cmd = " CannyEdge "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
| 0 | 11,699 | 828 |
fb13245a2c7ed2656aecee51c83e4daf02bf09af | 1,699 | py | Python | product_catalog/tests/test_urls.py | ssorin/django-product-catalog | 98495de057a5bb6a04801dac800edc8fb3437f47 | [
"BSD-3-Clause"
] | 4 | 2018-12-02T14:51:47.000Z | 2020-12-12T09:06:18.000Z | product_catalog/tests/test_urls.py | ssorin/django-product-catalog | 98495de057a5bb6a04801dac800edc8fb3437f47 | [
"BSD-3-Clause"
] | 6 | 2018-12-20T08:22:25.000Z | 2022-03-11T23:11:30.000Z | product_catalog/tests/test_urls.py | ssorin/django-product-catalog | 98495de057a5bb6a04801dac800edc8fb3437f47 | [
"BSD-3-Clause"
] | 2 | 2017-12-06T23:56:14.000Z | 2021-07-23T16:16:18.000Z | # coding=utf-8
""" Product Catalog: urls test cases """
from django.test import TestCase
from django.urls import reverse
from django.urls import resolve
from product_catalog.models.product import Product
from product_catalog.models.category import Category
| 35.395833 | 83 | 0.64744 | # coding=utf-8
""" Product Catalog: urls test cases """
from django.test import TestCase
from django.urls import reverse
from django.urls import resolve
from product_catalog.models.product import Product
from product_catalog.models.category import Category
class UrlsTestCase(TestCase):
def setUp(self):
params = {'title': 'Product 1',
'content': 'Lorem ipsum',
'slug': 'product-1'}
self.product = Product.objects.create(**params)
self.category = Category.objects.create(title='Category 1',
slug='category-1')
def test_category_urls(self):
url = reverse('product_catalog:category_list')
self.assertEqual(url, '/categories/')
url = reverse('product_catalog:category_detail', args=[self.category.slug])
self.assertEqual(url, '/categories/%s/' % self.category.slug)
def test_product_urls(self):
url = reverse('product_catalog:product_list')
self.assertEqual(url, '/products/')
url = reverse('product_catalog:product_detail', args=[self.product.slug])
self.assertEqual(url, '/products/%s/' % self.product.slug)
url = reverse('product_catalog:product_home')
self.assertEqual(url, '/')
url = reverse('product_catalog:product_add')
self.assertEqual(url, '/products/add/')
url = reverse('product_catalog:product_update', args=[self.product.id])
self.assertEqual(url, '/products/update/%s/' % self.product.pk)
url = reverse('product_catalog:product_delete', args=[self.product.id])
self.assertEqual(url, '/products/delete/%s/' % self.product.pk)
| 1,327 | 8 | 104 |
c0624a4c47c50d8a78a829cdb8707fb16a0bbf74 | 8,194 | py | Python | code/msa.py | yanwang271/PKSpop | 458e719a4bf1c67a9ea6a3f70b579d62e24a0191 | [
"BSD-3-Clause"
] | 2 | 2019-04-11T16:35:31.000Z | 2019-04-11T16:43:44.000Z | code/msa.py | yanwang271/PKSpop | 458e719a4bf1c67a9ea6a3f70b579d62e24a0191 | [
"BSD-3-Clause"
] | null | null | null | code/msa.py | yanwang271/PKSpop | 458e719a4bf1c67a9ea6a3f70b579d62e24a0191 | [
"BSD-3-Clause"
] | 3 | 2019-04-11T16:36:36.000Z | 2020-11-20T16:10:04.000Z | #!/usr/bin/env python3
"""
Cluster and align the docking domain sequences
"""
import os
import subprocess
from Bio import SeqIO
from extract_seq import remove_invalid_pro
def clustering(info_dict):
'''
Cluster the query sequences to 3 classes
'''
c_seq = info_dict['c_seq_path']
n_seq = info_dict['n_seq_path']
pro_ls = info_dict['protein_id']
print(f'Clustering sequence...')
c_class_seq = cluster_seq(c_seq,'PKSpop/data/hmm_profile/c_groups_hmmpf')
n_class_seq = cluster_seq(n_seq,'PKSpop/data/hmm_profile/n_groups_hmmpf')
for pro in pro_ls:
if pro not in c_class_seq:
if not info_dict['end_pro']:
info_dict["end_pro"].append(pro)
if pro not in n_class_seq:
if pro not in info_dict['start_pro']:
info_dict["start_pro"].append(pro)
### Check if there are proteins that do not have c and n termini
remove_invalid_pro(info_dict)
### Check if there are more than one start/end protein
wether_predict(info_dict)
c_class1_fl = c_seq.replace('raw.fasta',\
f'class_1.fasta')
n_class1_fl = n_seq.replace('raw.fasta',\
f'class_1.fasta')
if not os.path.exists(c_class1_fl):
raise Exception('There is no c-term class 1 docking domain in\
this assembly line, cannot perform prediction')
if not os.path.exists(n_class1_fl):
raise Exception('There is no n-term class 1 docking domain in\
this assembly line, cannot perform prediction')
with open(c_class1_fl) as c_class1_seq:
num_seq_c = len(list(SeqIO.parse(c_class1_seq,'fasta')))
with open(n_class1_fl) as n_class1_seq:
num_seq_n = len(list(SeqIO.parse(n_class1_seq,'fasta')))
    if num_seq_c != num_seq_n:
raise Exception('Unequal number of class 1 c and n docking domain,\
cannot perform the protein order prediction')
return info_dict
def cluster_seq(query_fl, hmmpf_db):
'''
Cluster the query sequences to 3 classes
    Generate sequence fasta files of the 3 classes
'''
hmmscan_out_path = query_fl.replace('raw.fasta',\
'hmmscan_oupt.txt')
    ### Run hmmscan to assign each seq to its best-matching class
run_hmmscan(hmmscan_out_path, hmmpf_db, query_fl)
group, class_seq = parse_hmmscan_out(hmmscan_out_path)
group_seq_fasta(group, query_fl)
return class_seq
def msa(info_dict):
'''
Aligning sequences by HMMER
'''
c_seq = info_dict['c_seq_path']
n_seq = info_dict['n_seq_path']
### Use class1 sequences to perform MSA
c_inpt = c_seq.replace('raw.fasta',\
f'class_1.fasta')
n_inpt = n_seq.replace('raw.fasta',\
f'class_1.fasta')
    ### Add an already-cut sequence to help locate the cutting position
add_position_helper(c_inpt, 'c')
add_position_helper(n_inpt, 'n')
    ### Align sequences against hmm profile
c_align = c_inpt.replace('.fasta','_aln.afa')
n_align = n_inpt.replace('.fasta','_aln.afa')
print(f'Aligning sequences...')
run_hmmalign(c_align, 'PKSpop/data/hmm_profile/c_group_1.hmm', c_inpt)
run_hmmalign(n_align, 'PKSpop/data/hmm_profile/n_group_1.hmm', n_inpt)
### Identify the cutting position and cut the sequence
c_start, c_end = find_cut_position(c_align)
c_cut_fl = cut_seq(c_align, c_start, c_end)
n_start, n_end = find_cut_position(n_align)
n_cut_fl = cut_seq(n_align, n_start, n_end)
info_dict.update({'c_align_path':c_cut_fl, 'n_align_path':n_cut_fl})
return info_dict
def cut_seq(aln_fl, start_pos, end_pos):
'''
    Cut the aligned sequences to obtain the part that is used to perform
coevolutionary analysis
'''
cut_fl = aln_fl.replace('_aln.afa','.afa')
cut_seq = open(cut_fl, 'w')
aln_seq = open(aln_fl)
record = list(SeqIO.parse(aln_seq, 'fasta'))
for r in record:
if 'positioning' not in r.name:
seq = str(r.seq)[start_pos:end_pos]
cut_seq.write(f'>{r.name}\n{seq}\n')
aln_seq.close()
cut_seq.close()
return cut_fl
def add_position_helper(seq_fl, c_n):
'''
    Add an already-cut sequence to help locate the cutting position
'''
pos_seq = open('PKSpop/data/hmm_profile/positioning_helper.fasta')
record = list(SeqIO.parse(pos_seq, 'fasta'))
with open(seq_fl,'a') as seq:
if c_n == 'c':
r = record[0]
else:
r = record[1]
seq.write(f'>{r.name}\n{str(r.seq)}\n')
pos_seq.close()
def find_cut_position(aln_fl):
'''
Find the cutting position on the aligned sequences
'''
with open(aln_fl) as aln_seq:
record = list(SeqIO.parse(aln_seq, 'fasta'))
pos_seq = str(record[-1].seq)
for i in range(len(pos_seq)):
if pos_seq[i].isalpha() and not_alpha_s(pos_seq[:i]):
start_pos = i
elif pos_seq[i].isalpha() and not_alpha_s(pos_seq[i+1:]):
end_pos = i+1
return start_pos, end_pos
def not_alpha_s(string):
'''
    Return True if the string contains no alphabetic characters
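    A couple of illustrative calls (purely for demonstration):
    >>> not_alpha_s('--..')
    True
    >>> not_alpha_s('-a-')
    False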
'''
res = True
for s in string:
if s.isalpha():
res = False
return res
def run_hmmalign(aligned_fl, hmmfile, input_fl):
'''
Call HMMER
multiple sequence alignment against the profile
'''
cmd = f'hmmalign -o {aligned_fl} --outformat afa {hmmfile} {input_fl}'
subprocess.check_call(cmd.split(' '))
def run_hmmscan(hmmscan_out,hmmfile_db,query_fl):
'''
Search query sequences against hmmfile database,
    to find out which family they belong to
'''
cmd = f'hmmscan --tblout {hmmscan_out} {hmmfile_db} {query_fl}'
subprocess.check_call(cmd.split(' '))
def parse_hmmscan_out(hmmscan_out):
'''
Find which group each sequence belong to and
Store the group information into a dictionary: {group_name:[sequence_id],}
'''
scan = open(hmmscan_out)
group = {}
class_seq = []
last_seq = ''
for line in scan:
if not line.startswith('#'):
info = line.strip().split(' - ')
info = [info[0].strip(),info[1].strip()]
class_seq.append(info[1])
if not info[0] in group:
group.update({info[0]:[]})
if info[1] != last_seq:
group[info[0]].append(info[1])
last_seq = info[1]
else:
last_seq = info[1]
scan.close()
return group, class_seq
def group_seq_fasta(group, query_fl):
'''
Write sequences from a group, which is detected by hmmscan,
to a new fasta file
'''
query_seq = open(query_fl)
record = list(SeqIO.parse(query_seq,'fasta'))
for name in group.keys():
out_fl = query_fl.replace('raw.fasta',\
f'class_{name[-1]}.fasta')
fasta = open(out_fl,'w')
for item in group[name]:
for r in record:
if r.name == item:
fasta.write(f'>{item}\n{str(r.seq)}\n')
break
fasta.close()
query_seq.close()
def wether_predict(info_dict):
'''
    Stop the prediction if there is more than one start/end protein in
the assembly line
'''
start_pro = info_dict['start_pro']
if len(start_pro) > 1:
raise Exception(f'There are more than 1 start protein: \
{",".join(start_pro)}, the protein order cannot be predicted')
elif len(start_pro) == 1:
info_dict['start_pro'] = start_pro[0]
else:
info_dict['start_pro'] = ''
end_pro = info_dict['end_pro']
if len(end_pro) > 1:
raise Exception(f'There are more than 1 end protein: \
{",".join(end_pro)}, the protein order cannot be predicted')
elif len(end_pro) == 1:
info_dict['end_pro'] = end_pro[0]
else:
info_dict['end_pro'] = ''
| 33.444898 | 95 | 0.601782 | #!/usr/bin/env python3
"""
Cluster and align the docking domain sequences
"""
import os
import subprocess
from Bio import SeqIO
from extract_seq import remove_invalid_pro
def clustering(info_dict):
'''
Cluster the query sequences to 3 classes
'''
c_seq = info_dict['c_seq_path']
n_seq = info_dict['n_seq_path']
pro_ls = info_dict['protein_id']
print(f'Clustering sequence...')
c_class_seq = cluster_seq(c_seq,'PKSpop/data/hmm_profile/c_groups_hmmpf')
n_class_seq = cluster_seq(n_seq,'PKSpop/data/hmm_profile/n_groups_hmmpf')
for pro in pro_ls:
if pro not in c_class_seq:
if not info_dict['end_pro']:
info_dict["end_pro"].append(pro)
if pro not in n_class_seq:
if pro not in info_dict['start_pro']:
info_dict["start_pro"].append(pro)
### Check if there are proteins that do not have c and n termini
remove_invalid_pro(info_dict)
### Check if there are more than one start/end protein
wether_predict(info_dict)
c_class1_fl = c_seq.replace('raw.fasta',\
f'class_1.fasta')
n_class1_fl = n_seq.replace('raw.fasta',\
f'class_1.fasta')
if not os.path.exists(c_class1_fl):
raise Exception('There is no c-term class 1 docking domain in\
this assembly line, cannot perform prediction')
if not os.path.exists(n_class1_fl):
raise Exception('There is no n-term class 1 docking domain in\
this assembly line, cannot perform prediction')
with open(c_class1_fl) as c_class1_seq:
num_seq_c = len(list(SeqIO.parse(c_class1_seq,'fasta')))
with open(n_class1_fl) as n_class1_seq:
num_seq_n = len(list(SeqIO.parse(n_class1_seq,'fasta')))
    if num_seq_c != num_seq_n:
raise Exception('Unequal number of class 1 c and n docking domain,\
cannot perform the protein order prediction')
return info_dict
def cluster_seq(query_fl, hmmpf_db):
'''
Cluster the query sequences to 3 classes
    Generate sequence fasta files of the 3 classes
'''
hmmscan_out_path = query_fl.replace('raw.fasta',\
'hmmscan_oupt.txt')
    ### Run hmmscan to assign each seq to its best-matching class
run_hmmscan(hmmscan_out_path, hmmpf_db, query_fl)
group, class_seq = parse_hmmscan_out(hmmscan_out_path)
group_seq_fasta(group, query_fl)
return class_seq
def msa(info_dict):
'''
Aligning sequences by HMMER
'''
c_seq = info_dict['c_seq_path']
n_seq = info_dict['n_seq_path']
### Use class1 sequences to perform MSA
c_inpt = c_seq.replace('raw.fasta',\
f'class_1.fasta')
n_inpt = n_seq.replace('raw.fasta',\
f'class_1.fasta')
    ### Add an already-cut sequence to help locate the cutting position
add_position_helper(c_inpt, 'c')
add_position_helper(n_inpt, 'n')
    ### Align sequences against hmm profile
c_align = c_inpt.replace('.fasta','_aln.afa')
n_align = n_inpt.replace('.fasta','_aln.afa')
print(f'Aligning sequences...')
run_hmmalign(c_align, 'PKSpop/data/hmm_profile/c_group_1.hmm', c_inpt)
run_hmmalign(n_align, 'PKSpop/data/hmm_profile/n_group_1.hmm', n_inpt)
### Identify the cutting position and cut the sequence
c_start, c_end = find_cut_position(c_align)
c_cut_fl = cut_seq(c_align, c_start, c_end)
n_start, n_end = find_cut_position(n_align)
n_cut_fl = cut_seq(n_align, n_start, n_end)
info_dict.update({'c_align_path':c_cut_fl, 'n_align_path':n_cut_fl})
return info_dict
def cut_seq(aln_fl, start_pos, end_pos):
'''
    Cut the aligned sequences to obtain the part that is used to perform
coevolutionary analysis
'''
cut_fl = aln_fl.replace('_aln.afa','.afa')
cut_seq = open(cut_fl, 'w')
aln_seq = open(aln_fl)
record = list(SeqIO.parse(aln_seq, 'fasta'))
for r in record:
if 'positioning' not in r.name:
seq = str(r.seq)[start_pos:end_pos]
cut_seq.write(f'>{r.name}\n{seq}\n')
aln_seq.close()
cut_seq.close()
return cut_fl
def add_position_helper(seq_fl, c_n):
'''
    Add an already-cut sequence to help locate the cutting position
'''
pos_seq = open('PKSpop/data/hmm_profile/positioning_helper.fasta')
record = list(SeqIO.parse(pos_seq, 'fasta'))
with open(seq_fl,'a') as seq:
if c_n == 'c':
r = record[0]
else:
r = record[1]
seq.write(f'>{r.name}\n{str(r.seq)}\n')
pos_seq.close()
def find_cut_position(aln_fl):
'''
Find the cutting position on the aligned sequences
'''
with open(aln_fl) as aln_seq:
record = list(SeqIO.parse(aln_seq, 'fasta'))
pos_seq = str(record[-1].seq)
for i in range(len(pos_seq)):
if pos_seq[i].isalpha() and not_alpha_s(pos_seq[:i]):
start_pos = i
elif pos_seq[i].isalpha() and not_alpha_s(pos_seq[i+1:]):
end_pos = i+1
return start_pos, end_pos
def not_alpha_s(string):
'''
    Return True if the string contains no alphabetic characters
'''
res = True
for s in string:
if s.isalpha():
res = False
return res
def run_hmmalign(aligned_fl, hmmfile, input_fl):
'''
Call HMMER
multiple sequence alignment against the profile
'''
cmd = f'hmmalign -o {aligned_fl} --outformat afa {hmmfile} {input_fl}'
subprocess.check_call(cmd.split(' '))
def run_hmmscan(hmmscan_out,hmmfile_db,query_fl):
'''
Search query sequences against hmmfile database,
    to find out which family they belong to
'''
cmd = f'hmmscan --tblout {hmmscan_out} {hmmfile_db} {query_fl}'
subprocess.check_call(cmd.split(' '))
def parse_hmmscan_out(hmmscan_out):
'''
Find which group each sequence belong to and
Store the group information into a dictionary: {group_name:[sequence_id],}
'''
scan = open(hmmscan_out)
group = {}
class_seq = []
last_seq = ''
for line in scan:
if not line.startswith('#'):
info = line.strip().split(' - ')
info = [info[0].strip(),info[1].strip()]
class_seq.append(info[1])
if not info[0] in group:
group.update({info[0]:[]})
if info[1] != last_seq:
group[info[0]].append(info[1])
last_seq = info[1]
else:
last_seq = info[1]
scan.close()
return group, class_seq
def group_seq_fasta(group, query_fl):
'''
Write sequences from a group, which is detected by hmmscan,
to a new fasta file
'''
query_seq = open(query_fl)
record = list(SeqIO.parse(query_seq,'fasta'))
for name in group.keys():
out_fl = query_fl.replace('raw.fasta',\
f'class_{name[-1]}.fasta')
fasta = open(out_fl,'w')
for item in group[name]:
for r in record:
if r.name == item:
fasta.write(f'>{item}\n{str(r.seq)}\n')
break
fasta.close()
query_seq.close()
def wether_predict(info_dict):
'''
    Stop the prediction if there is more than one start/end protein in
the assembly line
'''
start_pro = info_dict['start_pro']
if len(start_pro) > 1:
raise Exception(f'There are more than 1 start protein: \
{",".join(start_pro)}, the protein order cannot be predicted')
elif len(start_pro) == 1:
info_dict['start_pro'] = start_pro[0]
else:
info_dict['start_pro'] = ''
end_pro = info_dict['end_pro']
if len(end_pro) > 1:
raise Exception(f'There are more than 1 end protein: \
{",".join(end_pro)}, the protein order cannot be predicted')
elif len(end_pro) == 1:
info_dict['end_pro'] = end_pro[0]
else:
info_dict['end_pro'] = ''
| 0 | 0 | 0 |
83a66691e7934e92a54af2411e1d853cc3a1d32c | 1,146 | py | Python | examples/reproject.py | EdsonGermano/socrata-py-walkthrough | 56901627fd3760987a8133a16710fbc5c5517aac | [
"Apache-2.0"
] | 1 | 2017-10-31T18:37:12.000Z | 2017-10-31T18:37:12.000Z | examples/reproject.py | EdsonGermano/socrata-py-walkthrough | 56901627fd3760987a8133a16710fbc5c5517aac | [
"Apache-2.0"
] | null | null | null | examples/reproject.py | EdsonGermano/socrata-py-walkthrough | 56901627fd3760987a8133a16710fbc5c5517aac | [
"Apache-2.0"
] | null | null | null | import sys
from examples.auth import authorization
from socrata import Socrata
socrata = Socrata(authorization)
file_path = sys.argv[1]
"""
This shows reprojecting from British National Grid
to WGS84
We're using the proj4 def from here:
http://spatialreference.org/ref/epsg/27700/
"""
with open(file_path, 'rb') as file:
(revision, output_schema) = socrata.create(
name = "parking structures",
description = "cool"
).csv(file)
(ok, output_schema) = output_schema\
.add_column(
'point_wgs84',
'Location',
"""
reproject_to_wgs84(
set_projection(
make_point(
to_number(`northing`),
to_number(`easting`)
),
"+proj=tmerc +lat_0=49 +lon_0=-2 +k=0.9996012717 +x_0=400000 +y_0=-100000 +ellps=airy +datum=OSGB36 +units=m +no_defs"
)
)
""",
'the easting/northing as wgs84 point'
)\
.run()
revision.apply(output_schema = output_schema)
revision.open_in_browser()
| 24.382979 | 138 | 0.558464 | import sys
from examples.auth import authorization
from socrata import Socrata
socrata = Socrata(authorization)
file_path = sys.argv[1]
"""
This shows reprojecting from British National Grid
to WGS84
We're using the proj4 def from here:
http://spatialreference.org/ref/epsg/27700/
"""
with open(file_path, 'rb') as file:
(revision, output_schema) = socrata.create(
name = "parking structures",
description = "cool"
).csv(file)
(ok, output_schema) = output_schema\
.add_column(
'point_wgs84',
'Location',
"""
reproject_to_wgs84(
set_projection(
make_point(
to_number(`northing`),
to_number(`easting`)
),
"+proj=tmerc +lat_0=49 +lon_0=-2 +k=0.9996012717 +x_0=400000 +y_0=-100000 +ellps=airy +datum=OSGB36 +units=m +no_defs"
)
)
""",
'the easting/northing as wgs84 point'
)\
.run()
revision.apply(output_schema = output_schema)
revision.open_in_browser()
| 0 | 0 | 0 |
596ff1419f0846f4d08cc8c3657fe39286179bfa | 7,650 | py | Python | yandex/cloud/mdb/greenplum/v1/resource_preset_pb2.py | ovandriyanov/python-sdk | eec7dc65ef23789388fa46d13087d4a03cdc6e57 | [
"MIT"
] | null | null | null | yandex/cloud/mdb/greenplum/v1/resource_preset_pb2.py | ovandriyanov/python-sdk | eec7dc65ef23789388fa46d13087d4a03cdc6e57 | [
"MIT"
] | null | null | null | yandex/cloud/mdb/greenplum/v1/resource_preset_pb2.py | ovandriyanov/python-sdk | eec7dc65ef23789388fa46d13087d4a03cdc6e57 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: yandex/cloud/mdb/greenplum/v1/resource_preset.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='yandex/cloud/mdb/greenplum/v1/resource_preset.proto',
package='yandex.cloud.mdb.greenplum.v1',
syntax='proto3',
serialized_options=b'\n!yandex.cloud.api.mdb.greenplum.v1ZKgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/greenplum/v1;greenplum',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n3yandex/cloud/mdb/greenplum/v1/resource_preset.proto\x12\x1dyandex.cloud.mdb.greenplum.v1\"\xb5\x02\n\x0eResourcePreset\x12\n\n\x02id\x18\x01 \x01(\t\x12\x10\n\x08zone_ids\x18\x02 \x03(\t\x12\r\n\x05\x63ores\x18\x03 \x01(\x03\x12\x0e\n\x06memory\x18\x04 \x01(\x03\x12@\n\x04type\x18\x05 \x01(\x0e\x32\x32.yandex.cloud.mdb.greenplum.v1.ResourcePreset.Type\x12\x16\n\x0emin_host_count\x18\x06 \x01(\x03\x12\x16\n\x0emax_host_count\x18\x07 \x01(\x03\x12\x1a\n\x12host_count_divider\x18\x08 \x01(\x03\x12!\n\x19max_segment_in_host_count\x18\t \x01(\x03\"5\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\n\n\x06MASTER\x10\x01\x12\x0b\n\x07SEGMENT\x10\x02\x42p\n!yandex.cloud.api.mdb.greenplum.v1ZKgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/greenplum/v1;greenplumb\x06proto3'
)
_RESOURCEPRESET_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.Type',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='TYPE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MASTER', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SEGMENT', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=343,
serialized_end=396,
)
_sym_db.RegisterEnumDescriptor(_RESOURCEPRESET_TYPE)
_RESOURCEPRESET = _descriptor.Descriptor(
name='ResourcePreset',
full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='zone_ids', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.zone_ids', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cores', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.cores', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='memory', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.memory', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.type', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='min_host_count', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.min_host_count', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_host_count', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.max_host_count', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='host_count_divider', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.host_count_divider', index=7,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_segment_in_host_count', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.max_segment_in_host_count', index=8,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_RESOURCEPRESET_TYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=87,
serialized_end=396,
)
_RESOURCEPRESET.fields_by_name['type'].enum_type = _RESOURCEPRESET_TYPE
_RESOURCEPRESET_TYPE.containing_type = _RESOURCEPRESET
DESCRIPTOR.message_types_by_name['ResourcePreset'] = _RESOURCEPRESET
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ResourcePreset = _reflection.GeneratedProtocolMessageType('ResourcePreset', (_message.Message,), {
'DESCRIPTOR' : _RESOURCEPRESET,
'__module__' : 'yandex.cloud.mdb.greenplum.v1.resource_preset_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.greenplum.v1.ResourcePreset)
})
_sym_db.RegisterMessage(ResourcePreset)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 47.515528 | 804 | 0.765621 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: yandex/cloud/mdb/greenplum/v1/resource_preset.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='yandex/cloud/mdb/greenplum/v1/resource_preset.proto',
package='yandex.cloud.mdb.greenplum.v1',
syntax='proto3',
serialized_options=b'\n!yandex.cloud.api.mdb.greenplum.v1ZKgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/greenplum/v1;greenplum',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n3yandex/cloud/mdb/greenplum/v1/resource_preset.proto\x12\x1dyandex.cloud.mdb.greenplum.v1\"\xb5\x02\n\x0eResourcePreset\x12\n\n\x02id\x18\x01 \x01(\t\x12\x10\n\x08zone_ids\x18\x02 \x03(\t\x12\r\n\x05\x63ores\x18\x03 \x01(\x03\x12\x0e\n\x06memory\x18\x04 \x01(\x03\x12@\n\x04type\x18\x05 \x01(\x0e\x32\x32.yandex.cloud.mdb.greenplum.v1.ResourcePreset.Type\x12\x16\n\x0emin_host_count\x18\x06 \x01(\x03\x12\x16\n\x0emax_host_count\x18\x07 \x01(\x03\x12\x1a\n\x12host_count_divider\x18\x08 \x01(\x03\x12!\n\x19max_segment_in_host_count\x18\t \x01(\x03\"5\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\n\n\x06MASTER\x10\x01\x12\x0b\n\x07SEGMENT\x10\x02\x42p\n!yandex.cloud.api.mdb.greenplum.v1ZKgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/greenplum/v1;greenplumb\x06proto3'
)
_RESOURCEPRESET_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.Type',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='TYPE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MASTER', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SEGMENT', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=343,
serialized_end=396,
)
_sym_db.RegisterEnumDescriptor(_RESOURCEPRESET_TYPE)
_RESOURCEPRESET = _descriptor.Descriptor(
name='ResourcePreset',
full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='zone_ids', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.zone_ids', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cores', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.cores', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='memory', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.memory', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.type', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='min_host_count', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.min_host_count', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_host_count', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.max_host_count', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='host_count_divider', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.host_count_divider', index=7,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_segment_in_host_count', full_name='yandex.cloud.mdb.greenplum.v1.ResourcePreset.max_segment_in_host_count', index=8,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_RESOURCEPRESET_TYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=87,
serialized_end=396,
)
_RESOURCEPRESET.fields_by_name['type'].enum_type = _RESOURCEPRESET_TYPE
_RESOURCEPRESET_TYPE.containing_type = _RESOURCEPRESET
DESCRIPTOR.message_types_by_name['ResourcePreset'] = _RESOURCEPRESET
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ResourcePreset = _reflection.GeneratedProtocolMessageType('ResourcePreset', (_message.Message,), {
'DESCRIPTOR' : _RESOURCEPRESET,
'__module__' : 'yandex.cloud.mdb.greenplum.v1.resource_preset_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.greenplum.v1.ResourcePreset)
})
_sym_db.RegisterMessage(ResourcePreset)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 0 | 0 | 0 |
9b91366659a3c4d04636e40540c25f7eb5e166e1 | 380 | py | Python | PythonProgram/learningList.challenge.py | subash-kc/2022-01-04-Python | 5ce51e4265bcd860a4e62423edef6ec9cd1437b4 | [
"MIT"
] | 1 | 2022-01-14T18:03:42.000Z | 2022-01-14T18:03:42.000Z | PythonProgram/learningList.challenge.py | subash-kc/2022-01-04-Python | 5ce51e4265bcd860a4e62423edef6ec9cd1437b4 | [
"MIT"
] | null | null | null | PythonProgram/learningList.challenge.py | subash-kc/2022-01-04-Python | 5ce51e4265bcd860a4e62423edef6ec9cd1437b4 | [
"MIT"
] | null | null | null | # display only the IP addresses to the screen.
iplist = [ 5060, "80", 55, "10.0.0.1", "10.20.30.1", "ssh" ]
# example 1 - add up the strings
print("IP addresses: " + iplist[3] + ", and " + iplist[4])
# example 2 - use the comma separator
print("IP addresses:", iplist[3], ", and", iplist[4])
# example 3 - use an 'f-string'
print(f"IP addresses: {iplist[3]}, and {iplist[4]}")
| 31.666667 | 60 | 0.621053 | # display only the IP addresses to the screen.
iplist = [ 5060, "80", 55, "10.0.0.1", "10.20.30.1", "ssh" ]
# example 1 - add up the strings
print("IP addresses: " + iplist[3] + ", and " + iplist[4])
# example 2 - use the comma separator
print("IP addresses:", iplist[3], ", and", iplist[4])
# example 3 - use an 'f-string'
print(f"IP addresses: {iplist[3]}, and {iplist[4]}")
| 0 | 0 | 0 |
62e51c96917728ce58f07d0e579853deb7b9df81 | 1,672 | py | Python | examples/fake_blinkt.py | druck13/blinkt | 4370a64cbd4cf7d6177967b902f16793a4c5fa60 | [
"MIT"
] | null | null | null | examples/fake_blinkt.py | druck13/blinkt | 4370a64cbd4cf7d6177967b902f16793a4c5fa60 | [
"MIT"
] | null | null | null | examples/fake_blinkt.py | druck13/blinkt | 4370a64cbd4cf7d6177967b902f16793a4c5fa60 | [
"MIT"
] | null | null | null | """fake_blink terminal simulation of blinkt, to use rename to blinkt.py and place in same directory as blinkt program"""
import sys
import pantilthat
import atexit
import signal
_clear_on_exit = True
_true_color = True
NUM_PIXELS = 8
pixels = [(0,0,0)] * NUM_PIXELS
def set_clear_on_exit(value=True):
"""Set whether Blinkt! should be cleared upon exit
By default Blinkt! will turn off the pixels on exit, but calling::
blinkt.set_clear_on_exit(False)
Will ensure that it does not.
:param value: True or False (default True)
"""
global _clear_on_exit
_clear_on_exit = value
# Module Initialisation
atexit.register(_exit)
| 21.714286 | 120 | 0.597488 | """fake_blink terminal simulation of blinkt, to use rename to blinkt.py and place in same directory as blinkt program"""
import sys
import pantilthat
import atexit
import signal
_clear_on_exit = True
_true_color = True
NUM_PIXELS = 8
pixels = [(0,0,0)] * NUM_PIXELS
def _exit():
if _clear_on_exit:
clear()
show()
else:
print("")
def set_brightness(brightness):
pass
def clear():
pixels[:] = [(0,0,0)] * NUM_PIXELS
def show():
sys.stdout.write(" ")
for (r,g,b) in pixels:
if _true_color:
sys.stdout.write("\033[48;2;%d;%d;%dm " % (r,g,b))
else:
if r==g==b:
col = 232 + r*24//256
else:
col = 16 + (b*6//256) + (g*6//256)*6 + (r*6//256)*36
sys.stdout.write("\033[48;5;%dm " % col)
sys.stdout.write("\033[0m\r")
sys.stdout.flush()
def set_all(r, g, b, brightness=None):
global _brightness
if brightness is not None:
_brightness = brightness
pixels[:] = [(r, g, b)] * NUM_PIXELS
def set_pixel(x, r, g, b, brightness=None):
global _brightness
if brightness is not None:
_brightness = brightness
pixels[x] = (r, g, b)
def get_pixel(x):
return pixels[x]
def set_clear_on_exit(value=True):
"""Set whether Blinkt! should be cleared upon exit
By default Blinkt! will turn off the pixels on exit, but calling::
blinkt.set_clear_on_exit(False)
Will ensure that it does not.
:param value: True or False (default True)
"""
global _clear_on_exit
_clear_on_exit = value
# Module Initialisation
atexit.register(_exit)
| 826 | 0 | 161 |
3173c69743a7c472b3549e043a0891d2507a1ec6 | 320 | py | Python | tests/test_cassette.py | mfocko/requre | 14b63ea4390abadcaa193aa63b037bd08d1ea480 | [
"MIT"
] | null | null | null | tests/test_cassette.py | mfocko/requre | 14b63ea4390abadcaa193aa63b037bd08d1ea480 | [
"MIT"
] | null | null | null | tests/test_cassette.py | mfocko/requre | 14b63ea4390abadcaa193aa63b037bd08d1ea480 | [
"MIT"
] | null | null | null | from unittest import TestCase
from requre.cassette import CassetteExecution
| 26.666667 | 47 | 0.6625 | from unittest import TestCase
from requre.cassette import CassetteExecution
class Execution(TestCase):
def testCreate(self):
ce = CassetteExecution()
ce.function = lambda: "ahoj"
ce.cassette = "nothing"
self.assertEqual("ahoj", ce.function())
self.assertEqual("ahoj", ce())
| 189 | 5 | 49 |
dc7855f40ff09180648f562fab98406c09b77282 | 180 | py | Python | src/cqml/__init__.py | TheSwanFactory/cqml | 0ca1ddfa23c8fa44612cf520896f2e4159ef5d49 | [
"MIT"
] | null | null | null | src/cqml/__init__.py | TheSwanFactory/cqml | 0ca1ddfa23c8fa44612cf520896f2e4159ef5d49 | [
"MIT"
] | null | null | null | src/cqml/__init__.py | TheSwanFactory/cqml | 0ca1ddfa23c8fa44612cf520896f2e4159ef5d49 | [
"MIT"
] | null | null | null | # CQML
# Compact Query Meta-language
# https://github.com/TheSwanFactory/hclang/blob/master/hc/cqml.hc
# TODO: https://github.com/LucaCanali/sparkMeasure
from .wrappers import *
| 22.5 | 65 | 0.766667 | # CQML
# Compact Query Meta-language
# https://github.com/TheSwanFactory/hclang/blob/master/hc/cqml.hc
# TODO: https://github.com/LucaCanali/sparkMeasure
from .wrappers import *
| 0 | 0 | 0 |
19281664ee89ca7e557219fcf463320f03835ff5 | 4,329 | py | Python | src/gocept/template_rewrite/main.py | Wiseqube/gocept.template_rewrite | 28f5742de9eaf6e204e7923c7f7020c36852456b | [
"MIT"
] | 3 | 2018-08-29T12:59:16.000Z | 2019-08-21T07:41:16.000Z | src/gocept/template_rewrite/main.py | Wiseqube/gocept.template_rewrite | 28f5742de9eaf6e204e7923c7f7020c36852456b | [
"MIT"
] | 10 | 2019-08-13T12:02:02.000Z | 2021-01-21T13:52:48.000Z | src/gocept/template_rewrite/main.py | Wiseqube/gocept.template_rewrite | 28f5742de9eaf6e204e7923c7f7020c36852456b | [
"MIT"
] | 3 | 2018-09-18T11:15:04.000Z | 2021-11-29T14:13:50.000Z | from gocept.template_rewrite.dtml import DTMLRegexRewriter
from gocept.template_rewrite.lib2to3 import rewrite_using_2to3
from gocept.template_rewrite.pagetemplates import PTParseError
from gocept.template_rewrite.pagetemplates import PTParserRewriter
import argparse
import logging
import os
import os.path
import pathlib
import pdb # noqa
log = logging.getLogger(__name__)
parser = argparse.ArgumentParser(
description='Rewrite Python expressions in DTML and ZPT template files.')
parser.add_argument('paths', type=str, nargs='+', metavar='path',
help='paths of files which should be rewritten or '
'directories containing such files')
parser.add_argument('--keep-files', action='store_true',
help='keep the original files, create *.out files instead')
parser.add_argument('--collect-errors', action='store_true',
help='If encountering an error, continue to collect all'
' errors, print them out and only exit at the end')
parser.add_argument('--force', choices=['pt', 'dtml'], default=None,
help='Treat all files as PageTemplate (pt) resp.'
'DocumentTemplate (dtml).')
parser.add_argument('-D', '--debug', action='store_true',
help='enter debugger on errors')
class FileHandler(object):
"""Handle the rewrite of batches of files."""
def rewrite_action(self, input_string, *args, **kwargs):
"""Use `rewrite_using_2to3` as default action.
        Can be overridden in a subclass.
"""
return rewrite_using_2to3(input_string, *args, **kwargs)
def _process_file(self, path, rewriter):
"""Process one file."""
log.warning('Processing %s', path)
try:
rw = rewriter(
path.read_text(), self.rewrite_action, filename=str(path))
except UnicodeDecodeError: # pragma: no cover
log.error('Error', exc_info=True)
else:
try:
result = rw()
except PTParseError:
self.errors = True
if self.collect_errors:
return
raise
file_out = pathlib.Path(str(path) + '.out')
file_out.write_text(result, encoding='utf-8')
self.output_files.append(file_out)
def process_files(self):
"""Process all collected files."""
for file_ in self.dtml_files:
self._process_file(file_, DTMLRegexRewriter)
for file_ in self.zpt_files:
self._process_file(file_, PTParserRewriter)
def main(args=None):
"""Act as an entry point."""
args = parser.parse_args(args)
fh = FileHandler(args.paths, args)
try:
fh()
except Exception: # pragma: no cover
if args.debug:
pdb.post_mortem()
raise
return 1 if fh.errors else 0
| 34.632 | 79 | 0.607993 | from gocept.template_rewrite.dtml import DTMLRegexRewriter
from gocept.template_rewrite.lib2to3 import rewrite_using_2to3
from gocept.template_rewrite.pagetemplates import PTParseError
from gocept.template_rewrite.pagetemplates import PTParserRewriter
import argparse
import logging
import os
import os.path
import pathlib
import pdb # noqa
log = logging.getLogger(__name__)
parser = argparse.ArgumentParser(
description='Rewrite Python expressions in DTML and ZPT template files.')
parser.add_argument('paths', type=str, nargs='+', metavar='path',
help='paths of files which should be rewritten or '
'directories containing such files')
parser.add_argument('--keep-files', action='store_true',
help='keep the original files, create *.out files instead')
parser.add_argument('--collect-errors', action='store_true',
help='If encountering an error, continue to collect all'
' errors, print them out and only exit at the end')
parser.add_argument('--force', choices=['pt', 'dtml'], default=None,
help='Treat all files as PageTemplate (pt) resp.'
'DocumentTemplate (dtml).')
parser.add_argument('-D', '--debug', action='store_true',
help='enter debugger on errors')
class FileHandler(object):
"""Handle the rewrite of batches of files."""
def __init__(self, paths, settings):
self.dtml_files = []
self.zpt_files = []
self.output_files = []
self.paths = paths
self.keep_files = settings.keep_files
self.collect_errors = settings.collect_errors
self.force_type = settings.force
self.errors = False
def __call__(self):
for path in self.paths:
self.collect_files(pathlib.Path(path))
self.process_files()
if self.errors:
log.error('Encountered errors, skipping file replacement.')
return
if not self.keep_files:
self.replace_files()
def rewrite_action(self, input_string, *args, **kwargs):
"""Use `rewrite_using_2to3` as default action.
        Can be overridden in a subclass.
"""
return rewrite_using_2to3(input_string, *args, **kwargs)
def collect_files(self, path):
if path.is_dir():
for root, dirs, files in os.walk(str(path)):
for file_ in files:
self._classify_file(pathlib.Path(root, file_))
else:
self._classify_file(path)
def _classify_file(self, path):
if self.force_type == 'dtml':
self.dtml_files.append(path)
elif self.force_type == 'pt':
self.zpt_files.append(path)
elif path.suffix in ('.dtml', '.sql'):
self.dtml_files.append(path)
elif path.suffix in ('.pt', '.xpt', '.html'):
self.zpt_files.append(path)
def _process_file(self, path, rewriter):
"""Process one file."""
log.warning('Processing %s', path)
try:
rw = rewriter(
path.read_text(), self.rewrite_action, filename=str(path))
except UnicodeDecodeError: # pragma: no cover
log.error('Error', exc_info=True)
else:
try:
result = rw()
except PTParseError:
self.errors = True
if self.collect_errors:
return
raise
file_out = pathlib.Path(str(path) + '.out')
file_out.write_text(result, encoding='utf-8')
self.output_files.append(file_out)
def process_files(self):
"""Process all collected files."""
for file_ in self.dtml_files:
self._process_file(file_, DTMLRegexRewriter)
for file_ in self.zpt_files:
self._process_file(file_, PTParserRewriter)
def replace_files(self):
for path in self.output_files:
path.rename(path.parent / path.stem)
def main(args=None):
"""Act as an entry point."""
args = parser.parse_args(args)
fh = FileHandler(args.paths, args)
try:
fh()
except Exception: # pragma: no cover
if args.debug:
pdb.post_mortem()
raise
return 1 if fh.errors else 0
| 1,276 | 0 | 135 |
aa6e809a57dfb74a5cc56be92f56d5c95ac81e0c | 22,400 | py | Python | stingray/events.py | nimeshvashistha/stingray | 10530b4dbcde6c0ef8228c0e634aa202b186cf22 | [
"MIT"
] | null | null | null | stingray/events.py | nimeshvashistha/stingray | 10530b4dbcde6c0ef8228c0e634aa202b186cf22 | [
"MIT"
] | null | null | null | stingray/events.py | nimeshvashistha/stingray | 10530b4dbcde6c0ef8228c0e634aa202b186cf22 | [
"MIT"
] | null | null | null | """
Definition of :class:`EventList`.
:class:`EventList` is used to handle photon arrival times.
"""
import copy
import pickle
import warnings
import numpy as np
import numpy.random as ra
from astropy.table import Table
from .filters import get_deadtime_mask
from .gti import append_gtis, check_separate, cross_gtis
from .io import load_events_and_gtis
from .lightcurve import Lightcurve
from .utils import assign_value_if_none, simon, interpret_times
__all__ = ['EventList']
class EventList(object):
"""
Basic class for event list data. Event lists generally correspond to individual events (e.g. photons)
recorded by the detector, and their associated properties. For X-ray data where this type commonly occurs,
events are time stamps of when a photon arrived in the detector, and (optionally) the photon energy associated
with the event.
Parameters
----------
time: iterable
A list or array of time stamps
Other Parameters
----------------
dt: float
The time resolution of the events. Only relevant when using events
to produce light curves with similar bin time.
energy: iterable
A list of array of photon energy values in keV
mjdref : float
The MJD used as a reference for the time array.
ncounts: int
Number of desired data points in event list.
gtis: ``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]``
Good Time Intervals
pi : integer, numpy.ndarray
PI channels
notes : str
Any useful annotations
high_precision : bool
Change the precision of self.time to float128. Useful while dealing with fast pulsars.
mission : str
Mission that recorded the data (e.g. NICER)
instr : str
Instrument onboard the mission
header : str
The full header of the original FITS file, if relevant
**other_kw :
Used internally. Any other keyword arguments will be ignored
Attributes
----------
time: numpy.ndarray
The array of event arrival times, in seconds from the reference
MJD defined in ``mjdref``
energy: numpy.ndarray
The array of photon energy values
ncounts: int
The number of data points in the event list
dt: float
The time resolution of the events. Only relevant when using events
to produce light curves with similar bin time.
mjdref : float
The MJD used as a reference for the time array.
gtis: ``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]``
Good Time Intervals
pi : integer, numpy.ndarray
PI channels
high_precision : bool
Change the precision of self.time to float128. Useful while dealing with fast pulsars.
mission : str
Mission that recorded the data (e.g. NICER)
instr : str
Instrument onboard the mission
detector_id : iterable
        The detector that recorded each photon, if relevant (e.g. XMM, Chandra)
header : str
The full header of the original FITS file, if relevant
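    Examples
    --------
    A minimal usage sketch (the arrival times below are made up purely for
    illustration):
    >>> ev = EventList(time=[0.5, 1.1, 2.2, 3.7])
    >>> len(ev.time)
    4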
"""
def to_lc(self, dt, tstart=None, tseg=None):
"""
Convert event list to a :class:`stingray.Lightcurve` object.
Parameters
----------
dt: float
Binning time of the light curve
Other Parameters
----------------
tstart : float
Start time of the light curve
tseg: float
Total duration of light curve
Returns
-------
lc: :class:`stingray.Lightcurve` object
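        Examples
        --------
        A minimal sketch (the event times, GTI and the 1-second bin width are
        made up for illustration):
        >>> ev = EventList([0.5, 1.5, 2.5])
        >>> ev.gti = np.asarray([[0., 3.]])
        >>> lc = ev.to_lc(dt=1.0)
        >>> int(lc.counts.sum())
        3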
"""
if tstart is None and self.gti is not None:
tstart = self.gti[0][0]
tseg = self.gti[-1][1] - tstart
return Lightcurve.make_lightcurve(self.time, dt, tstart=tstart,
gti=self.gti, tseg=tseg,
mjdref=self.mjdref)
def to_lc_list(self, dt):
"""Convert event list to a generator of Lightcurves.
Parameters
----------
dt: float
Binning time of the light curves
Returns
-------
lc_gen: generator
Generates one :class:`stingray.Lightcurve` object for each GTI
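        Examples
        --------
        A small sketch with two made-up GTIs, producing one light curve per
        GTI:
        >>> ev = EventList([1., 2., 5., 6.])
        >>> ev.gti = np.asarray([[0., 3.], [4., 7.]])
        >>> lcs = list(ev.to_lc_list(dt=1.0))
        >>> len(lcs)
        2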
"""
start_times = self.gti[:, 0]
end_times = self.gti[:, 1]
tsegs = end_times - start_times
for st, end, tseg in zip(start_times, end_times, tsegs):
idx_st = np.searchsorted(self.time, st, side='right')
idx_end = np.searchsorted(self.time, end, side='left')
lc = Lightcurve.make_lightcurve(self.time[idx_st:idx_end], dt,
tstart=st,
gti=np.asarray([[st, end]]),
tseg=tseg,
mjdref=self.mjdref)
yield lc
@staticmethod
def from_lc(lc):
"""
Create an :class:`EventList` from a :class:`stingray.Lightcurve` object. Note that all
events in a given time bin will have the same time stamp.
Parameters
----------
lc: :class:`stingray.Lightcurve` object
Light curve to use for creation of the event list.
Returns
-------
ev: :class:`EventList` object
The resulting list of photon arrival times generated from the light curve.
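        Examples
        --------
        An illustrative sketch using a tiny, made-up three-bin light curve:
        >>> lc = Lightcurve([1., 2., 3.], [2, 0, 1], dt=1)
        >>> ev = EventList.from_lc(lc)
        >>> np.allclose(ev.time, [1, 1, 3])
        True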
"""
# Multiply times by number of counts
times = [[i] * int(j) for i, j in zip(lc.time, lc.counts)]
# Concatenate all lists
times = [i for j in times for i in j]
return EventList(time=times, gti=lc.gti)
def simulate_times(self, lc, use_spline=False, bin_time=None):
"""
Randomly assign (simulate) photon arrival times to an :class:`EventList` from a
:class:`stingray.Lightcurve` object, using the acceptance-rejection method.
Parameters
----------
lc: :class:`stingray.Lightcurve` object
Other Parameters
----------------
use_spline : bool
Approximate the light curve with a spline to avoid binning effects
bin_time : float
The bin time of the light curve, if it needs to be specified for
improved precision
        Returns
        -------
        None
            The simulated arrival times are stored in ``self.time``; ``gti``
            and ``ncounts`` are updated accordingly.
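        Examples
        --------
        A minimal sketch (the light curve below is made up; the simulated
        times are random):
        >>> lc = Lightcurve([0.5, 1.5, 2.5], [100, 200, 100], dt=1)
        >>> ev = EventList()
        >>> ev.simulate_times(lc)
        >>> ev.ncounts == len(ev.time)
        True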
"""
from stingray.simulator.base import simulate_times
self.time = simulate_times(lc, use_spline=use_spline,
bin_time=bin_time)
self.gti = lc.gti
self.ncounts = len(self.time)
def simulate_energies(self, spectrum):
"""
Assign (simulate) energies to event list from a spectrum.
Parameters
----------
spectrum: 2-d array or list
Energies versus corresponding fluxes. The 2-d array or list must
have energies across the first dimension and fluxes across the
second one.
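        Examples
        --------
        A sketch with a made-up two-channel spectrum (energies in keV,
        arbitrary relative fluxes):
        >>> ev = EventList([1., 2., 3.])
        >>> ev.simulate_energies([[1., 2.], [100., 100.]])
        >>> len(ev.energy)
        3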
"""
if self.ncounts is None:
simon("Either set time values or explicity provide counts.")
return
if isinstance(spectrum, list) or isinstance(spectrum, np.ndarray):
energy = np.asarray(spectrum)[0]
fluxes = np.asarray(spectrum)[1]
if not isinstance(energy, np.ndarray):
raise IndexError("Spectrum must be a 2-d array or list")
else:
raise TypeError("Spectrum must be a 2-d array or list")
# Create a set of probability values
prob = fluxes / float(sum(fluxes))
# Calculate cumulative probability
cum_prob = np.cumsum(prob)
# Draw N random numbers between 0 and 1, where N is the size of event
# list
R = ra.uniform(0, 1, self.ncounts)
# Assign energies to events corresponding to the random numbers drawn
self.energy = \
np.asarray([
energy[np.argwhere(
cum_prob == np.min(cum_prob[(cum_prob - r) > 0]))]
for r in R])
def join(self, other):
"""
Join two :class:`EventList` objects into one.
If both are empty, an empty :class:`EventList` is returned.
GTIs are crossed if the event lists are over a common time interval,
and appended otherwise.
        ``pi`` and ``energy`` remain ``None`` if they are ``None`` in both. Otherwise, 0 is used
as a default value for the :class:`EventList` where they were None.
Parameters
----------
other : :class:`EventList` object
            The other :class:`EventList` object to be joined with this one.
Returns
-------
`ev_new` : :class:`EventList` object
The resulting :class:`EventList` object.
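        Examples
        --------
        A minimal sketch with two small, overlapping event lists (values made
        up for illustration):
        >>> ev1 = EventList([1., 2., 3.])
        >>> ev1.gti = np.asarray([[0., 4.]])
        >>> ev2 = EventList([2.5, 3.5])
        >>> ev2.gti = np.asarray([[2., 5.]])
        >>> ev_joined = ev1.join(ev2)
        >>> np.allclose(ev_joined.time, [1, 2, 2.5, 3, 3.5])
        True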
"""
ev_new = EventList()
if self.dt != other.dt:
simon("The time resolution is different."
" Using the rougher by default")
ev_new.dt = np.max([self.dt, other.dt])
if self.time is None and other.time is None:
return ev_new
if (self.time is None):
simon("One of the event lists you are concatenating is empty.")
self.time = np.asarray([])
elif (other.time is None):
simon("One of the event lists you are concatenating is empty.")
other.time = np.asarray([])
# Tolerance for MJDREF:1 microsecond
if not np.isclose(self.mjdref, other.mjdref, atol=1e-6 / 86400):
other = other.change_mjdref(self.mjdref)
ev_new.time = np.concatenate([self.time, other.time])
order = np.argsort(ev_new.time)
ev_new.time = ev_new.time[order]
if (self.pi is None) and (other.pi is None):
ev_new.pi = None
elif (self.pi is None) or (other.pi is None):
self.pi = assign_value_if_none(self.pi, np.zeros_like(self.time))
other.pi = assign_value_if_none(other.pi,
np.zeros_like(other.time))
if (self.pi is not None) and (other.pi is not None):
ev_new.pi = np.concatenate([self.pi, other.pi])
ev_new.pi = ev_new.pi[order]
if (self.energy is None) and (other.energy is None):
ev_new.energy = None
elif (self.energy is None) or (other.energy is None):
self.energy = assign_value_if_none(self.energy,
np.zeros_like(self.time))
other.energy = assign_value_if_none(other.energy,
np.zeros_like(other.time))
if (self.energy is not None) and (other.energy is not None):
ev_new.energy = np.concatenate([self.energy, other.energy])
ev_new.energy = ev_new.energy[order]
if self.gti is None and other.gti is not None and len(self.time) > 0:
self.gti = \
assign_value_if_none(
self.gti, np.asarray([[self.time[0] - self.dt / 2,
self.time[-1] + self.dt / 2]]))
if other.gti is None and self.gti is not None and len(other.time) > 0:
other.gti = \
assign_value_if_none(
other.gti, np.asarray([[other.time[0] - other.dt / 2,
other.time[-1] + other.dt / 2]]))
if (self.gti is None) and (other.gti is None):
ev_new.gti = None
elif (self.gti is not None) and (other.gti is not None):
if check_separate(self.gti, other.gti):
ev_new.gti = append_gtis(self.gti, other.gti)
simon('GTIs in these two event lists do not overlap at all.'
'Merging instead of returning an overlap.')
else:
ev_new.gti = cross_gtis([self.gti, other.gti])
ev_new.mjdref = self.mjdref
return ev_new
@staticmethod
def read(filename, format_="pickle", **kwargs):
"""
        Read an :class:`EventList` object from file.
Currently supported formats are
* pickle (not recommended for long-term storage)
* hea : FITS Event files from (well, some) HEASARC-supported missions.
        * any other formats compatible with the readers in
:class:`astropy.table.Table` (ascii.ecsv, hdf5, etc.)
Files that need the :class:`astropy.table.Table` interface MUST contain
at least a ``time`` column. Other recognized columns are ``energy`` and
``pi``.
The default ascii format is enhanced CSV (ECSV). Data formats
supporting the serialization of metadata (such as ECSV and HDF5) can
contain all eventlist attributes such as ``mission``, ``gti``, etc with
no significant loss of information. Other file formats might lose part
of the metadata, so must be used with care.
Parameters
----------
filename: str
Path and file name for the file to be read.
format\_: str
Available options are 'pickle', 'hea', and any `Table`-supported
format such as 'hdf5', 'ascii.ecsv', etc.
Returns
-------
ev: :class:`EventList` object
The :class:`EventList` object reconstructed from file
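        Examples
        --------
        A hedged sketch; the file name below is hypothetical:
        >>> ev = EventList.read("events.ecsv", format_="ascii")  # doctest: +SKIP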
"""
if format_ == 'pickle':
with open(filename, 'rb') as fobj:
return pickle.load(fobj)
if format_ in ('hea'):
evtdata = load_events_and_gtis(filename, **kwargs)
return EventList(time=evtdata.ev_list,
gti=evtdata.gti_list,
pi=evtdata.pi_list,
energy=evtdata.energy_list,
mjdref=evtdata.mjdref,
instr=evtdata.instr,
mission=evtdata.mission,
header=evtdata.header,
detector_id=evtdata.detector_id)
if format_ == 'ascii':
format_ = 'ascii.ecsv'
ts = Table.read(filename, format=format_)
return EventList.from_astropy_table(ts)
def write(self, filename, format_='pickle'):
"""
Write an :class:`EventList` object to file.
Possible file formats are
* pickle (not recommended for long-term storage)
* any other formats compatible with the writers in
:class:`astropy.table.Table` (ascii.ecsv, hdf5, etc.)
Parameters
----------
filename: str
            Name and path of the file to save the event list to.
format_: str
The file format to store the data in.
Available options are ``pickle``, ``hdf5``, ``ascii``, ``fits``
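        Examples
        --------
        A hedged sketch; the output file name is hypothetical:
        >>> ev = EventList([1., 2., 3.])
        >>> ev.write("my_events.ecsv", format_="ascii")  # doctest: +SKIP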
"""
if format_ == 'pickle':
with open(filename, "wb") as fobj:
pickle.dump(self, fobj)
return
if format_ == 'ascii':
format_ = 'ascii.ecsv'
ts = self.to_astropy_table()
try:
ts.write(filename, format=format_, overwrite=True,
serialize_meta=True)
except TypeError:
ts.write(filename, format=format_, overwrite=True)
def apply_deadtime(self, deadtime, inplace=False, **kwargs):
"""Apply deadtime filter to this event list.
Additional arguments in ``kwargs`` are passed to `get_deadtime_mask`
Parameters
----------
deadtime : float
Value of dead time to apply to data
inplace : bool, default False
If True, apply the deadtime to the current event list. Otherwise,
return a new event list.
Returns
-------
new_event_list : `EventList` object
            Filtered event list. If `inplace` is True, this is the input object
filtered for deadtime, otherwise this is a new object.
additional_output : object
Only returned if `return_all` is True. See `get_deadtime_mask` for
more details.
Examples
--------
>>> events = np.array([1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2])
>>> events = EventList(events)
>>> events.pi=np.array([1, 2, 2, 2, 2, 1, 1, 1, 2, 1])
>>> events.energy=np.array([1, 2, 2, 2, 2, 1, 1, 1, 2, 1])
>>> events.mjdref = 10
>>> filt_events, retval = events.apply_deadtime(0.11, inplace=False,
... verbose=False,
... return_all=True)
>>> filt_events is events
False
>>> expected = np.array([1, 2, 2.2, 3, 3.2])
>>> np.allclose(filt_events.time, expected)
True
>>> np.allclose(filt_events.pi, 1)
True
>>> np.allclose(filt_events.energy, 1)
True
>>> np.allclose(events.pi, 1)
False
>>> filt_events = events.apply_deadtime(0.11, inplace=True,
... verbose=False)
>>> filt_events is events
True
"""
local_retall = kwargs.pop('return_all', False)
mask, retall = get_deadtime_mask(self.time, deadtime,
return_all=True,
**kwargs)
new_ev = self.apply_mask(mask, inplace=inplace)
if local_retall:
new_ev = [new_ev, retall]
return new_ev
def change_mjdref(self, new_mjdref):
"""Change the MJD reference time (MJDREF) of the light curve.
Times will be now referred to this new MJDREF
Parameters
----------
new_mjdref : float
New MJDREF
Returns
-------
new_lc : :class:`EventList` object
The new event list, with times referred to the new MJDREF
"""
time_shift = (self.mjdref - new_mjdref) * 86400
new_ev = self.shift(time_shift)
new_ev.mjdref = new_mjdref
return new_ev
def shift(self, time_shift):
"""
Shift the events and the GTIs in time.
Parameters
----------
time_shift: float
The time interval by which the light curve will be shifted (in
the same units as the time array in :class:`Lightcurve`)
Returns
-------
new_ev : :class:`EventList` object
The new event list shifted by ``time_shift``
"""
new_ev = copy.deepcopy(self)
new_ev.time = new_ev.time + time_shift
new_ev.gti = new_ev.gti + time_shift
return new_ev
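# Worked example of the MJDREF bookkeeping in change_mjdref/shift above
# (made-up numbers): moving from mjdref=55000 to new_mjdref=55001 gives
# time_shift = (55000 - 55001) * 86400 = -86400 s, so an event at t=100000 s
# becomes t=13600 s while its absolute time, mjdref + t/86400, is unchanged.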
@staticmethod
@staticmethod
| 33.333333 | 114 | 0.557902 | """
Definition of :class:`EventList`.
:class:`EventList` is used to handle photon arrival times.
"""
import copy
import pickle
import warnings
import numpy as np
import numpy.random as ra
from astropy.table import Table
from .filters import get_deadtime_mask
from .gti import append_gtis, check_separate, cross_gtis
from .io import load_events_and_gtis
from .lightcurve import Lightcurve
from .utils import assign_value_if_none, simon, interpret_times
__all__ = ['EventList']
class EventList(object):
"""
Basic class for event list data. Event lists generally correspond to individual events (e.g. photons)
recorded by the detector, and their associated properties. For X-ray data where this type commonly occurs,
events are time stamps of when a photon arrived in the detector, and (optionally) the photon energy associated
with the event.
Parameters
----------
time: iterable
A list or array of time stamps
Other Parameters
----------------
dt: float
The time resolution of the events. Only relevant when using events
to produce light curves with similar bin time.
energy: iterable
A list of array of photon energy values in keV
mjdref : float
The MJD used as a reference for the time array.
ncounts: int
Number of desired data points in event list.
gtis: ``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]``
Good Time Intervals
pi : integer, numpy.ndarray
PI channels
notes : str
Any useful annotations
high_precision : bool
Change the precision of self.time to float128. Useful while dealing with fast pulsars.
mission : str
Mission that recorded the data (e.g. NICER)
instr : str
Instrument onboard the mission
header : str
The full header of the original FITS file, if relevant
**other_kw :
Used internally. Any other keyword arguments will be ignored
Attributes
----------
time: numpy.ndarray
The array of event arrival times, in seconds from the reference
MJD defined in ``mjdref``
energy: numpy.ndarray
The array of photon energy values
ncounts: int
The number of data points in the event list
dt: float
The time resolution of the events. Only relevant when using events
to produce light curves with similar bin time.
mjdref : float
The MJD used as a reference for the time array.
gtis: ``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]``
Good Time Intervals
pi : integer, numpy.ndarray
PI channels
high_precision : bool
Change the precision of self.time to float128. Useful while dealing with fast pulsars.
mission : str
Mission that recorded the data (e.g. NICER)
instr : str
Instrument onboard the mission
detector_id : iterable
The detector that recoded each photon, if relevant (e.g. XMM, Chandra)
header : str
The full header of the original FITS file, if relevant
"""
def __init__(self, time=None, energy=None, ncounts=None, mjdref=0, dt=0,
notes="", gti=None, pi=None, high_precision=False,
mission=None, instr=None, header=None, detector_id=None,
**other_kw):
self.energy = None if energy is None else np.asarray(energy)
self.notes = notes
self.dt = dt
self.mjdref = mjdref
self.gti = np.asarray(gti) if gti is not None else None
self.pi = pi
self.ncounts = ncounts
self.mission = mission
self.instr = instr
self.detector_id = detector_id
self.header = header
if other_kw != {}:
warnings.warn(f"Unrecognized keywords: {list(other_kw.keys())}")
if time is not None:
time, mjdref = interpret_times(time, mjdref)
if not high_precision:
self.time = np.asarray(time)
else:
self.time = np.asarray(time, dtype=np.longdouble)
self.ncounts = self.time.size
else:
self.time = None
if (self.time is not None) and (self.energy is not None):
if self.time.size != self.energy.size:
raise ValueError('Lengths of time and energy must be equal.')
def to_lc(self, dt, tstart=None, tseg=None):
"""
Convert event list to a :class:`stingray.Lightcurve` object.
Parameters
----------
dt: float
Binning time of the light curve
Other Parameters
----------------
tstart : float
Start time of the light curve
tseg: float
Total duration of light curve
Returns
-------
lc: :class:`stingray.Lightcurve` object
"""
if tstart is None and self.gti is not None:
tstart = self.gti[0][0]
tseg = self.gti[-1][1] - tstart
return Lightcurve.make_lightcurve(self.time, dt, tstart=tstart,
gti=self.gti, tseg=tseg,
mjdref=self.mjdref)
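# A short usage sketch for to_lc (illustrative numbers): arrival times are
# binned at the requested resolution over the GTI span of the events.
#
#     ev = EventList(time=[0.5, 1.5, 1.7, 3.2], gti=[[0, 4]])
#     lc = ev.to_lc(dt=1)   # with these inputs, lc.counts should give [1, 2, 0, 1]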
def to_lc_list(self, dt):
"""Convert event list to a generator of Lightcurves.
Parameters
----------
dt: float
Binning time of the light curves
Returns
-------
lc_gen: generator
Generates one :class:`stingray.Lightcurve` object for each GTI
"""
start_times = self.gti[:, 0]
end_times = self.gti[:, 1]
tsegs = end_times - start_times
for st, end, tseg in zip(start_times, end_times, tsegs):
idx_st = np.searchsorted(self.time, st, side='right')
idx_end = np.searchsorted(self.time, end, side='left')
lc = Lightcurve.make_lightcurve(self.time[idx_st:idx_end], dt,
tstart=st,
gti=np.asarray([[st, end]]),
tseg=tseg,
mjdref=self.mjdref)
yield lc
@staticmethod
def from_lc(lc):
"""
Create an :class:`EventList` from a :class:`stingray.Lightcurve` object. Note that all
events in a given time bin will have the same time stamp.
Parameters
----------
lc: :class:`stingray.Lightcurve` object
Light curve to use for creation of the event list.
Returns
-------
ev: :class:`EventList` object
The resulting list of photon arrival times generated from the light curve.
"""
# Multiply times by number of counts
times = [[i] * int(j) for i, j in zip(lc.time, lc.counts)]
# Concatenate all lists
times = [i for j in times for i in j]
return EventList(time=times, gti=lc.gti)
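# The two list comprehensions above expand binned counts back into per-event
# time stamps, e.g. lc.time = [1, 2, 3] with lc.counts = [2, 0, 1] gives
# times = [[1, 1], [], [3]] and, after flattening, [1, 1, 3].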
def simulate_times(self, lc, use_spline=False, bin_time=None):
"""
Randomly assign (simulate) photon arrival times to an :class:`EventList` from a
:class:`stingray.Lightcurve` object, using the acceptance-rejection method.
Parameters
----------
lc: :class:`stingray.Lightcurve` object
Other Parameters
----------------
use_spline : bool
Approximate the light curve with a spline to avoid binning effects
bin_time : float
The bin time of the light curve, if it needs to be specified for
improved precision
Returns
-------
times : array-like
Simulated photon arrival times
"""
from stingray.simulator.base import simulate_times
self.time = simulate_times(lc, use_spline=use_spline,
bin_time=bin_time)
self.gti = lc.gti
self.ncounts = len(self.time)
def simulate_energies(self, spectrum):
"""
Assign (simulate) energies to event list from a spectrum.
Parameters
----------
spectrum: 2-d array or list
Energies versus corresponding fluxes. The 2-d array or list must
have energies across the first dimension and fluxes across the
second one.
"""
if self.ncounts is None:
simon("Either set time values or explicitly provide counts.")
return
if isinstance(spectrum, list) or isinstance(spectrum, np.ndarray):
energy = np.asarray(spectrum)[0]
fluxes = np.asarray(spectrum)[1]
if not isinstance(energy, np.ndarray):
raise IndexError("Spectrum must be a 2-d array or list")
else:
raise TypeError("Spectrum must be a 2-d array or list")
# Create a set of probability values
prob = fluxes / float(sum(fluxes))
# Calculate cumulative probability
cum_prob = np.cumsum(prob)
# Draw N random numbers between 0 and 1, where N is the size of event
# list
R = ra.uniform(0, 1, self.ncounts)
# Assign energies to events corresponding to the random numbers drawn
self.energy = \
np.asarray([
energy[np.argwhere(
cum_prob == np.min(cum_prob[(cum_prob - r) > 0]))]
for r in R])
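# Sketch of the sampling scheme used above (hypothetical 3-channel spectrum):
# fluxes [2, 6, 2] normalise to probabilities [0.2, 0.6, 0.2] and to the
# cumulative curve [0.2, 0.8, 1.0]; a uniform draw r = 0.35 selects the
# smallest cumulative value above r (here 0.8), so that event gets the second
# channel's energy.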
def join(self, other):
"""
Join two :class:`EventList` objects into one.
If both are empty, an empty :class:`EventList` is returned.
GTIs are crossed if the event lists are over a common time interval,
and appended otherwise.
``pi`` and ``energy`` remain ``None`` if they are ``None`` in both. Otherwise, 0 is used
as a default value for the :class:`EventList` where they were None.
Parameters
----------
other : :class:`EventList` object
The other :class:`EventList` object which is supposed to be joined with.
Returns
-------
`ev_new` : :class:`EventList` object
The resulting :class:`EventList` object.
"""
ev_new = EventList()
if self.dt != other.dt:
simon("The time resolution is different."
" Using the coarser of the two by default")
ev_new.dt = np.max([self.dt, other.dt])
if self.time is None and other.time is None:
return ev_new
if (self.time is None):
simon("One of the event lists you are concatenating is empty.")
self.time = np.asarray([])
elif (other.time is None):
simon("One of the event lists you are concatenating is empty.")
other.time = np.asarray([])
# Tolerance for MJDREF:1 microsecond
if not np.isclose(self.mjdref, other.mjdref, atol=1e-6 / 86400):
other = other.change_mjdref(self.mjdref)
ev_new.time = np.concatenate([self.time, other.time])
order = np.argsort(ev_new.time)
ev_new.time = ev_new.time[order]
if (self.pi is None) and (other.pi is None):
ev_new.pi = None
elif (self.pi is None) or (other.pi is None):
self.pi = assign_value_if_none(self.pi, np.zeros_like(self.time))
other.pi = assign_value_if_none(other.pi,
np.zeros_like(other.time))
if (self.pi is not None) and (other.pi is not None):
ev_new.pi = np.concatenate([self.pi, other.pi])
ev_new.pi = ev_new.pi[order]
if (self.energy is None) and (other.energy is None):
ev_new.energy = None
elif (self.energy is None) or (other.energy is None):
self.energy = assign_value_if_none(self.energy,
np.zeros_like(self.time))
other.energy = assign_value_if_none(other.energy,
np.zeros_like(other.time))
if (self.energy is not None) and (other.energy is not None):
ev_new.energy = np.concatenate([self.energy, other.energy])
ev_new.energy = ev_new.energy[order]
if self.gti is None and other.gti is not None and len(self.time) > 0:
self.gti = \
assign_value_if_none(
self.gti, np.asarray([[self.time[0] - self.dt / 2,
self.time[-1] + self.dt / 2]]))
if other.gti is None and self.gti is not None and len(other.time) > 0:
other.gti = \
assign_value_if_none(
other.gti, np.asarray([[other.time[0] - other.dt / 2,
other.time[-1] + other.dt / 2]]))
if (self.gti is None) and (other.gti is None):
ev_new.gti = None
elif (self.gti is not None) and (other.gti is not None):
if check_separate(self.gti, other.gti):
ev_new.gti = append_gtis(self.gti, other.gti)
simon('GTIs in these two event lists do not overlap at all.'
' Merging instead of returning an overlap.')
else:
ev_new.gti = cross_gtis([self.gti, other.gti])
ev_new.mjdref = self.mjdref
return ev_new
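# A minimal sketch of joining two event lists (made-up values): overlapping
# GTIs are crossed, disjoint ones are appended, and a pi/energy column that is
# present on only one side is padded with zeros on the other.
#
#     ev_a = EventList(time=[1, 2], gti=[[0, 3]])
#     ev_b = EventList(time=[4, 5], gti=[[3.5, 6]], pi=[1, 2])
#     ev_ab = ev_a.join(ev_b)   # times sorted; ev_a contributes pi values of 0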
@staticmethod
def read(filename, format_="pickle", **kwargs):
"""
Read a :class:`Lightcurve` object from file.
Currently supported formats are
* pickle (not recommended for long-term storage)
* hea : FITS Event files from (well, some) HEASARC-supported missions.
* any other formats compatible with the writers in
:class:`astropy.table.Table` (ascii.ecsv, hdf5, etc.)
Files that need the :class:`astropy.table.Table` interface MUST contain
at least a ``time`` column. Other recognized columns are ``energy`` and
``pi``.
The default ascii format is enhanced CSV (ECSV). Data formats
supporting the serialization of metadata (such as ECSV and HDF5) can
contain all eventlist attributes such as ``mission``, ``gti``, etc., with
no significant loss of information. Other file formats might lose part
of the metadata, so must be used with care.
Parameters
----------
filename: str
Path and file name for the file to be read.
format\_: str
Available options are 'pickle', 'hea', and any `Table`-supported
format such as 'hdf5', 'ascii.ecsv', etc.
Returns
-------
ev: :class:`EventList` object
The :class:`EventList` object reconstructed from file
"""
if format_ == 'pickle':
with open(filename, 'rb') as fobj:
return pickle.load(fobj)
if format_ in ('hea',):
evtdata = load_events_and_gtis(filename, **kwargs)
return EventList(time=evtdata.ev_list,
gti=evtdata.gti_list,
pi=evtdata.pi_list,
energy=evtdata.energy_list,
mjdref=evtdata.mjdref,
instr=evtdata.instr,
mission=evtdata.mission,
header=evtdata.header,
detector_id=evtdata.detector_id)
if format_ == 'ascii':
format_ = 'ascii.ecsv'
ts = Table.read(filename, format=format_)
return EventList.from_astropy_table(ts)
def write(self, filename, format_='pickle'):
"""
Write an :class:`EventList` object to file.
Possible file formats are
* pickle (not recommended for long-term storage)
* any other formats compatible with the writers in
:class:`astropy.table.Table` (ascii.ecsv, hdf5, etc.)
Parameters
----------
filename: str
Name and path of the file to save the event list to.
format_: str
The file format to store the data in.
Available options are ``pickle``, ``hdf5``, ``ascii``, ``fits``
"""
if format_ == 'pickle':
with open(filename, "wb") as fobj:
pickle.dump(self, fobj)
return
if format_ == 'ascii':
format_ = 'ascii.ecsv'
ts = self.to_astropy_table()
try:
ts.write(filename, format=format_, overwrite=True,
serialize_meta=True)
except TypeError:
ts.write(filename, format=format_, overwrite=True)
def apply_mask(self, mask, inplace=False):
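# Keep only the events selected by the boolean ``mask``, trimming the time,
# energy and pi arrays together; with inplace=True the current object is
# modified, otherwise a deep copy is filtered and returned (this is the helper
# used by apply_deadtime below).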
if inplace:
new_ev = self
else:
new_ev = copy.deepcopy(self)
for attr in 'time', 'energy', 'pi':
if hasattr(new_ev, attr):
setattr(new_ev, attr, getattr(new_ev, attr)[mask])
return new_ev
def apply_deadtime(self, deadtime, inplace=False, **kwargs):
"""Apply deadtime filter to this event list.
Additional arguments in ``kwargs`` are passed to `get_deadtime_mask`
Parameters
----------
deadtime : float
Value of dead time to apply to data
inplace : bool, default False
If True, apply the deadtime to the current event list. Otherwise,
return a new event list.
Returns
-------
new_event_list : `EventList` object
Filtered event list. if `inplace` is True, this is the input object
filtered for deadtime, otherwise this is a new object.
additional_output : object
Only returned if `return_all` is True. See `get_deadtime_mask` for
more details.
Examples
--------
>>> events = np.array([1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2])
>>> events = EventList(events)
>>> events.pi=np.array([1, 2, 2, 2, 2, 1, 1, 1, 2, 1])
>>> events.energy=np.array([1, 2, 2, 2, 2, 1, 1, 1, 2, 1])
>>> events.mjdref = 10
>>> filt_events, retval = events.apply_deadtime(0.11, inplace=False,
... verbose=False,
... return_all=True)
>>> filt_events is events
False
>>> expected = np.array([1, 2, 2.2, 3, 3.2])
>>> np.allclose(filt_events.time, expected)
True
>>> np.allclose(filt_events.pi, 1)
True
>>> np.allclose(filt_events.energy, 1)
True
>>> np.allclose(events.pi, 1)
False
>>> filt_events = events.apply_deadtime(0.11, inplace=True,
... verbose=False)
>>> filt_events is events
True
"""
local_retall = kwargs.pop('return_all', False)
mask, retall = get_deadtime_mask(self.time, deadtime,
return_all=True,
**kwargs)
new_ev = self.apply_mask(mask, inplace=inplace)
if local_retall:
new_ev = [new_ev, retall]
return new_ev
def change_mjdref(self, new_mjdref):
"""Change the MJD reference time (MJDREF) of the light curve.
Times will be now referred to this new MJDREF
Parameters
----------
new_mjdref : float
New MJDREF
Returns
-------
new_lc : :class:`EventList` object
The new event list, with times referred to the new MJDREF
"""
time_shift = (self.mjdref - new_mjdref) * 86400
new_ev = self.shift(time_shift)
new_ev.mjdref = new_mjdref
return new_ev
def shift(self, time_shift):
"""
Shift the events and the GTIs in time.
Parameters
----------
time_shift: float
The time interval by which the light curve will be shifted (in
the same units as the time array in :class:`Lightcurve`)
Returns
-------
new_ev : :class:`EventList` object
The new event list shifted by ``time_shift``
"""
new_ev = copy.deepcopy(self)
new_ev.time = new_ev.time + time_shift
new_ev.gti = new_ev.gti + time_shift
return new_ev
def to_astropy_timeseries(self):
from astropy.timeseries import TimeSeries
from astropy.time import TimeDelta
from astropy import units as u
data = {}
for attr in ['energy', 'pi']:
if hasattr(self, attr) and getattr(self, attr) is not None:
data[attr] = np.asarray(getattr(self, attr))
if data == {}:
data = None
if self.time is not None and self.time.size > 0:
times = TimeDelta(self.time * u.s)
ts = TimeSeries(data=data, time=times)
else:
ts = TimeSeries()
ts.meta['gti'] = self.gti
ts.meta['mjdref'] = self.mjdref
ts.meta['instr'] = self.instr
ts.meta['mission'] = self.mission
ts.meta['header'] = self.header
return ts
@staticmethod
def from_astropy_timeseries(ts):
from astropy.timeseries import TimeSeries
from astropy import units as u
energy = pi = gti = instr = mission = mjdref = None
if 'energy' in ts.colnames:
energy = ts['energy']
if 'pi' in ts.colnames:
pi = ts['pi']
kwargs = ts.meta
ev = EventList(time=ts.time, energy=energy, pi=pi, **kwargs)
return ev
def to_astropy_table(self):
data = {}
for attr in ['time', 'energy', 'pi']:
if hasattr(self, attr) and getattr(self, attr) is not None:
data[attr] = np.asarray(getattr(self, attr))
ts = Table(data)
ts.meta['gti'] = self.gti
ts.meta['mjdref'] = self.mjdref
ts.meta['instr'] = self.instr
ts.meta['mission'] = self.mission
ts.meta['header'] = self.header
return ts
@staticmethod
def from_astropy_table(ts):
kwargs = dict([(key.lower(), val) for (key, val) in ts.meta.items()])
for attr in ['time', 'energy', 'pi']:
if attr in ts.colnames:
kwargs[attr] = ts[attr]
ev = EventList(**kwargs)
return ev
| 3,437 | 0 | 159 |
c9ed1a015281c5d543377975b091e92c8f907d7b | 3,961 | py | Python | ros/src/tl_detector/light_classification/tl_classifier.py | piehlm/CarND-Capstone | 53b38f8da0d8e2fb5d154bb84f451c75f3397073 | [
"MIT"
] | null | null | null | ros/src/tl_detector/light_classification/tl_classifier.py | piehlm/CarND-Capstone | 53b38f8da0d8e2fb5d154bb84f451c75f3397073 | [
"MIT"
] | null | null | null | ros/src/tl_detector/light_classification/tl_classifier.py | piehlm/CarND-Capstone | 53b38f8da0d8e2fb5d154bb84f451c75f3397073 | [
"MIT"
] | null | null | null | from styx_msgs.msg import TrafficLight
import cv2
import numpy as np
import tensorflow as tf
import datetime
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
| 38.456311 | 93 | 0.635193 | from styx_msgs.msg import TrafficLight
import cv2
import numpy as np
import tensorflow as tf
import datetime
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class TLClassifier(object):
def __init__(self, is_simulation):
'''
self.image_pub = rospy.Publisher("image_topic_2", Image)
self.bridge = CvBridge()
'''
if (is_simulation):
self.MODEL_NAME = 'light_classification/frozen-ssd_inception-simulation'
else:
self.MODEL_NAME = 'light_classification/frozen-ssd_inception-site'
self.PATH_TO_FROZEN_GRAPH = self.MODEL_NAME + '/frozen_inference_graph.pb'
#TODO load classifier
# load a frozen tensorflow model into memory
self.detection_graph = tf.Graph()
with self.detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(self.PATH_TO_FROZEN_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
self.boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
self.scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
self.classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
self.session = tf.Session(graph=self.detection_graph)
self.threshold = 0.5
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
#TODO implement light color prediction
# convert to rgb image
# image = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
image_rgb = image
#image normalization
img_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
#equalize the histogram of the y channel
img_yuv[:,:,0] = cv2.equalizeHist(img_yuv[:,:,0])
#convert the yuv image back to rgb
image_rgb = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
#cv2.imshow('Color Input Image', image)
#cv2.imshow('Histogram Equalized', image_rgb)
#cv2.waitKey()
image_rgb = image
# self.image_pub.publish(self.bridge.cv2_to_imgmsg(image_rgb, "rgb8"))  # publisher/bridge setup is disabled in __init__
with self.detection_graph.as_default():
image_expand = np.expand_dims(image_rgb, axis=0)
start_classification_t = datetime.datetime.now()
(boxes, scores, classes, num_detections) = self.session.run(
[self.boxes, self.scores, self.classes, self.num_detections],
feed_dict = {self.image_tensor: image_expand})
end_classification_t = datetime.datetime.now()
elapsed_time = end_classification_t - start_classification_t
#print("Classification Took ", elapsed_time.total_seconds())
boxes = np.squeeze(boxes)
classes = np.squeeze(classes)
scores = np.squeeze(scores)
#print("Best Class:", classes[0])
#print("Best Score:", scores[0])
if scores[0] > self.threshold:
if classes[0] == 1:
print("Traffic light is Green")
return TrafficLight.GREEN
elif classes[0] == 2:
print("Traffic light is Red")
return TrafficLight.RED
elif classes[0] == 3:
print("Traffic light is Yellow")
return TrafficLight.YELLOW
return TrafficLight.UNKNOWN
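# A hedged usage sketch (the frozen .pb models referenced in __init__ must
# exist on disk, and the image path is a placeholder):
#
#     classifier = TLClassifier(is_simulation=True)
#     state = classifier.get_classification(cv2.imread("sample_camera_frame.png"))
#     # state is one of TrafficLight.GREEN / RED / YELLOW / UNKNOWN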
| 0 | 3,702 | 23 |
4e06ec75f27c21b311de337c41d0325864cd8128 | 4,919 | py | Python | src/pythonScripts/modules/DataChecks.py | colebrookson/research-access | cd36a516d8a08fe98e726ea8d7fc7dfa5828b28f | [
"CC-BY-4.0"
] | null | null | null | src/pythonScripts/modules/DataChecks.py | colebrookson/research-access | cd36a516d8a08fe98e726ea8d7fc7dfa5828b28f | [
"CC-BY-4.0"
] | 3 | 2021-08-10T13:44:16.000Z | 2021-08-14T17:46:54.000Z | src/pythonScripts/modules/DataChecks.py | colebrookson/research-access | cd36a516d8a08fe98e726ea8d7fc7dfa5828b28f | [
"CC-BY-4.0"
] | null | null | null | import numpy as np
import pandas as pd
def validate_cISSN(issn:str) -> bool:
"""
Validates the last character (c) of the ISSN number, based on the first 7 digits
returns: boolean: True if c is valid False otherwise
"""
assert type(issn) == str, "issn must be a string"
issn_num = issn[:4] + issn[5:-1]
issn_c = issn[-1]
# check c validity
issn_num_sum = 0
inv_index = 8
for num in issn_num:
num = int(num)
issn_num_sum += num*inv_index
inv_index -= 1
mod = issn_num_sum%11
if mod == 0: c = 0
else:
c = 11-mod
if c == 10: c = 'X'
return str(c) == issn_c
# print(validate_cISSN("0046-225X"))
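# Worked check for "0046-225X": digits 0,0,4,6,2,2,5 weighted by 8..2 give
# 0 + 0 + 24 + 30 + 8 + 6 + 10 = 78; 78 % 11 = 1, so c = 11 - 1 = 10, written
# as 'X', which matches the final character, and validate_cISSN returns True.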
def hasAllColumns(df:pd.core.frame.DataFrame) -> "tuple(bool, list)":
"""
Checks if any of the required columns are missing from the df
returns: (True, []) if all required columns are present, (False, [missing cols]) otherwise
"""
COLS = ["journal", "issn", "access", "notes"]
df_cols = []
hasAllCols = True
missingCols = []
for col in list(df.columns):
col = str(col)
col = "".join(col.split())
df_cols.append(col)
for col in COLS:
hasAllCols = hasAllCols and (col in df_cols)
if col not in df_cols: missingCols.append(col)
return hasAllCols, missingCols
def noDuplicates(df):
"""
Checks if there are any duplicated rows in the df
"""
noDuplicates = list(df.duplicated().unique()) == [False]
return noDuplicates
def hasNaN(df:pd.core.frame.DataFrame, includeNotes=False) -> "tuple(bool, list)":
"""
Checks if there are any missing values in each column of the df
returns: (True, [cols with missing values]) if missing values present in any column, (False, []) otherwise
"""
if not includeNotes:
df = df.drop("notes", inplace=False, axis=1)
df.fillna(value=np.nan, inplace=True) # for replacing None values (.replace does not work with None)
oddWords = ["missing", "MISSING", "Missing", "null", "Null", "NULL",
"None", "none", "NONE", "N/A", "n/a", "-", '', ' ',
" ", " ", "x", np.inf]
for word in oddWords:
df.replace(word, np.nan, inplace=True)
isnullCols = {}
hasNaNCols = []
hasNaN = False
for col in df.columns:
isnullCols[col] = df[col].isnull().unique()
if (True in isnullCols[col]):
hasNaN = True
hasNaNCols.append(col)
return hasNaN, hasNaNCols
def allJournalsCounted(df:pd.core.frame.DataFrame, allJournals:list) -> "tuple(bool, list)":
"""
Checks if all journals are recorded for a university df
returns: (True, []) if all journals are present, (False, [uncountedJournals]) otherwise
"""
df_journals = list(df["journal"])
uncountedJournals = []
allAreCounted = True
for journal in allJournals:
if journal not in df_journals:
allAreCounted = False
uncountedJournals.append(journal)
return allAreCounted, uncountedJournals
def journalsMatchISSN(gtruth_df:pd.core.frame.DataFrame, observed_df:pd.core.frame.DataFrame) -> "tuple(bool, list)":
"""
Compares the journal, ISSN pairing in gtruth_df with observed_df to check if they match.
Both parameters require pd.DataFrames with two columns (journal, issn).
returns: (True, []) if no mismatch found, (False, [mismatchedJournals]) otherwise
"""
gTruthDf = gtruth_df.set_index("journal", inplace=False).astype("string")
observedDf = observed_df.set_index("journal", inplace=False).astype("string")
mismatchedJournals = []
noMismatch = True
for j in list(gTruthDf.index):
if str(gTruthDf.loc[j]) != str(observedDf.loc[j]):
noMismatch = False
mismatchedJournals.append(j)
return noMismatch, mismatchedJournals
| 30.937107 | 118 | 0.60988 | import numpy as np
import pandas as pd
class DataChecksException(Exception):
def __init__(self, msg, sheetID, ref, detail):
self.msg = msg
self.sheetID = sheetID
self.ref = ref
self.detail = detail
def getSheetID(self):
return self.sheetID
def getType(self):
return self.type
def getDetail(self):
return self.detail
def __str__(self):
return f"{self.msg} \nSheetID: {self.sheetID} \nRef: {self.ref} \nDetail: {self.detail}"
def check_issn(issn:str) -> bool:
# TODO:
# - check that the first 7 digits are numbers and the last digit is either a number or "X"
# - check that the last character is valid (matches the calculations based on the first 7 digits)
# - check that t
isVALID_C = validate_cISSN(issn)
pass
def check_journal(name:str) -> bool:
pass
def check_access(zero_or_one:int) -> bool:
pass
def check_notes(notes:str) -> bool:
pass
def validate_cISSN(issn:str) -> bool:
"""
Validates the last character (c) of the ISSN number, based on the first 7 digits
returns: boolean: True if c is valid False otherwise
"""
assert type(issn) == str, "issn must be a string"
issn_num = issn[:4] + issn[5:-1]
issn_c = issn[-1]
# check c validity
issn_num_sum = 0
inv_index = 8
for num in issn_num:
num = int(num)
issn_num_sum += num*inv_index
inv_index -= 1
mod = issn_num_sum%11
if mod == 0: c = 0
else:
c = 11-mod
if c == 10: c = 'X'
return str(c) == issn_c
# print(validate_cISSN("0046-225X"))
def hasAllColumns(df:pd.core.frame.DataFrame) -> "tuple(bool, list)":
"""
Checks if any of the required columns are missing from the df
returns: (True, []) if all required columns are present, (False, [missing cols]) otherwise
"""
COLS = ["journal", "issn", "access", "notes"]
df_cols = []
hasAllCols = True
missingCols = []
for col in list(df.columns):
col = str(col)
col = "".join(col.split())
df_cols.append(col)
for col in COLS:
hasAllCols = hasAllCols and (col in df_cols)
if col not in df_cols: missingCols.append(col)
return hasAllCols, missingCols
def noDuplicates(df):
"""
Checks if there are any duplicated rows in the df
"""
noDuplicates = list(df.duplicated().unique()) == [False]
return noDuplicates
def hasNaN(df:pd.core.frame.DataFrame, includeNotes=False) -> "tuple(bool, list)":
"""
Checks if there are any missing values in each column of the df
returns: (True, [cols with missing values]) if missing values present in any column, (False, []) otherwise
"""
if not includeNotes:
df = df.drop("notes", inplace=False, axis=1)
df.fillna(value=np.nan, inplace=True) # for replacing None values (.replace does not work with None)
oddWords = ["missing", "MISSING", "Missing", "null", "Null", "NULL",
"None", "none", "NONE", "N/A", "n/a", "-", '', ' ',
" ", " ", "x", np.inf]
for word in oddWords:
df.replace(word, np.nan, inplace=True)
isnullCols = {}
hasNaNCols = []
hasNaN = False
for col in df.columns:
isnullCols[col] = df[col].isnull().unique()
if (True in isnullCols[col]):
hasNaN = True
hasNaNCols.append(col)
return hasNaN, hasNaNCols
def allJournalsCounted(df:pd.core.frame.DataFrame, allJournals:list) -> "tuple(bool, list)":
"""
Checks if all journals are recorded for a university df
returns: (True, []) if all journals are present, (False, [uncountedJournals]) otherwise
"""
df_journals = list(df["journal"])
uncountedJournals = []
allAreCounted = True
for journal in allJournals:
if journal not in df_journals:
allAreCounted = False
uncountedJournals.append(journal)
return allAreCounted, uncountedJournals
def journalsMatchISSN(gtruth_df:pd.core.frame.DataFrame, observed_df:pd.core.frame.DataFrame) -> "tuple(bool, list)":
"""
Compares the journal, ISSN pairing in gtruth_df with observed_df to check if they match.
Both parameters require pd.DataFrames with two columns (journal, issn).
returns: (True, []) if no mismatch found, (False, [mismatchedJournals]) otherwise
"""
gTruthDf = gtruth_df.set_index("journal", inplace=False).astype("string")
observedDf = observed_df.set_index("journal", inplace=False).astype("string")
mismatchedJournals = []
noMismatch = True
for j in list(gTruthDf.index):
if str(gTruthDf.loc[j]) != str(observedDf.loc[j]):
noMismatch = False
mismatchedJournals.append(j)
return noMismatch, mismatchedJournals
| 683 | 16 | 270 |
54ec696345f81105b1ebefad5e46a229d6bcb23f | 7,254 | py | Python | rdflib_sqlalchemy/tables.py | gjhiggins/rdflib-sqlalchemy | d4c057934cd2675083d3df943103bdffb20341d4 | [
"BSD-3-Clause"
] | 112 | 2015-02-21T15:56:34.000Z | 2022-02-22T12:10:26.000Z | rdflib_sqlalchemy/tables.py | gjhiggins/rdflib-sqlalchemy | d4c057934cd2675083d3df943103bdffb20341d4 | [
"BSD-3-Clause"
] | 64 | 2015-01-22T12:40:11.000Z | 2021-12-27T19:15:14.000Z | rdflib_sqlalchemy/tables.py | gjhiggins/rdflib-sqlalchemy | d4c057934cd2675083d3df943103bdffb20341d4 | [
"BSD-3-Clause"
] | 28 | 2015-06-22T08:06:58.000Z | 2022-02-16T11:17:49.000Z | from sqlalchemy import Column, Table, Index, types
from rdflib_sqlalchemy.types import TermType
MYSQL_MAX_INDEX_LENGTH = 200
TABLE_NAME_TEMPLATES = [
"{interned_id}_asserted_statements",
"{interned_id}_literal_statements",
"{interned_id}_namespace_binds",
"{interned_id}_quoted_statements",
"{interned_id}_type_statements",
]
| 33.897196 | 90 | 0.600772 | from sqlalchemy import Column, Table, Index, types
from rdflib_sqlalchemy.types import TermType
MYSQL_MAX_INDEX_LENGTH = 200
TABLE_NAME_TEMPLATES = [
"{interned_id}_asserted_statements",
"{interned_id}_literal_statements",
"{interned_id}_namespace_binds",
"{interned_id}_quoted_statements",
"{interned_id}_type_statements",
]
def get_table_names(interned_id):
return [
table_name_template.format(interned_id=interned_id)
for table_name_template in TABLE_NAME_TEMPLATES
]
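# A small illustration (hypothetical interned id): get_table_names("kb_abc")
# returns ["kb_abc_asserted_statements", "kb_abc_literal_statements",
# "kb_abc_namespace_binds", "kb_abc_quoted_statements",
# "kb_abc_type_statements"], matching the templates defined above.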
def create_asserted_statements_table(interned_id, metadata):
return Table(
"{interned_id}_asserted_statements".format(interned_id=interned_id),
metadata,
Column("id", types.Integer, nullable=False, primary_key=True),
Column("subject", TermType, nullable=False),
Column("predicate", TermType, nullable=False),
Column("object", TermType, nullable=False),
Column("context", TermType, nullable=False),
Column("termcomb", types.Integer, nullable=False, key="termComb"),
Index(
"{interned_id}_A_s_index".format(interned_id=interned_id),
"subject",
mysql_length=MYSQL_MAX_INDEX_LENGTH,
),
Index(
"{interned_id}_A_p_index".format(interned_id=interned_id),
"predicate",
mysql_length=MYSQL_MAX_INDEX_LENGTH,
),
Index(
"{interned_id}_A_o_index".format(interned_id=interned_id),
"object",
mysql_length=MYSQL_MAX_INDEX_LENGTH,
),
Index(
"{interned_id}_A_c_index".format(interned_id=interned_id),
"context",
mysql_length=MYSQL_MAX_INDEX_LENGTH,
),
Index(
"{interned_id}_A_termComb_index".format(interned_id=interned_id),
"termComb",
),
Index(
"{interned_id}_asserted_spoc_key".format(interned_id=interned_id),
"subject",
"predicate",
"object",
"context",
unique=True,
mysql_length=191,
),
)
def create_type_statements_table(interned_id, metadata):
return Table(
"{interned_id}_type_statements".format(interned_id=interned_id),
metadata,
Column("id", types.Integer, nullable=False, primary_key=True),
Column("member", TermType, nullable=False),
Column("klass", TermType, nullable=False),
Column("context", TermType, nullable=False),
Column("termcomb", types.Integer, nullable=False, key="termComb"),
Index(
"{interned_id}_member_index".format(interned_id=interned_id),
"member",
mysql_length=MYSQL_MAX_INDEX_LENGTH,
),
Index(
"{interned_id}_klass_index".format(interned_id=interned_id),
"klass",
mysql_length=MYSQL_MAX_INDEX_LENGTH,
),
Index(
"{interned_id}_c_index".format(interned_id=interned_id),
"context",
mysql_length=MYSQL_MAX_INDEX_LENGTH,
),
Index(
"{interned_id}_T_termComb_index".format(interned_id=interned_id),
"termComb",
),
Index(
"{interned_id}_type_mkc_key".format(interned_id=interned_id),
"member",
"klass",
"context",
unique=True,
mysql_length=MYSQL_MAX_INDEX_LENGTH,
),
)
def create_literal_statements_table(interned_id, metadata):
return Table(
"{interned_id}_literal_statements".format(interned_id=interned_id),
metadata,
Column("id", types.Integer, nullable=False, primary_key=True),
Column("subject", TermType, nullable=False),
Column("predicate", TermType, nullable=False),
Column("object", TermType),
Column("context", TermType, nullable=False),
Column("termcomb", types.Integer, nullable=False, key="termComb"),
Column("objlanguage", types.String(255), key="objLanguage"),
Column("objdatatype", types.String(255), key="objDatatype"),
Index(
"{interned_id}_L_s_index".format(interned_id=interned_id),
"subject",
mysql_length=MYSQL_MAX_INDEX_LENGTH,
),
Index(
"{interned_id}_L_p_index".format(interned_id=interned_id),
"predicate",
mysql_length=MYSQL_MAX_INDEX_LENGTH,
),
Index(
"{interned_id}_L_c_index".format(interned_id=interned_id),
"context",
mysql_length=MYSQL_MAX_INDEX_LENGTH,
),
Index(
"{interned_id}_L_termComb_index".format(interned_id=interned_id),
"termComb",
),
Index(
"{interned_id}_literal_spoc_key".format(interned_id=interned_id),
"subject",
"predicate",
"object",
"objLanguage",
"context",
unique=True,
mysql_length=153,
),
)
def create_quoted_statements_table(interned_id, metadata):
return Table(
"{interned_id}_quoted_statements".format(interned_id=interned_id),
metadata,
Column("id", types.Integer, nullable=False, primary_key=True),
Column("subject", TermType, nullable=False),
Column("predicate", TermType, nullable=False),
Column("object", TermType),
Column("context", TermType, nullable=False),
Column("termcomb", types.Integer, nullable=False, key="termComb"),
Column("objlanguage", types.String(255), key="objLanguage"),
Column("objdatatype", types.String(255), key="objDatatype"),
Index(
"{interned_id}_Q_s_index".format(interned_id=interned_id),
"subject",
mysql_length=MYSQL_MAX_INDEX_LENGTH,
),
Index(
"{interned_id}_Q_p_index".format(interned_id=interned_id),
"predicate",
mysql_length=MYSQL_MAX_INDEX_LENGTH,
),
Index(
"{interned_id}_Q_o_index".format(interned_id=interned_id),
"object",
mysql_length=MYSQL_MAX_INDEX_LENGTH,
),
Index(
"{interned_id}_Q_c_index".format(interned_id=interned_id),
"context",
mysql_length=MYSQL_MAX_INDEX_LENGTH,
),
Index(
"{interned_id}_Q_termComb_index".format(interned_id=interned_id),
"termComb",
),
Index(
"{interned_id}_quoted_spoc_key".format(interned_id=interned_id),
"subject",
"predicate",
"object",
"objLanguage",
"context",
unique=True,
mysql_length=153,
),
)
def create_namespace_binds_table(interned_id, metadata):
return Table(
"{interned_id}_namespace_binds".format(interned_id=interned_id),
metadata,
Column("prefix", types.String(20), unique=True, nullable=False, primary_key=True),
Column("uri", types.Text),
Index(
"{interned_id}_uri_index".format(interned_id=interned_id),
"uri",
mysql_length=MYSQL_MAX_INDEX_LENGTH,
)
)
| 6,760 | 0 | 138 |
67527770fab38a278e3a6a862f1b7858af4000da | 1,633 | py | Python | ubersmith_client/_http_utils.py | internap/python-ubersmithclient | b406eddfdc11315e8f685285ca8ba6523bc0fba3 | [
"Apache-2.0"
] | 1 | 2016-01-29T18:56:23.000Z | 2016-01-29T18:56:23.000Z | ubersmith_client/_http_utils.py | internap/python-ubersmithclient | b406eddfdc11315e8f685285ca8ba6523bc0fba3 | [
"Apache-2.0"
] | 16 | 2016-02-01T18:26:25.000Z | 2020-04-29T17:09:24.000Z | ubersmith_client/_http_utils.py | internap/python-ubersmithclient | b406eddfdc11315e8f685285ca8ba6523bc0fba3 | [
"Apache-2.0"
] | 8 | 2016-03-09T19:38:25.000Z | 2017-02-15T21:26:56.000Z | # Copyright 2017 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 34.020833 | 77 | 0.642376 | # Copyright 2017 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def form_encode(data):
exploded_data = {}
for k, v in data.items():
items = _explode_enumerable(k, v)
for new_key, new_val in items:
exploded_data[new_key] = new_val
return exploded_data
def form_encode_without_files(data):
return form_encode({k: v for k, v in data.items() if k != 'files'})
def _explode_enumerable(k, v):
exploded_items = []
if isinstance(v, list) or isinstance(v, tuple):
if len(v) == 0:
exploded_items.append((k, v))
else:
for idx, item in enumerate(v):
current_key = '{}[{}]'.format(k, idx)
exploded_items.extend(_explode_enumerable(current_key, item))
elif isinstance(v, dict):
if len(v) == 0:
exploded_items.append((k, v))
else:
for idx, item in v.items():
current_key = '{}[{}]'.format(k, idx)
exploded_items.extend(_explode_enumerable(current_key, item))
else:
exploded_items.append((k, v))
return exploded_items
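# A worked example of the flattening performed above (made-up payload):
# form_encode({"client_id": 42, "tags": ["a", "b"], "meta": {"size": 3}})
# returns {"client_id": 42, "tags[0]": "a", "tags[1]": "b", "meta[size]": 3},
# i.e. bracketed keys are generated recursively for nested lists and dicts.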
| 988 | 0 | 69 |
9d9624410acf29a1d17afa7bb5a3ed7e67173e13 | 1,276 | py | Python | open511_server/utils/optimization.py | Open511/open511-server | f78301bff6aae582baae71022bc84e97c44e7650 | [
"MIT"
] | 1 | 2021-01-11T03:22:39.000Z | 2021-01-11T03:22:39.000Z | open511_server/utils/optimization.py | Open511/open511-server | f78301bff6aae582baae71022bc84e97c44e7650 | [
"MIT"
] | 2 | 2015-11-27T20:16:43.000Z | 2015-11-27T20:27:46.000Z | open511_server/utils/optimization.py | Open511/open511-server | f78301bff6aae582baae71022bc84e97c44e7650 | [
"MIT"
] | null | null | null | from functools import partial
import time
_cached_objects = dict()
CACHE_EXPIRY = 60 * 10
def get_cached_object(model, id):
"""
A very, very simple in-memory cache for ORM objects.
No invalidation other than restarting this app or waiting CACHE_EXPIRY seconds.
"""
lookup = (model, id)
cached = _cached_objects.get(lookup)
if cached and cached[0] > time.time():
return cached[1]
obj = model.objects.get(pk=id)
_cached_objects[lookup] = (time.time() + CACHE_EXPIRY, obj)
return obj
class memoize_method(object):
"""
Simple memoize decorator for instance methods.
http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
"""
| 27.73913 | 88 | 0.610502 | from functools import partial
import time
_cached_objects = dict()
CACHE_EXPIRY = 60 * 10
def get_cached_object(model, id):
"""
A very, very simple in-memory cache for ORM objects.
No invalidation other than restarting this app or waiting CACHE_EXPIRY seconds.
"""
lookup = (model, id)
cached = _cached_objects.get(lookup)
if cached and cached[0] > time.time():
return cached[1]
obj = model.objects.get(pk=id)
_cached_objects[lookup] = (time.time() + CACHE_EXPIRY, obj)
return obj
class memoize_method(object):
"""
Simple memoize decorator for instance methods.
http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
"""
def __init__(self, func):
self.func = func
def __get__(self, obj, objtype=None):
if obj is None:
return self.func
return partial(self, obj)
def __call__(self, *args, **kw):
obj = args[0]
try:
cache = obj.__cache
except AttributeError:
cache = obj.__cache = {}
key = (self.func, args[1:], frozenset(kw.items()))
try:
res = cache[key]
except KeyError:
res = cache[key] = self.func(*args, **kw)
return res
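# A minimal usage sketch (hypothetical class): results are cached per instance
# in obj.__cache, keyed on the method, its positional arguments and its
# keyword items, so repeat calls with the same arguments skip the computation.
#
#     class RoadSegment(object):
#         @memoize_method
#         def geometry_length(self, srid=4326):
#             ...  # expensive work, runs once per (instance, arguments)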
| 477 | 0 | 80 |
8a804905c39b600ecce37c7c31b15a42d43ab1af | 25,309 | py | Python | repo2docker/app.py | bmcgtech/repo2docker | a2a812d5670e0677dc33cf22f305368b6279ffa8 | [
"BSD-3-Clause"
] | 1 | 2021-03-02T12:18:17.000Z | 2021-03-02T12:18:17.000Z | repo2docker/app.py | bmcgtech/repo2docker | a2a812d5670e0677dc33cf22f305368b6279ffa8 | [
"BSD-3-Clause"
] | null | null | null | repo2docker/app.py | bmcgtech/repo2docker | a2a812d5670e0677dc33cf22f305368b6279ffa8 | [
"BSD-3-Clause"
] | 1 | 2019-10-15T14:59:07.000Z | 2019-10-15T14:59:07.000Z | """repo2docker: convert git repositories into jupyter-suitable docker images
Images produced by repo2docker can be used with Jupyter notebooks standalone
or with BinderHub.
Usage:
python -m repo2docker https://github.com/you/your-repo
"""
import json
import sys
import logging
import os
import getpass
import shutil
import tempfile
import time
import docker
from urllib.parse import urlparse
from docker.utils import kwargs_from_env
from docker.errors import DockerException
import escapism
from pythonjsonlogger import jsonlogger
from traitlets import Any, Dict, Int, List, Unicode, Bool, default
from traitlets.config import Application
from . import __version__
from .buildpacks import (
CondaBuildPack,
DockerBuildPack,
JuliaProjectTomlBuildPack,
JuliaRequireBuildPack,
LegacyBinderDockerBuildPack,
NixBuildPack,
PipfileBuildPack,
PythonBuildPack,
RBuildPack,
)
from . import contentproviders
from .utils import ByteSpecification, chdir
class Repo2Docker(Application):
"""An application for converting git repositories to docker images"""
name = "jupyter-repo2docker"
version = __version__
description = __doc__
@default("log_level")
def _default_log_level(self):
"""The application's default log level"""
return logging.INFO
git_workdir = Unicode(
None,
config=True,
allow_none=True,
help="""
Working directory to use for check out of git repositories.
The default is to use the system's temporary directory. Should be
somewhere ephemeral, such as /tmp.
""",
)
subdir = Unicode(
"",
config=True,
help="""
Subdirectory of the git repository to examine.
Defaults to ''.
""",
)
cache_from = List(
[],
config=True,
help="""
List of images to try & re-use cached image layers from.
Docker only tries to re-use image layers from images built locally,
not pulled from a registry. We can ask it to explicitly re-use layers
from non-locally built images by through the 'cache_from' parameter.
""",
)
buildpacks = List(
[
LegacyBinderDockerBuildPack,
DockerBuildPack,
JuliaProjectTomlBuildPack,
JuliaRequireBuildPack,
NixBuildPack,
RBuildPack,
CondaBuildPack,
PipfileBuildPack,
PythonBuildPack,
],
config=True,
help="""
Ordered list of BuildPacks to try when building a git repository.
""",
)
extra_build_kwargs = Dict(
{},
help="""
extra kwargs to limit CPU quota when building a docker image.
Dictionary that allows the user to set the desired runtime flag
to configure the amount of access to CPU resources your container has.
Reference https://docs.docker.com/config/containers/resource_constraints/#cpu
""",
config=True,
)
extra_run_kwargs = Dict(
{},
help="""
extra kwargs to limit CPU quota when running a docker image.
Dictionary that allows the user to set the desired runtime flag
to configure the amount of access to CPU resources your container has.
Reference https://docs.docker.com/config/containers/resource_constraints/#cpu
""",
config=True,
)
default_buildpack = Any(
PythonBuildPack,
config=True,
help="""
The default build pack to use when no other buildpacks are found.
""",
)
# Git is our content provider of last resort. This is to maintain the
# old behaviour when git and local directories were the only supported
# content providers. We can detect local directories from the path, but
# detecting if something will successfully `git clone` is very hard if all
# you can do is look at the path/URL to it.
content_providers = List(
[
contentproviders.Local,
contentproviders.Zenodo,
contentproviders.Figshare,
contentproviders.Dataverse,
contentproviders.Hydroshare,
contentproviders.Swhid,
contentproviders.Mercurial,
contentproviders.Git,
],
config=True,
help="""
Ordered list by priority of ContentProviders to try in turn to fetch
the contents specified by the user.
""",
)
build_memory_limit = ByteSpecification(
0,
help="""
Total memory that can be used by the docker image building process.
Set to 0 for no limits.
""",
config=True,
)
volumes = Dict(
{},
help="""
Volumes to mount when running the container.
Only used when running, not during build process!
Use a key-value pair, with the key being the volume source &
value being the destination volume.
Both source and destination can be relative. Source is resolved
relative to the current working directory on the host, and
destination is resolved relative to the working directory of the
image - ($HOME by default)
""",
config=True,
)
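# For example (paths are illustrative): volumes={"./host-data": "data"} mounts
# the host directory ./host-data read-write at <image working dir>/data when
# the container is run; destinations starting with "/" are used as absolute
# paths inside the container.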
user_id = Int(
help="""
UID of the user to create inside the built image.
Should be a uid that is not currently used by anything in the image.
Defaults to uid of currently running user, since that is the most
common case when running r2d manually.
Might not affect Dockerfile builds.
""",
config=True,
)
@default("user_id")
def _user_id_default(self):
"""
Default user_id to current running user.
"""
return os.geteuid()
user_name = Unicode(
"jovyan",
help="""
Username of the user to create inside the built image.
Should be a username that is not currently used by anything in the
image, and should conform to the restrictions on user names for Linux.
Defaults to username of currently running user, since that is the most
common case when running repo2docker manually.
""",
config=True,
)
@default("user_name")
def _user_name_default(self):
"""
Default user_name to current running user.
"""
return getpass.getuser()
appendix = Unicode(
config=True,
help="""
Appendix of Dockerfile commands to run at the end of the build.
Can be used to customize the resulting image after all
standard build steps finish.
""",
)
json_logs = Bool(
False,
help="""
Log output in structured JSON format.
Useful when stdout is consumed by other tools
""",
config=True,
)
repo = Unicode(
".",
help="""
Specification of repository to build image for.
Could be local path or git URL.
""",
config=True,
)
ref = Unicode(
None,
help="""
Git ref that should be built.
If repo is a git repository, this ref is checked out
in a local clone before repository is built.
""",
config=True,
allow_none=True,
)
swh_token = Unicode(
None,
help="""
Token to use authenticated SWH API access.
If unset, default to unauthenticated (limited) usage of the Software
Heritage API.
""",
config=True,
allow_none=True,
)
cleanup_checkout = Bool(
False,
help="""
Delete source repository after building is done.
Useful when repo2docker is doing the git cloning
""",
config=True,
)
output_image_spec = Unicode(
"",
help="""
Docker Image name:tag to tag the built image with.
Required parameter.
""",
config=True,
)
push = Bool(
False,
help="""
Set to true to push docker image after building
""",
config=True,
)
run = Bool(
False,
help="""
Run docker image after building
""",
config=True,
)
# FIXME: Refactor class to be able to do --no-build without needing
# deep support for it inside other code
dry_run = Bool(
False,
help="""
Do not actually build the docker image, just simulate it.
""",
config=True,
)
# FIXME: Refactor classes to separate build & run steps
run_cmd = List(
[],
help="""
Command to run when running the container
When left empty, a jupyter notebook is run.
""",
config=True,
)
all_ports = Bool(
False,
help="""
Publish all declared ports from the container while running.
Equivalent to -P option to docker run
""",
config=True,
)
ports = Dict(
{},
help="""
Port mappings to establish when running the container.
Equivalent to -p {key}:{value} options to docker run.
{key} refers to port inside container, and {value}
refers to port / host:port in the host
""",
config=True,
)
environment = List(
[],
help="""
Environment variables to set when running the built image.
Each item must be a string formatted as KEY=VALUE
""",
config=True,
)
target_repo_dir = Unicode(
"",
help="""
Path inside the image where contents of the repositories are copied to,
and where all the build operations (such as postBuild) happen.
Defaults to ${HOME} if not set
""",
config=True,
)
def fetch(self, url, ref, checkout_path):
"""Fetch the contents of `url` and place it in `checkout_path`.
The `ref` parameter specifies what "version" of the contents should be
fetched. In the case of a git repository `ref` is the SHA-1 of a commit.
Iterate through possible content providers until a valid provider,
based on URL, is found.
"""
picked_content_provider = None
for ContentProvider in self.content_providers:
cp = ContentProvider()
spec = cp.detect(url, ref=ref)
if spec is not None:
picked_content_provider = cp
self.log.info(
"Picked {cp} content "
"provider.\n".format(cp=cp.__class__.__name__)
)
break
if picked_content_provider is None:
self.log.error(
"No matching content provider found for " "{url}.".format(url=url)
)
swh_token = self.config.get("swh_token", self.swh_token)
if swh_token and isinstance(picked_content_provider, contentproviders.Swhid):
picked_content_provider.set_auth_token(swh_token)
for log_line in picked_content_provider.fetch(
spec, checkout_path, yield_output=self.json_logs
):
self.log.info(log_line, extra=dict(phase="fetching"))
if not self.output_image_spec:
image_spec = "r2d" + self.repo
# if we are building from a subdirectory include that in the
# image name so we can tell builds from different sub-directories
# apart.
if self.subdir:
image_spec += self.subdir
if picked_content_provider.content_id is not None:
image_spec += picked_content_provider.content_id
else:
image_spec += str(int(time.time()))
self.output_image_spec = escapism.escape(
image_spec, escape_char="-"
).lower()
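# Illustration of the image-naming fallback above (hypothetical repository):
# for repo "https://github.com/you/repo", subdir "docs" and a provider
# content_id "abc123", the pieces "r2d" + repo + subdir + content_id are
# concatenated, escaped with escape_char "-" and lowercased to form the image
# name; when the provider exposes no content_id, the current UNIX timestamp
# is appended instead.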
def json_excepthook(self, etype, evalue, traceback):
"""Called on an uncaught exception when using json logging
Avoids non-JSON output on errors when using --json-logs
"""
self.log.error(
"Error during build: %s",
evalue,
exc_info=(etype, evalue, traceback),
extra=dict(phase="failed"),
)
def initialize(self):
"""Init repo2docker configuration before start"""
# FIXME: Remove this function, move it to setters / traitlet reactors
if self.json_logs:
# register JSON excepthook to avoid non-JSON output on errors
sys.excepthook = self.json_excepthook
# Need to reset existing handlers, or we repeat messages
logHandler = logging.StreamHandler()
formatter = jsonlogger.JsonFormatter()
logHandler.setFormatter(formatter)
self.log = logging.getLogger("repo2docker")
self.log.handlers = []
self.log.addHandler(logHandler)
self.log.setLevel(self.log_level)
else:
# due to json logger stuff above,
# our log messages include carriage returns, newlines, etc.
# remove the additional newline from the stream handler
self.log.handlers[0].terminator = ""
# We don't want a [Repo2Docker] on all messages
self.log.handlers[0].formatter = logging.Formatter(fmt="%(message)s")
if self.dry_run and (self.run or self.push):
raise ValueError("Cannot push or run image if we are not building it")
if self.volumes and not self.run:
raise ValueError("Cannot mount volumes if container is not run")
def push_image(self):
"""Push docker image to registry"""
client = docker.APIClient(version="auto", **kwargs_from_env())
# Build a progress setup for each layer, and only emit per-layer
# info every 1.5s
progress_layers = {}
layers = {}
last_emit_time = time.time()
for chunk in client.push(self.output_image_spec, stream=True):
# each chunk can be one or more lines of json events
# split lines here in case multiple are delivered at once
for line in chunk.splitlines():
line = line.decode("utf-8", errors="replace")
try:
progress = json.loads(line)
except Exception as e:
self.log.warning("Not a JSON progress line: %r", line)
continue
if "error" in progress:
self.log.error(progress["error"], extra=dict(phase="failed"))
raise docker.errors.ImageLoadError(progress["error"])
if "id" not in progress:
continue
# deprecated truncated-progress data
if "progressDetail" in progress and progress["progressDetail"]:
progress_layers[progress["id"]] = progress["progressDetail"]
else:
progress_layers[progress["id"]] = progress["status"]
# include full progress data for each layer in 'layers' data
layers[progress["id"]] = progress
if time.time() - last_emit_time > 1.5:
self.log.info(
"Pushing image\n",
extra=dict(
progress=progress_layers, layers=layers, phase="pushing"
),
)
last_emit_time = time.time()
self.log.info(
"Successfully pushed {}".format(self.output_image_spec),
extra=dict(phase="pushing"),
)
def run_image(self):
"""Run docker container from built image
and wait for it to finish.
"""
container = self.start_container()
self.wait_for_container(container)
def start_container(self):
"""Start docker container from built image
Returns running container
"""
client = docker.from_env(version="auto")
docker_host = os.environ.get("DOCKER_HOST")
if docker_host:
host_name = urlparse(docker_host).hostname
else:
host_name = "127.0.0.1"
self.hostname = host_name
if not self.run_cmd:
port = str(self._get_free_port())
self.port = port
# To use the option --NotebookApp.custom_display_url
# make sure the base-notebook image is updated:
# docker pull jupyter/base-notebook
run_cmd = [
"jupyter",
"notebook",
"--ip",
"0.0.0.0",
"--port",
port,
"--NotebookApp.custom_display_url=http://{}:{}".format(host_name, port),
]
ports = {"%s/tcp" % port: port}
else:
# run_cmd given by user, if port is also given then pass it on
run_cmd = self.run_cmd
if self.ports:
ports = self.ports
else:
ports = {}
# store ports on self so they can be retrieved in tests
self.ports = ports
container_volumes = {}
if self.volumes:
api_client = docker.APIClient(
version="auto", **docker.utils.kwargs_from_env()
)
image = api_client.inspect_image(self.output_image_spec)
image_workdir = image["ContainerConfig"]["WorkingDir"]
for k, v in self.volumes.items():
container_volumes[os.path.abspath(k)] = {
"bind": v if v.startswith("/") else os.path.join(image_workdir, v),
"mode": "rw",
}
run_kwargs = dict(
publish_all_ports=self.all_ports,
ports=ports,
detach=True,
command=run_cmd,
volumes=container_volumes,
environment=self.environment,
)
run_kwargs.update(self.extra_run_kwargs)
container = client.containers.run(self.output_image_spec, **run_kwargs)
while container.status == "created":
time.sleep(0.5)
container.reload()
return container
def wait_for_container(self, container):
"""Wait for a container to finish
Displaying logs while it's running
"""
try:
for line in container.logs(stream=True):
self.log.info(line.decode("utf-8"), extra=dict(phase="running"))
finally:
container.reload()
if container.status == "running":
self.log.info("Stopping container...\n", extra=dict(phase="running"))
container.kill()
exit_code = container.attrs["State"]["ExitCode"]
container.wait()
self.log.info(
"Container finished running.\n".upper(), extra=dict(phase="running")
)
# are there more logs? Let's send them back too
late_logs = container.logs().decode("utf-8")
for line in late_logs.split("\n"):
self.log.info(line + "\n", extra=dict(phase="running"))
container.remove()
if exit_code:
sys.exit(exit_code)
def _get_free_port(self):
"""
Hacky method to get a free random port on local host
"""
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
port = s.getsockname()[1]
s.close()
return port
def build(self):
"""
Build docker image
"""
# Check if r2d can connect to docker daemon
if not self.dry_run:
try:
docker_client = docker.APIClient(version="auto", **kwargs_from_env())
except DockerException as e:
self.log.error(
"\nDocker client initialization error: %s.\nCheck if docker is running on the host.\n",
e,
)
self.exit(1)
# If the source to be executed is a directory, continue using the
# directory. In the case of a local directory, it is used as both the
# source and target. Reusing a local directory seems better than
# making a copy of it as it might contain large files that would be
# expensive to copy.
if os.path.isdir(self.repo):
checkout_path = self.repo
else:
if self.git_workdir is None:
checkout_path = tempfile.mkdtemp(prefix="repo2docker")
else:
checkout_path = self.git_workdir
try:
self.fetch(self.repo, self.ref, checkout_path)
if self.find_image():
self.log.info(
"Reusing existing image ({}), not "
"building.".format(self.output_image_spec)
)
# no need to build, so skip to the end by `return`ing here
                # this will still execute the finally clause and lets us
# avoid having to indent the build code by an extra level
return
if self.subdir:
checkout_path = os.path.join(checkout_path, self.subdir)
if not os.path.isdir(checkout_path):
self.log.error(
"Subdirectory %s does not exist",
self.subdir,
extra=dict(phase="failure"),
)
raise FileNotFoundError("Could not find {}".format(checkout_path))
with chdir(checkout_path):
for BP in self.buildpacks:
bp = BP()
if bp.detect():
picked_buildpack = bp
break
else:
picked_buildpack = self.default_buildpack()
picked_buildpack.appendix = self.appendix
# Add metadata labels
picked_buildpack.labels["repo2docker.version"] = self.version
repo_label = "local" if os.path.isdir(self.repo) else self.repo
picked_buildpack.labels["repo2docker.repo"] = repo_label
picked_buildpack.labels["repo2docker.ref"] = self.ref
if self.dry_run:
print(picked_buildpack.render())
else:
self.log.debug(
picked_buildpack.render(), extra=dict(phase="building")
)
if self.user_id == 0:
raise ValueError(
"Root as the primary user in the image is not permitted."
)
build_args = {
"NB_USER": self.user_name,
"NB_UID": str(self.user_id),
}
if self.target_repo_dir:
build_args["REPO_DIR"] = self.target_repo_dir
self.log.info(
"Using %s builder\n",
bp.__class__.__name__,
extra=dict(phase="building"),
)
for l in picked_buildpack.build(
docker_client,
self.output_image_spec,
self.build_memory_limit,
build_args,
self.cache_from,
self.extra_build_kwargs,
):
if "stream" in l:
self.log.info(l["stream"], extra=dict(phase="building"))
elif "error" in l:
self.log.info(l["error"], extra=dict(phase="failure"))
raise docker.errors.BuildError(l["error"], build_log="")
elif "status" in l:
self.log.info(
"Fetching base image...\r", extra=dict(phase="building")
)
else:
self.log.info(json.dumps(l), extra=dict(phase="building"))
finally:
# Cleanup checkout if necessary
if self.cleanup_checkout:
shutil.rmtree(checkout_path, ignore_errors=True)
| 32.489089 | 107 | 0.554151 | """repo2docker: convert git repositories into jupyter-suitable docker images
Images produced by repo2docker can be used with Jupyter notebooks standalone
or with BinderHub.
Usage:
python -m repo2docker https://github.com/you/your-repo
"""
import json
import sys
import logging
import os
import getpass
import shutil
import tempfile
import time
import docker
from urllib.parse import urlparse
from docker.utils import kwargs_from_env
from docker.errors import DockerException
import escapism
from pythonjsonlogger import jsonlogger
from traitlets import Any, Dict, Int, List, Unicode, Bool, default
from traitlets.config import Application
from . import __version__
from .buildpacks import (
CondaBuildPack,
DockerBuildPack,
JuliaProjectTomlBuildPack,
JuliaRequireBuildPack,
LegacyBinderDockerBuildPack,
NixBuildPack,
PipfileBuildPack,
PythonBuildPack,
RBuildPack,
)
from . import contentproviders
from .utils import ByteSpecification, chdir
class Repo2Docker(Application):
"""An application for converting git repositories to docker images"""
name = "jupyter-repo2docker"
version = __version__
description = __doc__
@default("log_level")
def _default_log_level(self):
"""The application's default log level"""
return logging.INFO
git_workdir = Unicode(
None,
config=True,
allow_none=True,
help="""
Working directory to use for check out of git repositories.
The default is to use the system's temporary directory. Should be
somewhere ephemeral, such as /tmp.
""",
)
subdir = Unicode(
"",
config=True,
help="""
Subdirectory of the git repository to examine.
Defaults to ''.
""",
)
cache_from = List(
[],
config=True,
help="""
List of images to try & re-use cached image layers from.
Docker only tries to re-use image layers from images built locally,
not pulled from a registry. We can ask it to explicitly re-use layers
        from non-locally built images through the 'cache_from' parameter.
""",
)
buildpacks = List(
[
LegacyBinderDockerBuildPack,
DockerBuildPack,
JuliaProjectTomlBuildPack,
JuliaRequireBuildPack,
NixBuildPack,
RBuildPack,
CondaBuildPack,
PipfileBuildPack,
PythonBuildPack,
],
config=True,
help="""
Ordered list of BuildPacks to try when building a git repository.
""",
)
extra_build_kwargs = Dict(
{},
help="""
extra kwargs to limit CPU quota when building a docker image.
Dictionary that allows the user to set the desired runtime flag
to configure the amount of access to CPU resources your container has.
Reference https://docs.docker.com/config/containers/resource_constraints/#cpu
""",
config=True,
)
extra_run_kwargs = Dict(
{},
help="""
extra kwargs to limit CPU quota when running a docker image.
Dictionary that allows the user to set the desired runtime flag
to configure the amount of access to CPU resources your container has.
Reference https://docs.docker.com/config/containers/resource_constraints/#cpu
""",
config=True,
)
default_buildpack = Any(
PythonBuildPack,
config=True,
help="""
The default build pack to use when no other buildpacks are found.
""",
)
# Git is our content provider of last resort. This is to maintain the
# old behaviour when git and local directories were the only supported
# content providers. We can detect local directories from the path, but
# detecting if something will successfully `git clone` is very hard if all
# you can do is look at the path/URL to it.
content_providers = List(
[
contentproviders.Local,
contentproviders.Zenodo,
contentproviders.Figshare,
contentproviders.Dataverse,
contentproviders.Hydroshare,
contentproviders.Swhid,
contentproviders.Mercurial,
contentproviders.Git,
],
config=True,
help="""
Ordered list by priority of ContentProviders to try in turn to fetch
the contents specified by the user.
""",
)
build_memory_limit = ByteSpecification(
0,
help="""
Total memory that can be used by the docker image building process.
Set to 0 for no limits.
""",
config=True,
)
volumes = Dict(
{},
help="""
Volumes to mount when running the container.
Only used when running, not during build process!
Use a key-value pair, with the key being the volume source &
value being the destination volume.
Both source and destination can be relative. Source is resolved
relative to the current working directory on the host, and
destination is resolved relative to the working directory of the
image - ($HOME by default)
""",
config=True,
)
user_id = Int(
help="""
UID of the user to create inside the built image.
Should be a uid that is not currently used by anything in the image.
Defaults to uid of currently running user, since that is the most
common case when running r2d manually.
Might not affect Dockerfile builds.
""",
config=True,
)
@default("user_id")
def _user_id_default(self):
"""
Default user_id to current running user.
"""
return os.geteuid()
user_name = Unicode(
"jovyan",
help="""
Username of the user to create inside the built image.
Should be a username that is not currently used by anything in the
image, and should conform to the restrictions on user names for Linux.
Defaults to username of currently running user, since that is the most
common case when running repo2docker manually.
""",
config=True,
)
@default("user_name")
def _user_name_default(self):
"""
Default user_name to current running user.
"""
return getpass.getuser()
appendix = Unicode(
config=True,
help="""
Appendix of Dockerfile commands to run at the end of the build.
Can be used to customize the resulting image after all
standard build steps finish.
""",
)
json_logs = Bool(
False,
help="""
Log output in structured JSON format.
Useful when stdout is consumed by other tools
""",
config=True,
)
repo = Unicode(
".",
help="""
Specification of repository to build image for.
Could be local path or git URL.
""",
config=True,
)
ref = Unicode(
None,
help="""
Git ref that should be built.
If repo is a git repository, this ref is checked out
in a local clone before repository is built.
""",
config=True,
allow_none=True,
)
swh_token = Unicode(
None,
help="""
Token to use authenticated SWH API access.
If unset, default to unauthenticated (limited) usage of the Software
Heritage API.
""",
config=True,
allow_none=True,
)
cleanup_checkout = Bool(
False,
help="""
Delete source repository after building is done.
Useful when repo2docker is doing the git cloning
""",
config=True,
)
output_image_spec = Unicode(
"",
help="""
Docker Image name:tag to tag the built image with.
Required parameter.
""",
config=True,
)
push = Bool(
False,
help="""
Set to true to push docker image after building
""",
config=True,
)
run = Bool(
False,
help="""
Run docker image after building
""",
config=True,
)
# FIXME: Refactor class to be able to do --no-build without needing
# deep support for it inside other code
dry_run = Bool(
False,
help="""
Do not actually build the docker image, just simulate it.
""",
config=True,
)
# FIXME: Refactor classes to separate build & run steps
run_cmd = List(
[],
help="""
Command to run when running the container
When left empty, a jupyter notebook is run.
""",
config=True,
)
all_ports = Bool(
False,
help="""
        Publish all declared ports from container while running.
Equivalent to -P option to docker run
""",
config=True,
)
ports = Dict(
{},
help="""
Port mappings to establish when running the container.
Equivalent to -p {key}:{value} options to docker run.
{key} refers to port inside container, and {value}
refers to port / host:port in the host
""",
config=True,
)
environment = List(
[],
help="""
Environment variables to set when running the built image.
Each item must be a string formatted as KEY=VALUE
""",
config=True,
)
target_repo_dir = Unicode(
"",
help="""
Path inside the image where contents of the repositories are copied to,
and where all the build operations (such as postBuild) happen.
Defaults to ${HOME} if not set
""",
config=True,
)
def fetch(self, url, ref, checkout_path):
"""Fetch the contents of `url` and place it in `checkout_path`.
The `ref` parameter specifies what "version" of the contents should be
fetched. In the case of a git repository `ref` is the SHA-1 of a commit.
Iterate through possible content providers until a valid provider,
based on URL, is found.
"""
picked_content_provider = None
for ContentProvider in self.content_providers:
cp = ContentProvider()
spec = cp.detect(url, ref=ref)
if spec is not None:
picked_content_provider = cp
self.log.info(
"Picked {cp} content "
"provider.\n".format(cp=cp.__class__.__name__)
)
break
if picked_content_provider is None:
self.log.error(
"No matching content provider found for " "{url}.".format(url=url)
)
swh_token = self.config.get("swh_token", self.swh_token)
if swh_token and isinstance(picked_content_provider, contentproviders.Swhid):
picked_content_provider.set_auth_token(swh_token)
for log_line in picked_content_provider.fetch(
spec, checkout_path, yield_output=self.json_logs
):
self.log.info(log_line, extra=dict(phase="fetching"))
if not self.output_image_spec:
image_spec = "r2d" + self.repo
# if we are building from a subdirectory include that in the
# image name so we can tell builds from different sub-directories
# apart.
if self.subdir:
image_spec += self.subdir
if picked_content_provider.content_id is not None:
image_spec += picked_content_provider.content_id
else:
image_spec += str(int(time.time()))
self.output_image_spec = escapism.escape(
image_spec, escape_char="-"
).lower()
def json_excepthook(self, etype, evalue, traceback):
"""Called on an uncaught exception when using json logging
Avoids non-JSON output on errors when using --json-logs
"""
self.log.error(
"Error during build: %s",
evalue,
exc_info=(etype, evalue, traceback),
extra=dict(phase="failed"),
)
def initialize(self):
"""Init repo2docker configuration before start"""
# FIXME: Remove this function, move it to setters / traitlet reactors
if self.json_logs:
# register JSON excepthook to avoid non-JSON output on errors
sys.excepthook = self.json_excepthook
# Need to reset existing handlers, or we repeat messages
logHandler = logging.StreamHandler()
formatter = jsonlogger.JsonFormatter()
logHandler.setFormatter(formatter)
self.log = logging.getLogger("repo2docker")
self.log.handlers = []
self.log.addHandler(logHandler)
self.log.setLevel(self.log_level)
else:
# due to json logger stuff above,
# our log messages include carriage returns, newlines, etc.
# remove the additional newline from the stream handler
self.log.handlers[0].terminator = ""
# We don't want a [Repo2Docker] on all messages
self.log.handlers[0].formatter = logging.Formatter(fmt="%(message)s")
if self.dry_run and (self.run or self.push):
raise ValueError("Cannot push or run image if we are not building it")
if self.volumes and not self.run:
raise ValueError("Cannot mount volumes if container is not run")
def push_image(self):
"""Push docker image to registry"""
client = docker.APIClient(version="auto", **kwargs_from_env())
# Build a progress setup for each layer, and only emit per-layer
# info every 1.5s
progress_layers = {}
layers = {}
last_emit_time = time.time()
for chunk in client.push(self.output_image_spec, stream=True):
# each chunk can be one or more lines of json events
# split lines here in case multiple are delivered at once
for line in chunk.splitlines():
line = line.decode("utf-8", errors="replace")
try:
progress = json.loads(line)
except Exception as e:
self.log.warning("Not a JSON progress line: %r", line)
continue
if "error" in progress:
self.log.error(progress["error"], extra=dict(phase="failed"))
raise docker.errors.ImageLoadError(progress["error"])
if "id" not in progress:
continue
# deprecated truncated-progress data
if "progressDetail" in progress and progress["progressDetail"]:
progress_layers[progress["id"]] = progress["progressDetail"]
else:
progress_layers[progress["id"]] = progress["status"]
# include full progress data for each layer in 'layers' data
layers[progress["id"]] = progress
if time.time() - last_emit_time > 1.5:
self.log.info(
"Pushing image\n",
extra=dict(
progress=progress_layers, layers=layers, phase="pushing"
),
)
last_emit_time = time.time()
self.log.info(
"Successfully pushed {}".format(self.output_image_spec),
extra=dict(phase="pushing"),
)
def run_image(self):
"""Run docker container from built image
and wait for it to finish.
"""
container = self.start_container()
self.wait_for_container(container)
def start_container(self):
"""Start docker container from built image
Returns running container
"""
client = docker.from_env(version="auto")
docker_host = os.environ.get("DOCKER_HOST")
if docker_host:
host_name = urlparse(docker_host).hostname
else:
host_name = "127.0.0.1"
self.hostname = host_name
if not self.run_cmd:
port = str(self._get_free_port())
self.port = port
# To use the option --NotebookApp.custom_display_url
# make sure the base-notebook image is updated:
# docker pull jupyter/base-notebook
run_cmd = [
"jupyter",
"notebook",
"--ip",
"0.0.0.0",
"--port",
port,
"--NotebookApp.custom_display_url=http://{}:{}".format(host_name, port),
]
ports = {"%s/tcp" % port: port}
else:
# run_cmd given by user, if port is also given then pass it on
run_cmd = self.run_cmd
if self.ports:
ports = self.ports
else:
ports = {}
# store ports on self so they can be retrieved in tests
self.ports = ports
container_volumes = {}
if self.volumes:
api_client = docker.APIClient(
version="auto", **docker.utils.kwargs_from_env()
)
image = api_client.inspect_image(self.output_image_spec)
image_workdir = image["ContainerConfig"]["WorkingDir"]
for k, v in self.volumes.items():
container_volumes[os.path.abspath(k)] = {
"bind": v if v.startswith("/") else os.path.join(image_workdir, v),
"mode": "rw",
}
run_kwargs = dict(
publish_all_ports=self.all_ports,
ports=ports,
detach=True,
command=run_cmd,
volumes=container_volumes,
environment=self.environment,
)
run_kwargs.update(self.extra_run_kwargs)
container = client.containers.run(self.output_image_spec, **run_kwargs)
while container.status == "created":
time.sleep(0.5)
container.reload()
return container
def wait_for_container(self, container):
"""Wait for a container to finish
Displaying logs while it's running
"""
try:
for line in container.logs(stream=True):
self.log.info(line.decode("utf-8"), extra=dict(phase="running"))
finally:
container.reload()
if container.status == "running":
self.log.info("Stopping container...\n", extra=dict(phase="running"))
container.kill()
exit_code = container.attrs["State"]["ExitCode"]
container.wait()
self.log.info(
"Container finished running.\n".upper(), extra=dict(phase="running")
)
# are there more logs? Let's send them back too
late_logs = container.logs().decode("utf-8")
for line in late_logs.split("\n"):
self.log.info(line + "\n", extra=dict(phase="running"))
container.remove()
if exit_code:
sys.exit(exit_code)
def _get_free_port(self):
"""
Hacky method to get a free random port on local host
"""
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
port = s.getsockname()[1]
s.close()
return port
def find_image(self):
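        # Return True if an image tagged '<output_image_spec>:latest' already exists in the local Docker daemon.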
# if this is a dry run it is Ok for dockerd to be unreachable so we
# always return False for dry runs.
if self.dry_run:
return False
# check if we already have an image for this content
client = docker.APIClient(version="auto", **kwargs_from_env())
for image in client.images():
if image["RepoTags"] is not None:
for tag in image["RepoTags"]:
if tag == self.output_image_spec + ":latest":
return True
return False
def build(self):
"""
Build docker image
"""
# Check if r2d can connect to docker daemon
if not self.dry_run:
try:
docker_client = docker.APIClient(version="auto", **kwargs_from_env())
except DockerException as e:
self.log.error(
"\nDocker client initialization error: %s.\nCheck if docker is running on the host.\n",
e,
)
self.exit(1)
# If the source to be executed is a directory, continue using the
# directory. In the case of a local directory, it is used as both the
# source and target. Reusing a local directory seems better than
# making a copy of it as it might contain large files that would be
# expensive to copy.
if os.path.isdir(self.repo):
checkout_path = self.repo
else:
if self.git_workdir is None:
checkout_path = tempfile.mkdtemp(prefix="repo2docker")
else:
checkout_path = self.git_workdir
try:
self.fetch(self.repo, self.ref, checkout_path)
if self.find_image():
self.log.info(
"Reusing existing image ({}), not "
"building.".format(self.output_image_spec)
)
# no need to build, so skip to the end by `return`ing here
                # this will still execute the finally clause and lets us
# avoid having to indent the build code by an extra level
return
if self.subdir:
checkout_path = os.path.join(checkout_path, self.subdir)
if not os.path.isdir(checkout_path):
self.log.error(
"Subdirectory %s does not exist",
self.subdir,
extra=dict(phase="failure"),
)
raise FileNotFoundError("Could not find {}".format(checkout_path))
with chdir(checkout_path):
for BP in self.buildpacks:
bp = BP()
if bp.detect():
picked_buildpack = bp
break
else:
picked_buildpack = self.default_buildpack()
picked_buildpack.appendix = self.appendix
# Add metadata labels
picked_buildpack.labels["repo2docker.version"] = self.version
repo_label = "local" if os.path.isdir(self.repo) else self.repo
picked_buildpack.labels["repo2docker.repo"] = repo_label
picked_buildpack.labels["repo2docker.ref"] = self.ref
if self.dry_run:
print(picked_buildpack.render())
else:
self.log.debug(
picked_buildpack.render(), extra=dict(phase="building")
)
if self.user_id == 0:
raise ValueError(
"Root as the primary user in the image is not permitted."
)
build_args = {
"NB_USER": self.user_name,
"NB_UID": str(self.user_id),
}
if self.target_repo_dir:
build_args["REPO_DIR"] = self.target_repo_dir
self.log.info(
"Using %s builder\n",
bp.__class__.__name__,
extra=dict(phase="building"),
)
for l in picked_buildpack.build(
docker_client,
self.output_image_spec,
self.build_memory_limit,
build_args,
self.cache_from,
self.extra_build_kwargs,
):
if "stream" in l:
self.log.info(l["stream"], extra=dict(phase="building"))
elif "error" in l:
self.log.info(l["error"], extra=dict(phase="failure"))
raise docker.errors.BuildError(l["error"], build_log="")
elif "status" in l:
self.log.info(
"Fetching base image...\r", extra=dict(phase="building")
)
else:
self.log.info(json.dumps(l), extra=dict(phase="building"))
finally:
# Cleanup checkout if necessary
if self.cleanup_checkout:
shutil.rmtree(checkout_path, ignore_errors=True)
def start(self):
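        # Entry point: build the image, then optionally push and/or run it.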
self.build()
if self.push:
self.push_image()
if self.run:
self.run_image()
| 675 | 0 | 54 |
0e416ddd456069d108fcc8c37156c66abbc2ccc0 | 2,824 | py | Python | tests/integration/whole_projects/test_duplo_project.py | ajeetraina/pycograph | a1bcf8e0f62a605a798e82373ec2279add83cf16 | [
"BSD-3-Clause"
] | 2 | 2021-04-20T22:39:03.000Z | 2021-11-30T20:26:23.000Z | tests/integration/whole_projects/test_duplo_project.py | ajeetraina/pycograph | a1bcf8e0f62a605a798e82373ec2279add83cf16 | [
"BSD-3-Clause"
] | 12 | 2021-04-23T13:09:49.000Z | 2021-05-15T23:39:32.000Z | tests/integration/whole_projects/test_duplo_project.py | ajeetraina/pycograph | a1bcf8e0f62a605a798e82373ec2279add83cf16 | [
"BSD-3-Clause"
] | 2 | 2022-01-09T12:55:42.000Z | 2022-03-05T09:22:29.000Z | import os
from pycograph import pycograph
from pycograph.schemas.parse_result import CALLS, CONTAINS, IMPORTS
from pycograph.schemas.pycograph_input import PycographLoadInput
from tests.integration.whole_projects.helpers import assert_edge, assert_node
| 30.042553 | 84 | 0.701487 | import os
from pycograph import pycograph
from pycograph.schemas.parse_result import CALLS, CONTAINS, IMPORTS
from pycograph.schemas.pycograph_input import PycographLoadInput
from tests.integration.whole_projects.helpers import assert_edge, assert_node
def test_duplo_project(test_data_dir, no_graph_commit):
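    # Build the graph for the bundled duplo-project fixture and check its nodes and edges.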
duplo_project_path = os.path.join(test_data_dir, "duplo-project")
result = pycograph.load(PycographLoadInput(project_dir_path=duplo_project_path))
assert result.name == "duplo-project"
assert len(result.nodes) == 8
assert len(result.edges) == 14
nodes = list(result.nodes.values())
duplo_package_node = assert_node(
nodes, label="package", name="duplo", full_name="duplo"
)
content_module_node = assert_node(
nodes,
label="module",
name="content",
full_name="duplo.content",
)
main_module_node = assert_node(
nodes,
label="module",
name="main",
full_name="duplo.main",
)
answer_constant_node = assert_node(
nodes,
label="constant",
name="ANSWER",
full_name="duplo.content.ANSWER",
)
publ_function_node = assert_node(
nodes,
label="function",
name="publ",
full_name="duplo.content.publ",
)
priv_function_node = assert_node(
nodes,
label="function",
name="priv",
full_name="duplo.content.priv",
)
dummy_class_node = assert_node(
nodes,
label="class",
name="Dummy",
full_name="duplo.content.Dummy",
)
bla_function_node = assert_node(
nodes,
label="function",
name="bla",
full_name="duplo.main.bla",
)
assert_edge(duplo_package_node, CONTAINS, content_module_node, result)
assert_edge(duplo_package_node, CONTAINS, main_module_node, result)
assert_edge(content_module_node, CONTAINS, answer_constant_node, result)
assert_edge(content_module_node, CONTAINS, publ_function_node, result)
assert_edge(content_module_node, CONTAINS, priv_function_node, result)
assert_edge(content_module_node, CONTAINS, dummy_class_node, result)
assert_edge(main_module_node, CONTAINS, bla_function_node, result)
assert_edge(main_module_node, IMPORTS, answer_constant_node, result)
assert_edge(main_module_node, IMPORTS, dummy_class_node, result)
assert_edge(main_module_node, IMPORTS, publ_function_node, result)
# function call within module
assert_edge(publ_function_node, CALLS, priv_function_node, result)
# calling imported objects
assert_edge(bla_function_node, CALLS, answer_constant_node, result)
assert_edge(bla_function_node, CALLS, dummy_class_node, result)
assert_edge(bla_function_node, CALLS, publ_function_node, result)
| 2,546 | 0 | 23 |
936e1df9e466c953b4b6a6e778cd6224ad63282a | 4,790 | py | Python | parser/utils/alg.py | Jacob-Zhou/spw-parser | 5f746a54d9a1da0591fc34f024eac2639bc3f407 | [
"MIT"
] | null | null | null | parser/utils/alg.py | Jacob-Zhou/spw-parser | 5f746a54d9a1da0591fc34f024eac2639bc3f407 | [
"MIT"
] | null | null | null | parser/utils/alg.py | Jacob-Zhou/spw-parser | 5f746a54d9a1da0591fc34f024eac2639bc3f407 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from parser.utils.fn import stripe
import torch
import torch.autograd as autograd
@torch.enable_grad()
| 35.481481 | 79 | 0.597912 | # -*- coding: utf-8 -*-
from parser.utils.fn import stripe
import torch
import torch.autograd as autograd
def kmeans(x, k):
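    # Frequency-weighted 1-D k-means: cluster the values in x into k buckets, returning the
    # centroids of the assigned clusters and the member indices of each cluster.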
x = torch.tensor(x, dtype=torch.float)
# count the frequency of each datapoint
d, indices, f = x.unique(return_inverse=True, return_counts=True)
# calculate the sum of the values of the same datapoints
total = d * f
# initialize k centroids randomly
c, old = d[torch.randperm(len(d))[:k]], None
# assign labels to each datapoint based on centroids
dists, y = torch.abs_(d.unsqueeze(-1) - c).min(dim=-1)
# make sure number of datapoints is greater than that of clusters
assert len(d) >= k, f"unable to assign {len(d)} datapoints to {k} clusters"
while old is None or not c.equal(old):
# if an empty cluster is encountered,
# choose the farthest datapoint from the biggest cluster
# and move that the empty one
for i in range(k):
if not y.eq(i).any():
mask = y.eq(torch.arange(k).unsqueeze(-1))
lens = mask.sum(dim=-1)
biggest = mask[lens.argmax()].nonzero().view(-1)
farthest = dists[biggest].argmax()
y[biggest[farthest]] = i
mask = y.eq(torch.arange(k).unsqueeze(-1))
# update the centroids
c, old = (total * mask).sum(-1) / (f * mask).sum(-1), c
# re-assign all datapoints to clusters
dists, y = torch.abs_(d.unsqueeze(-1) - c).min(dim=-1)
# assign all datapoints to the new-generated clusters
# without considering the empty ones
y, assigned = y[indices], y.unique().tolist()
# get the centroids of the assigned clusters
centroids = c[assigned].tolist()
# map all values of datapoints to buckets
clusters = [torch.where(y.eq(i))[0].tolist() for i in assigned]
return centroids, clusters
@torch.enable_grad()
def crf(scores, mask, target=None, marg=False):
lens = mask[:, 0].sum(-1)
total = lens.sum()
batch_size, seq_len, _ = scores.shape
training = scores.requires_grad
# always enable the gradient computation of scores
# in order for the computation of marginal probs
s = inside(scores.requires_grad_(), mask)
logZ = s[0].gather(0, lens.unsqueeze(0)).sum()
# marginal probs are used for decoding, and can be computed by
# combining the inside algorithm and autograd mechanism
# instead of the entire inside-outside process
probs = scores
if marg:
probs, = autograd.grad(logZ, scores, retain_graph=training)
if target is None:
return probs
loss = (logZ - scores[mask & target].sum()) / total
return loss, probs
def inside(scores, mask):
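    # Inside pass: fill the span-score chart bottom-up, combining sub-spans with logsumexp.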
batch_size, seq_len, _ = scores.shape
# [seq_len, seq_len, batch_size]
scores, mask = scores.permute(1, 2, 0), mask.permute(1, 2, 0)
s = torch.full_like(scores, float('-inf'))
for w in range(1, seq_len):
# n denotes the number of spans to iterate,
# from span (0, w) to span (n, n+w) given width w
n = seq_len - w
# diag_mask is used for ignoring the excess of each sentence
# [batch_size, n]
diag_mask = mask.diagonal(w)
if w == 1:
s.diagonal(w)[diag_mask] = scores.diagonal(w)[diag_mask]
continue
# [n, w, batch_size]
s_span = stripe(s, n, w-1, (0, 1)) + stripe(s, n, w-1, (1, w), 0)
# [batch_size, n, w]
s_span = s_span.permute(2, 0, 1)
s_span = s_span[diag_mask].logsumexp(-1)
s.diagonal(w)[diag_mask] = s_span + scores.diagonal(w)[diag_mask]
return s
def cky(scores, mask):
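    # CKY decoding: record the best split for every span, then backtrack to recover the
    # highest-scoring binary trees.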
lens = mask[:, 0].sum(-1)
scores = scores.permute(1, 2, 0)
seq_len, seq_len, batch_size = scores.shape
s = scores.new_zeros(seq_len, seq_len, batch_size)
p = scores.new_zeros(seq_len, seq_len, batch_size).long()
for w in range(1, seq_len):
n = seq_len - w
starts = p.new_tensor(range(n)).unsqueeze(0)
if w == 1:
s.diagonal(w).copy_(scores.diagonal(w))
continue
# [n, w, batch_size]
s_span = stripe(s, n, w-1, (0, 1)) + stripe(s, n, w-1, (1, w), 0)
# [batch_size, n, w]
s_span = s_span.permute(2, 0, 1)
# [batch_size, n]
s_span, p_span = s_span.max(-1)
s.diagonal(w).copy_(s_span + scores.diagonal(w))
p.diagonal(w).copy_(p_span + starts + 1)
def backtrack(p, i, j):
if j == i + 1:
return [(i, j)]
split = p[i][j]
ltree = backtrack(p, i, split)
rtree = backtrack(p, split, j)
return [(i, j)] + ltree + rtree
p = p.permute(2, 0, 1).tolist()
trees = [backtrack(p[i], 0, length)
for i, length in enumerate(lens.tolist())]
return trees
| 4,565 | 0 | 91 |
d2762a23bb2cdc8d49d2ea06f39df76edbe2c4d3 | 3,803 | py | Python | atari/eval_reward.py | yilin-wang/tril | fb3f1090d2056c063602c65d8b7d952ea5037872 | [
"MIT"
] | 1 | 2021-10-17T07:00:05.000Z | 2021-10-17T07:00:05.000Z | atari/eval_reward.py | yilin-wang/tril | fb3f1090d2056c063602c65d8b7d952ea5037872 | [
"MIT"
] | null | null | null | atari/eval_reward.py | yilin-wang/tril | fb3f1090d2056c063602c65d8b7d952ea5037872 | [
"MIT"
] | null | null | null | import argparse
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
import pygame
import sys
import time
import matplotlib
import numpy as np
import pickle
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from run_test import *
from baselines.common.trex_utils import preprocess
sys.path[0] += '/baselines'
from baselines.common.trex_utils import preprocess
# from baselines.common.cmd_util import make_vec_env
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
try:
matplotlib.use('GTK3Agg')
import matplotlib.pyplot as plt
except Exception:
pass
if __name__ == '__main__':
num_trajs = 2000
num_snippets = 6000
num_super_snippets = 0
min_snippet_length = 50 #length of trajectory for training comparison
max_snippet_length = 100
lr = 0.00005
weight_decay = 0.0
num_iter = 5 #num times through training data
l1_reg = 0.0
stochastic = True
demonstrations = {}
for i in range(12):
with open('col1_demos/%d' % (i+1),'rb') as fp:
dem = pickle.load(fp)
demonstrations[i] = dem
# human_rankings = []
# label_reader = open("human_labels/si_columns.csv")
# for i,line in enumerate(label_reader):
# if i == 0:
# continue #skip header info
# parsed = line.split(",")
# a_index = int(parsed[0])
# b_index = int(parsed[1])
# label = int(parsed[2])
# human_rankings.append((a_index, b_index, label))
reward_model_path = './learned_models/col1.params'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
reward_net = Net()
reward_net.load_state_dict(torch.load(reward_model_path))
reward_net.to(device)
with torch.no_grad():
pred_returns = [(predict_traj_return(reward_net, traj), len(traj)) for traj in demonstrations.values()]
for i, p in enumerate(pred_returns):
print(i+1,p[0],p[1],p[0]/p[1]) | 31.429752 | 111 | 0.648961 | import argparse
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
import pygame
import sys
import time
import matplotlib
import numpy as np
import pickle
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from run_test import *
from baselines.common.trex_utils import preprocess
sys.path[0] += '/baselines'
from baselines.common.trex_utils import preprocess
# from baselines.common.cmd_util import make_vec_env
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
try:
matplotlib.use('GTK3Agg')
import matplotlib.pyplot as plt
except Exception:
pass
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(4, 16, 7, stride=3)
self.conv2 = nn.Conv2d(16, 16, 5, stride=2)
self.conv3 = nn.Conv2d(16, 16, 3, stride=1)
self.conv4 = nn.Conv2d(16, 16, 3, stride=1)
self.fc1 = nn.Linear(784, 64)
self.fc2 = nn.Linear(64, 1)
def cum_return(self, traj):
'''calculate cumulative return of trajectory'''
sum_rewards = 0
sum_abs_rewards = 0
x = traj.permute(0,3,1,2) #get into NCHW format
x = F.leaky_relu(self.conv1(x))
x = F.leaky_relu(self.conv2(x))
x = F.leaky_relu(self.conv3(x))
x = F.leaky_relu(self.conv4(x))
# x = x.view(-1, 784)
x = x.reshape(-1,784)
x = F.leaky_relu(self.fc1(x))
r = self.fc2(x)
sum_rewards += torch.sum(r)
sum_abs_rewards += torch.sum(torch.abs(r))
return sum_rewards, sum_abs_rewards
def forward(self, traj_i, traj_j):
'''compute cumulative return for each trajectory and return logits'''
cum_r_i, abs_r_i = self.cum_return(traj_i)
cum_r_j, abs_r_j = self.cum_return(traj_j)
return torch.cat((cum_r_i.unsqueeze(0), cum_r_j.unsqueeze(0)),0), abs_r_i + abs_r_j
def predict_reward_sequence(net, traj):
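    # Score each observation with the reward network and squash the reward through a sigmoid.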
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
rewards_from_obs = []
with torch.no_grad():
for s in traj:
r = net.cum_return(torch.from_numpy(np.array([s])).float().to(device))[0].item()
r = 1/(1+np.exp(-r))
rewards_from_obs.append(r)
return rewards_from_obs
def predict_traj_return(net, traj):
return sum(predict_reward_sequence(net, traj))
if __name__ == '__main__':
num_trajs = 2000
num_snippets = 6000
num_super_snippets = 0
min_snippet_length = 50 #length of trajectory for training comparison
max_snippet_length = 100
lr = 0.00005
weight_decay = 0.0
num_iter = 5 #num times through training data
l1_reg = 0.0
stochastic = True
demonstrations = {}
for i in range(12):
with open('col1_demos/%d' % (i+1),'rb') as fp:
dem = pickle.load(fp)
demonstrations[i] = dem
# human_rankings = []
# label_reader = open("human_labels/si_columns.csv")
# for i,line in enumerate(label_reader):
# if i == 0:
# continue #skip header info
# parsed = line.split(",")
# a_index = int(parsed[0])
# b_index = int(parsed[1])
# label = int(parsed[2])
# human_rankings.append((a_index, b_index, label))
reward_model_path = './learned_models/col1.params'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
reward_net = Net()
reward_net.load_state_dict(torch.load(reward_model_path))
reward_net.to(device)
with torch.no_grad():
pred_returns = [(predict_traj_return(reward_net, traj), len(traj)) for traj in demonstrations.values()]
for i, p in enumerate(pred_returns):
print(i+1,p[0],p[1],p[0]/p[1]) | 734 | 952 | 69 |
247c64042c7a5d5b5cae2efb1bf244c7d158f68c | 315 | py | Python | save_tiff.py | xpspectre/cell-image | 6ceb7bc362d36408fe3c634f9c7155238d92c337 | [
"MIT"
] | 1 | 2017-06-07T14:28:15.000Z | 2017-06-07T14:28:15.000Z | save_tiff.py | xpspectre/cell-image | 6ceb7bc362d36408fe3c634f9c7155238d92c337 | [
"MIT"
] | null | null | null | save_tiff.py | xpspectre/cell-image | 6ceb7bc362d36408fe3c634f9c7155238d92c337 | [
"MIT"
] | null | null | null | from PIL import Image, TiffImagePlugin
TiffImagePlugin.WRITE_LIBTIFF = True
def save_tiff(output, img):
"""Save numpy array img as compressed TIFF as output file
Args:
output:
img:
Returns:
"""
pil_img = Image.fromarray(img)
pil_img.save(output, compression='packbits') | 18.529412 | 61 | 0.669841 | from PIL import Image, TiffImagePlugin
TiffImagePlugin.WRITE_LIBTIFF = True
def save_tiff(output, img):
"""Save numpy array img as compressed TIFF as output file
Args:
output:
img:
Returns:
"""
pil_img = Image.fromarray(img)
pil_img.save(output, compression='packbits') | 0 | 0 | 0 |
22f9a5041480ef7fa689f8e7296ecbdfa9a3cc6a | 1,360 | py | Python | Assignment02/Part01/testfile2.py | saurabhkakade21/AIS_spring2021 | 784d20670794c405505b09c1feea36e0a504ae5d | [
"MIT"
] | null | null | null | Assignment02/Part01/testfile2.py | saurabhkakade21/AIS_spring2021 | 784d20670794c405505b09c1feea36e0a504ae5d | [
"MIT"
] | null | null | null | Assignment02/Part01/testfile2.py | saurabhkakade21/AIS_spring2021 | 784d20670794c405505b09c1feea36e0a504ae5d | [
"MIT"
] | null | null | null |
from program2_funs import Searcher
# , hSLD, SNode
# (b) Show your program loading in the 30-node sample file.
s = Searcher("30node.txt")
# (c) Show your program setting start node=U and end node=T.
s.setStartGoal('U','T')
# myViz should be a DRDViz instance -> save map to file on disk.
s.myViz.save("30node1.png")
# (d) Show the one open node.
# [n.showBasic() for n in s.open]
# for n in s.open:
# print(n)
# # (e) Show successors of only open node.
# initial_children = s.successors(s.open.pop(0))
# [n.showBasic() for n in initial_children]
# # (f) Show three inserts: at the front, and the end, and "in order"
# def reset_insert(where):
# s.reset()
# initial_children = s.successors(s.open.pop(0))
# insert_method = getattr(s, "insert_"+where)
# insert_method(initial_children)
# return [n.showBasic() for n in s.open]
# reset_insert("front")
# reset_insert("end")
# reset_insert("ordered")
# # (g) INSERT (K,500), (C,91) and (J,10) and show no duplicates.
# newdata = (("K",500), ("C",91), ("J",10))
# newlist = [SNode(label=label, pathcost=pathcost) for label, pathcost in newdata]
# ignored = s.insert_end(newlist)
# [n.showBasic() for n in s.open]
# # 3. hSLD heuristic function being called on three nodes.
# [hSLD(x, s) for x in ("V", "AC", "J")]
| 26.666667 | 83 | 0.627941 |
from program2_funs import Searcher
# , hSLD, SNode
# (b) Show your program loading in the 30-node sample file.
s = Searcher("30node.txt")
# (c) Show your program setting start node=U and end node=T.
s.setStartGoal('U','T')
# myViz should be a DRDViz instance -> save map to file on disk.
s.myViz.save("30node1.png")
# (d) Show the one open node.
# [n.showBasic() for n in s.open]
# for n in s.open:
# print(n)
# # (e) Show successors of only open node.
# initial_children = s.successors(s.open.pop(0))
# [n.showBasic() for n in initial_children]
# # (f) Show three inserts: at the front, and the end, and "in order"
# def reset_insert(where):
# s.reset()
# initial_children = s.successors(s.open.pop(0))
# insert_method = getattr(s, "insert_"+where)
# insert_method(initial_children)
# return [n.showBasic() for n in s.open]
# reset_insert("front")
# reset_insert("end")
# reset_insert("ordered")
# # (g) INSERT (K,500), (C,91) and (J,10) and show no duplicates.
# newdata = (("K",500), ("C",91), ("J",10))
# newlist = [SNode(label=label, pathcost=pathcost) for label, pathcost in newdata]
# ignored = s.insert_end(newlist)
# [n.showBasic() for n in s.open]
# # 3. hSLD heuristic function being called on three nodes.
# [hSLD(x, s) for x in ("V", "AC", "J")]
| 0 | 0 | 0 |
bff1e76ad164593084fbefad95a83b162df75c01 | 146 | py | Python | headers/__init__.py | kirsn/py-message-headers | 3b79ce640823940552ed146d2171d4cd42c0796f | [
"MIT"
] | null | null | null | headers/__init__.py | kirsn/py-message-headers | 3b79ce640823940552ed146d2171d4cd42c0796f | [
"MIT"
] | null | null | null | headers/__init__.py | kirsn/py-message-headers | 3b79ce640823940552ed146d2171d4cd42c0796f | [
"MIT"
] | null | null | null | # Generated on 2019-02-03T13:03:06.509000
from mail import *
from http import *
from mime import *
from netnews import *
VERSION = "2019.02.03"
| 16.222222 | 41 | 0.726027 | # Generated on 2019-02-03T13:03:06.509000
from mail import *
from http import *
from mime import *
from netnews import *
VERSION = "2019.02.03"
| 0 | 0 | 0 |
dfcae8d70a1362e79e9bb893adf6e81fc66322dd | 6,471 | py | Python | allocator_2/allocator/views.py | mike-fam/allocator_v2 | da634e97f2c70dba89f78f884a564d473ff03648 | [
"MIT"
] | null | null | null | allocator_2/allocator/views.py | mike-fam/allocator_v2 | da634e97f2c70dba89f78f884a564d473ff03648 | [
"MIT"
] | null | null | null | allocator_2/allocator/views.py | mike-fam/allocator_v2 | da634e97f2c70dba89f78f884a564d473ff03648 | [
"MIT"
] | null | null | null | import hashlib
import json
import multiprocessing as mp
from typing import Optional
import psutil
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST, require_GET
from django.utils import timezone
from .constants import (
REQUESTED_MESSAGE,
REQUESTED_TITLE,
NOT_READY_TITLE,
NOT_READY_MESSAGE,
KILLED_TITLE,
KILLED_MESSAGE,
)
from .type_hints import AllocationStatus
from .allocation import Allocator, _run_allocation
from .schema import InputData
from .models import AllocationState
from .utils import seconds_to_eta
@csrf_exempt
@require_POST
@require_GET
| 35.751381 | 76 | 0.616752 | import hashlib
import json
import multiprocessing as mp
from typing import Optional
import psutil
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST, require_GET
from django.utils import timezone
from .constants import (
REQUESTED_MESSAGE,
REQUESTED_TITLE,
NOT_READY_TITLE,
NOT_READY_MESSAGE,
KILLED_TITLE,
KILLED_MESSAGE,
)
from .type_hints import AllocationStatus
from .allocation import Allocator, _run_allocation
from .schema import InputData
from .models import AllocationState
from .utils import seconds_to_eta
def _replace_existing_allocation(
allocation_state: AllocationState,
allocator: Allocator,
timetable_id: str,
new_timeout: Optional[int] = None,
) -> None:
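    # Terminate the stale allocation process (if still alive), spawn a fresh one, and reset
    # the stored allocation state.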
try:
proc = psutil.Process(pid=allocation_state.pid)
proc.terminate()
except psutil.NoSuchProcess:
pass
new_process = mp.Process(
target=_run_allocation, args=(allocator, timetable_id)
)
new_process.start()
if new_timeout is not None:
allocation_state.timeout = new_timeout
allocation_state.pid = new_process.pid
allocation_state.result = None
allocation_state.type = AllocationStatus.REQUESTED
allocation_state.message = REQUESTED_MESSAGE.format(
time=seconds_to_eta(allocation_state.timeout)
)
allocation_state.title = "Allocation Successfully Requested"
allocation_state.request_time = timezone.now()
allocation_state.save()
@csrf_exempt
@require_POST
def request_allocation(request):
# TODO: later upgrade to realtime notification using Redis
json_data = json.loads(request.body)["data"]
data_hash = hashlib.sha256(
json.dumps(json_data, separators=(":", ",")).encode()
).digest()
data = InputData(**json_data)
allocator = Allocator(data)
timetable_id = data.timetable_id
try:
allocation_state = AllocationState.objects.get(
timetable_id=data.timetable_id
)
# Found object, request already made
# Hash matches, no state change
if data_hash == allocation_state.data_hash:
if allocation_state.type in (
AllocationStatus.REQUESTED,
AllocationStatus.NOT_READY,
):
# Result not found yet, allocation still running
if allocation_state.type == AllocationStatus.REQUESTED:
allocation_state.type = AllocationStatus.NOT_READY
allocation_state.title = NOT_READY_TITLE
allocation_state.message = NOT_READY_MESSAGE
allocation_state.save()
eta = int(
allocation_state.request_time.timestamp()
+ allocation_state.timeout
- timezone.now().timestamp()
)
allocation_state.message = allocation_state.message.format(
time=seconds_to_eta(max(eta, 0))
)
elif allocation_state.type == AllocationStatus.ERROR:
_replace_existing_allocation(
allocation_state, allocator, timetable_id
)
else:
# Hash doesn't match, request remade with modified data
_replace_existing_allocation(
allocation_state, allocator, timetable_id, data.timeout
)
except AllocationState.DoesNotExist:
# No object found, new request
new_process = mp.Process(
target=_run_allocation, args=(allocator, timetable_id)
)
new_process.start()
allocation_state = AllocationState(
timetable_id=timetable_id,
data_hash=data_hash,
pid=new_process.pid,
request_time=timezone.now(),
timeout=data.timeout,
type=AllocationStatus.REQUESTED,
title=REQUESTED_TITLE,
message=REQUESTED_MESSAGE,
)
allocation_state.save()
allocation_state.message = allocation_state.message.format(
time=seconds_to_eta(data.timeout)
)
return JsonResponse(
{
"type": allocation_state.type,
"message": allocation_state.message,
"title": allocation_state.title,
"result": allocation_state.result,
}
)
@require_GET
def check_allocation(request, timetable_id):
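    # Report the status (and result, once ready) of a previously requested allocation,
    # marking it failed if its worker process has died.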
try:
allocation_state = AllocationState.objects.get(
timetable_id=timetable_id
)
# Request has been made
if allocation_state.type in (
AllocationStatus.REQUESTED,
AllocationStatus.NOT_READY,
):
if psutil.pid_exists(allocation_state.pid):
# Result not found yet, allocation still running
if allocation_state.type == AllocationStatus.REQUESTED:
allocation_state.type = AllocationStatus.NOT_READY
allocation_state.title = NOT_READY_TITLE
allocation_state.message = NOT_READY_MESSAGE
allocation_state.save()
eta = int(
allocation_state.request_time.timestamp()
+ allocation_state.timeout
- timezone.now().timestamp()
)
allocation_state.message = allocation_state.message.format(
time=seconds_to_eta(max(eta, 0))
)
else:
# For some reason cannot find pid, maybe process was killed
allocation_state.type = AllocationStatus.ERROR
allocation_state.title = KILLED_TITLE
allocation_state.message = KILLED_MESSAGE
allocation_state.save()
return JsonResponse(
{
"type": allocation_state.type,
"message": allocation_state.message,
"title": allocation_state.title,
"result": allocation_state.result,
}
)
except AllocationState.DoesNotExist:
# Request has not been made
return JsonResponse(
{
"type": AllocationStatus.NOT_EXIST,
"message": "No request for an allocation of this timetable "
"has been made",
"title": "Allocation not found",
}
)
| 5,722 | 0 | 67 |
1a1d3d9e95c93601968362551c5458c497c9c24f | 111 | py | Python | images/backer/src/lib/django_webpack/__init__.py | elston/djangit | 1d9ec2e287447fa8926a6fc440469771120df6a1 | [
"MIT"
] | null | null | null | images/backer/src/lib/django_webpack/__init__.py | elston/djangit | 1d9ec2e287447fa8926a6fc440469771120df6a1 | [
"MIT"
] | null | null | null | images/backer/src/lib/django_webpack/__init__.py | elston/djangit | 1d9ec2e287447fa8926a6fc440469771120df6a1 | [
"MIT"
] | null | null | null | __author__ = 'Den Elston'
__version__ = '0.0.1'
default_app_config = 'django_webpack.apps.DjangoWebpackConfig' | 27.75 | 62 | 0.792793 | __author__ = 'Den Elston'
__version__ = '0.0.1'
default_app_config = 'django_webpack.apps.DjangoWebpackConfig' | 0 | 0 | 0 |
d1c5ce506f21389536550ff93ab7ab36b9a910d5 | 355 | py | Python | Python/circle pattern.py | Chanchal2125/Hacktoberfest2021_PatternMaking | c962f1e93f45a97351fbffc49ed1fc526741d772 | [
"MIT"
] | 1 | 2021-10-09T11:48:21.000Z | 2021-10-09T11:48:21.000Z | Python/circle pattern.py | Chanchal2125/Hacktoberfest2021_PatternMaking | c962f1e93f45a97351fbffc49ed1fc526741d772 | [
"MIT"
] | null | null | null | Python/circle pattern.py | Chanchal2125/Hacktoberfest2021_PatternMaking | c962f1e93f45a97351fbffc49ed1fc526741d772 | [
"MIT"
] | null | null | null | import math
radius = 10
printPattern(radius) | 25.357143 | 49 | 0.55493 | import math
def printPattern(radius):
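    # Print a circle of '*' of the given radius by testing each grid cell's distance from the centre.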
for i in range((2 * radius)+1):
for j in range((2 * radius)+1):
dist = math.sqrt((i - radius) * (i - radius) +
(j - radius) * (j - radius))
if (dist > radius - 1 and dist < radius + 1):
print("*",end="")
else:
print(" ",end="")
print()
# Driver code
radius = 10
printPattern(radius) | 289 | 0 | 22 |