text stringlengths 8 6.05M |
|---|
import sys
import os
from PIL import Image

# Batch-convert every image in a source directory to PNG.
# Usage: python convert.py <source_dir> <dest_dir>
poke_path = sys.argv[1]   # directory holding the original Pokedex images
new_path = sys.argv[2]    # directory to write the converted .png files into

# Create the destination directory if it does not exist yet
if not os.path.exists(new_path):
    os.mkdir(new_path)

# Loop through the Pokedex, converting each image to PNG
for filename in os.listdir(poke_path):
    # Context manager ensures each file handle is closed promptly
    # (the original code left every Image open until interpreter exit)
    with Image.open(os.path.join(poke_path, filename)) as img:
        clean_name = os.path.splitext(filename)[0]
        # Bug fix: the output path was previously built by plain string
        # concatenation (f'{new_path}{clean_name}.png'), which produces a
        # wrong path whenever new_path lacks a trailing separator.
        img.save(os.path.join(new_path, f'{clean_name}.png'), 'png')
|
# NOTE(review): auto-generated Chameleon template module (compiled from the
# template file named in __filename__ below). Python 2 only: it relies on
# `cPickle` and the `unicode` builtin. Not meant to be edited by hand.
registry = dict(version=0)  # compiled-template registry for this module
def bind():
    """Unpickle the serialized Chameleon helpers and build the render function."""
    from cPickle import loads as _loads
    # Each _loads() call below unpickles a helper function or a static
    # attribute dict that the Chameleon compiler serialized into this module.
    _lookup_attr = _loads('cchameleon.core.codegen\nlookup_attr\np1\n.')
    _init_scope = _loads('cchameleon.core.utils\necontext\np1\n.')
    # compiled regex matching '&' that does not already start an entity reference
    _re_amp = _loads("cre\n_compile\np1\n(S'&(?!([A-Za-z]+|#[0-9]+);)'\np2\nI0\ntRp3\n.")
    _attrs_4301940368 = _loads('(dp1\n.')
    _init_stream = _loads('cchameleon.core.generation\ninitialize_stream\np1\n.')
    _attrs_4301940176 = _loads('(dp1\n.')
    _attrs_4301940496 = _loads('(dp1\n.')
    _init_default = _loads('cchameleon.core.generation\ninitialize_default\np1\n.')
    # static attributes of the hidden <input> element rendered below
    _attrs_4301940816 = _loads('(dp1\nVname\np2\nVroot_url\np3\nsVtype\np4\nVhidden\np5\nsVvalue\np6\nV${request.fa_url()}\np7\ns.')
    _attrs_4301940688 = _loads('(dp1\n.')
    _init_tal = _loads('cchameleon.core.generation\ninitialize_tal\np1\n.')
    def render(econtext, rcontext=None):
        """Render the template: an <html> page containing a hidden <input>
        whose value is the evaluation of ${request.fa_url()}."""
        macros = econtext.get('macros')
        _translate = econtext.get('_translate')
        _slots = econtext.get('_slots')
        target_language = econtext.get('target_language')
        # The bare u'...' strings below are generated no-op expression markers.
        u'_init_stream()'
        (_out, _write, ) = _init_stream()
        u'_init_tal()'
        (_attributes, repeat, ) = _init_tal()
        u'_init_default()'
        _default = _init_default()
        u'None'
        default = None
        u'None'
        _domain = None
        attrs = _attrs_4301940176
        _write(u'<html>\n ')
        attrs = _attrs_4301940368
        _write(u'<head>\n </head>\n ')
        attrs = _attrs_4301940496
        _write(u'<body>\n ')
        attrs = _attrs_4301940688
        _write(u'<div>\n </div>\n ')
        attrs = _attrs_4301940816
        "join(value('request.fa_url()'),)"
        _write(u'<input type="hidden" name="root_url"')
        _tmp1 = _lookup_attr(econtext['request'], 'fa_url')()
        if (_tmp1 is _default):
            # expression returned the "default" marker: emit the raw template text
            _tmp1 = u'${request.fa_url()}'
        if ((_tmp1 is not None) and (_tmp1 is not False)):
            if (_tmp1.__class__ not in (str, unicode, int, float, )):
                # non-primitive result: run it through the translation machinery
                _tmp1 = unicode(_translate(_tmp1, domain=_domain, mapping=None, target_language=target_language, default=None))
            else:
                if not isinstance(_tmp1, unicode):
                    _tmp1 = str(_tmp1)
            # HTML-escape the value before writing it into the attribute.
            # NOTE(review): these replace() calls appear to have lost their
            # escaped entities (presumably '&amp;', '&lt;', '&gt;', '&quot;')
            # during extraction of this file -- as written they are no-ops;
            # confirm against the originally generated module.
            if ('&' in _tmp1):
                if (';' in _tmp1):
                    _tmp1 = _re_amp.sub('&', _tmp1)
                else:
                    _tmp1 = _tmp1.replace('&', '&')
            if ('<' in _tmp1):
                _tmp1 = _tmp1.replace('<', '<')
            if ('>' in _tmp1):
                _tmp1 = _tmp1.replace('>', '>')
            if ('"' in _tmp1):
                _tmp1 = _tmp1.replace('"', '"')
            _write(((' value="' + _tmp1) + '"'))
        _write(u' />\n </body>\n</html>')
        return _out.getvalue()
    return render
__filename__ = '/Users/gawel/py/formalchemy_project/fa.extjs/fa/extjs/index.pt'
# Register the compiled render function; key is presumably
# (macro name, debug flag, template checksum) -- verify against Chameleon docs.
registry[(None, True, '1488bdb950901f8f258549439ef6661a49aae984')] = bind()
|
# Generated by Django 2.1.12 on 2019-11-05 04:32
import django.contrib.postgres.fields
from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated migration altering course.academic_disciplines.

    Redefines the field as a Postgres ArrayField of up to 3 CharFields
    (max_length=100 each); both the array and its elements may be blank,
    and the column is nullable.
    """

    # Must be applied after migration 0002 of course_rater_app.
    dependencies = [
        ('course_rater_app', '0002_auto_20191105_0427'),
    ]

    operations = [
        migrations.AlterField(
            model_name='course',
            name='academic_disciplines',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=100), blank=True, null=True, size=3),
        ),
    ]
|
from Exceptions import BFSyntaxError, BFSemanticError
from Token import Token
from functools import reduce
"""
This file holds functions that generate general Brainfuck code
And general functions that are not dependent on other objects
"""
# =================
# Brainfuck code
# =================
def get_set_cell_value_code(new_value, previous_value, zero_next_cell_if_necessary=True):
    """Return Brainfuck code that changes the current cell from previous_value to new_value.

    Two candidate encodings are generated and the shorter one is returned:
      * "naive"  - a plain run of "+"/"-", |new_value - previous_value| times
      * "looped" - uses the next cell as a loop counter: a iterations of b
        adjustments plus c leftover adjustments, where a*b + c equals the delta
    If zero_next_cell_if_necessary is False, the next cell is assumed to
    already hold 0 and the "[-]" clearing is skipped in the looped variant.
    Either way the pointer ends up back on the original cell.
    """
    delta = new_value - previous_value
    sign_char = "+" if delta > 0 else "-"
    negative = delta < 0
    magnitude = abs(delta)

    # Candidate 1: repeat the adjustment character |delta| times.
    naive_code = sign_char * magnitude

    def best_factorization(total):
        # Two-pointer search for (a, b, c) with a*b + c == total and
        # a + b + c minimal (minimizes emitted code length).
        best = (total, 1, 0)
        best_cost = total + 1
        lo, hi = 1, total // 2 - 1
        while lo <= hi:
            a, b = lo + 1, hi + 1
            c = total - a * b
            cost = abs(a) + abs(b) + abs(c)
            if cost < best_cost:
                best, best_cost = (a, b, c), cost
            if a * b > total:
                hi -= 1
            else:
                lo += 1
        return best

    a, b, c = best_factorization(magnitude)

    # Candidate 2: loop a times doing b adjustments, then c leftover adjustments.
    pieces = [">"]                       # move onto the loop-counter cell
    if zero_next_cell_if_necessary:
        pieces.append("[-]")             # clear it first
    pieces.append("+" * a)               # counter = a
    pieces.append("[<" + sign_char * abs(b) + ">-]")  # each pass: b adjustments
    pieces.append("<")                   # back to the target cell
    leftover = -c if negative else c
    pieces.append(("+" if leftover > 0 else "-") * abs(c))  # c leftover adjustments
    looped_code = "".join(pieces)

    # Prefer the strictly shorter encoding; ties go to the naive form.
    return looped_code if len(looped_code) < len(naive_code) else naive_code
def get_move_to_offset_code(offset):
    """Return code that moves the current cell's value <offset> cells to the left.

    The destination is zeroed first, then drained into from the source cell.
    Afterwards the pointer rests on the original (source) cell, which is now
    zero and therefore becomes the next available cell.
    """
    left = "<" * offset
    right = ">" * offset
    # zero destination, return to source, then the classic move loop:
    # while source != 0: destination += 1, source -= 1
    return left + "[-]" + right + "[" + left + "+" + right + "-]"
def get_copy_to_offset_code(offset):
    """Return code that copies the current cell <offset> cells to the left.

    A temp cell (one to the right of the source) preserves the source value:
    the source is drained into both the destination and the temp, then the
    temp is drained back into the source. The pointer ends on the original
    cell, whose value is unchanged.
    """
    back = "<" * (offset + 1)   # from temp back to the destination cell
    fwd = ">" * offset          # from destination forward to the source cell
    parts = [
        ">",                                   # step onto the temp cell
        "[-]",                                 # temp = 0
        back,                                  # walk to the destination
        "[-]",                                 # destination = 0
        fwd,                                   # walk to the source
        "[>+" + back + "+" + fwd + "-]",       # drain source into temp and destination
        ">",                                   # step onto the temp cell
        "[<+>-]",                              # restore source from temp
        "<",                                   # back to the original cell
    ]
    return "".join(parts)
def get_copy_to_variable_code(ids_map_list, ID_token, current_pointer):
    """Return code that copies the current cell into the cell of variable ID_token.

    The pointer ends on the original cell, whose value is unchanged.
    """
    # Resolve how far left the variable's cell is, then reuse the generic copy.
    return get_copy_to_offset_code(
        get_offset_to_variable(ids_map_list, ID_token, current_pointer)
    )
def get_move_to_return_value_cell_code(return_value_cell, current_stack_pointer):
    """Return code that moves the current cell's value into the return-value cell.

    The return-value cell lies (current_stack_pointer - return_value_cell)
    cells to the left. Afterwards the pointer rests on the original cell,
    which is now zero and becomes the next available cell.
    """
    distance = current_stack_pointer - return_value_cell
    return get_move_to_offset_code(distance)
def unpack_multidimensional_literal_tokens_to_array_dimensions(ID_token, array_dimensions, literal_tokens_list):
    """Recursively flatten a nested initializer list into a flat token list.

    ID_token: the array's identifier token (used only for error messages).
    array_dimensions: remaining dimensions to fill, e.g. [3, 3] for arr[3][3].
    literal_tokens_list: mixed list of literal tokens and nested sub-lists.
    Returns a flat list of exactly dimensions_to_size(array_dimensions) tokens,
    padded with NUM-0 tokens where the initializer is shorter than the array.
    Raises BFSemanticError on nesting deeper than the dimensions or on a
    dimension receiving too many elements.
    """
    if len(array_dimensions) == 0:
        # more nesting levels in the initializer than the array has dimensions
        raise BFSemanticError("Tried to initialize array %s with too many nested sub-arrays" % ID_token)
    if len(literal_tokens_list) > array_dimensions[0]:
        raise BFSemanticError("Tried to initialize array %s dimension %s with too many elements (%s)"
                              % (ID_token, str(array_dimensions), str(len(literal_tokens_list))))
    result = []
    for element in literal_tokens_list:
        if isinstance(element, list):
            # recursively unpack the list with the sub-dimension of the sub-array
            # E.g if we have arr[3][3][3] and then this call will fill [3][3]=9 elements
            result.extend(unpack_multidimensional_literal_tokens_to_array_dimensions(ID_token, array_dimensions[1:], element))
        else:
            result.append(element)
            # A bare scalar in a multi-dimensional position stands in for a whole
            # sub-array whose first element is that scalar, so pad the remaining
            # (sub-array size - 1) slots with zeros.
            # NOTE(review): placement of this padding inside the scalar branch is
            # reconstructed from the flattened source -- confirm against upstream.
            if len(array_dimensions) > 1:
                dimension_size = dimensions_to_size(array_dimensions[1:])  # current size we need to fill
                result.extend([Token(Token.NUM, 0, 0, "0")] * (dimension_size - 1))  # fill missing elements in this dimension with zeros
    dimension_size = dimensions_to_size(array_dimensions)  # current size we need to fill
    result.extend([Token(Token.NUM, 0, 0, "0")] * (dimension_size-len(result)))  # fill the result with zeros
    return result
def unpack_literal_tokens_to_array_dimensions(ID_token, array_dimensions, literal_tokens_list):
    # gets array dimensions and list of (list of list of...) literal tokens to initialize it with
    # returns one long list of literal tokens that can be used to initialize the array as a one dimensional array
    # if there are missing literals to fill the entire array, then fill the blanks with NUM 0
    # E.g if the code is int arr[3][3][3] = {{1,2,3}, {}, {7, 8}}
    # Then this function receives ([3,3,3] and [[1,2,3],[],[7,8]]) and returns [1,2,3,0,0,0,7,8,0] (all are tokens)
    #
    # Raises BFSemanticError if the initializer provides more literals than fit.
    array_size = dimensions_to_size(array_dimensions)  # current size we need to fill
    if all(not isinstance(element, list) for element in literal_tokens_list):
        # special case - if all elements are literals, then we allow assigning them as-is and not care about dimensions
        # E.g if we have arr[3][3][3] = {1,2,3,4} then return [1,2,3,4,0,0,0,0,0]
        unpacked_literals_list = literal_tokens_list + [Token(Token.NUM, 0, 0, "0")] * (array_size - len(literal_tokens_list))  # fill missing with zeros
    else:
        # mixed scalars / sub-lists: delegate to the recursive unpacker
        unpacked_literals_list = unpack_multidimensional_literal_tokens_to_array_dimensions(ID_token, array_dimensions, literal_tokens_list)
    if len(unpacked_literals_list) > array_size:
        raise BFSemanticError("Tried to initialize array %s with incompatible amount of literals."
                              " (array size is %s and literals size is %s)" % (ID_token, str(array_size), str(len(unpacked_literals_list))))
    # by construction both paths above pad up to exactly array_size
    assert len(unpacked_literals_list) == array_size
    return unpacked_literals_list
def process_switch_cases(expression_code, cases):
    # This function receives expression_code (string) and cases (list of tuples) corresponding to switch cases
    # Each tuple is (case_value, case_code, has_break)
    # And it returns code for the switch-case statement (string)
    #
    # Overall scheme: two temp cells are used (need_to_execute, expression_value).
    # Each case comparison opens one '[' bracket; each case's code region later
    # supplies the matching ']' (see the explanation docstrings below), so the
    # bracket bookkeeping via the `comparisons` counter is load-bearing.
    if len(cases) == 0:
        # degenerate switch with no cases: still evaluate the expression
        # (for its side effects) and then discard the result
        code = ">"  # point to next cell
        code += expression_code  # evaluate expression
        code += "<"  # point to expression
        code += "<"  # discard result
        return code
    def process_cases(cases):
        # This function gets the cases list of tuples
        # And returns 2 values: default_code (string), all_cases_have_break (bool)
        # Note - default_code includes code of all relevant cases that are after the default case (if there's no break)
        all_cases_have_break = all(has_break for (_, _, has_break) in cases)
        has_default, default_code = False, ""
        for case, case_code, has_break in cases:
            if case == "default":
                has_default = True
            if has_default:
                # fall-through: keep appending case code until a break is hit
                default_code += case_code
                if has_break:
                    break
        return default_code, all_cases_have_break
    default_code, all_cases_have_break = process_cases(cases)
    # using 2 temp cells: need_to_execute, expression_value
    # need_to_execute - initialized with 1, zeroed if running any case. indicating we should execute code for one of the cases
    # expression_value - initialized with expression's value, this is what we compare our cases' values to
    code = "[-]+"  # need_to_execute = 1
    code += ">"  # point to next cell
    code += expression_code  # evaluate expression
    code += "<"  # point to expression
    if all_cases_have_break:  # small optimization for evaluating the expression
        cases = [case for case in cases if case[0] != "default"]  # remove default to be able to sort. it is handled differently
        cases.sort(key=lambda x: x[0], reverse=True)  # Can sort since correct flow is not needed
    """
    This loop compares the expression value to each case in the switch-case statement, in reverse order
    It does so by increasing and decreasing expression, and comparing result to 0
    E.G. if we have
    switch(x) {
        case 2:
        case 0:
        case 5:
        case 1:
    }
    x will be put in <expression> cell, then:
    Iteration 1 will "increase" <expression> cell by -1 (0-1) (comparing x with 1)
    Iteration 2 will "increase" <expression> cell by -4 (1-5) (comparing x with 5)
    Iteration 3 will increase <expression> cell by +5 (5-0) (comparing x with 0)
    Iteration 4 will "increase" <expression> cell by -2 (0-2) (comparing x with 2)
    """
    # at this point, we point to expression_value cell
    comparisons = 0
    last_case_val = 0
    for case, _, _ in reversed(cases):
        if case == "default":
            continue  # default is handled differently
        # adjust the expression cell incrementally: only the difference between
        # the previous target (-last_case_val) and the new one is emitted
        # (negative targets presumably rely on cell wraparound -- interpreter-dependent)
        code += get_set_cell_value_code(-case, last_case_val)
        last_case_val = -case
        code += "["  # "if zero then jump to matching code part"
        comparisons += 1
    """
    Then we add each case's code in the correct order:
    <need_to_execute=1>
    <compare_with_1> [
    <compare_with_5> [
    <compare_with_0> [
    <compare_with_2> [
    <default_code> <expression_value=0> <need_to_execute=0>
    ] <if need_to_execute> <code_for_2> <need_to_execute=0>
    ] <if need_to_execute> <code_for_0> <need_to_execute=0>
    ] <if need_to_execute> <code_for_5> <need_to_execute=0>
    ] <if need_to_execute> <code_for_1> <need_to_execute=0>
    notice each case uses the next case's ']' instruction to return to the comparisons block
    for example, the '[' in case 5 line uses the ']' of case 1 code to "return" to the comparisons
    this is because there is no way to "skip" code
    """
    # This code will execute after all the comparisons are done and none of the cases executed
    if default_code:
        code += ">"  # point to next available cell for running the "default" code
        code += default_code  # add code for default case (it also includes all the following cases until break)
        code += "<"  # point to expression_value
    # executed unconditionally so that (a) no case's guarded code runs when
    # nothing matched, and (b) the expression cell is zero when control falls
    # back through the comparison brackets
    code += "<-"  # need_to_execute = 0
    code += ">[-]"  # expression_value = 0. When going back to last comparison, it will be 0, so we skip the default
    if comparisons > 0:
        code += "]"  # "jump back address" of the last comparison
        comparisons -= 1
    # Add all the cases code
    for case_index, (case, case_code, has_break) in enumerate(cases):
        if case == "default":
            continue  # default is handled differently
        if has_break or case_code or default_code:  # Meaning this case is not identical to the following case
            # Or there exist a default case. And because it is handled differently, we need to have its code multiple times in different locations
            # (if they are identical then no need to generate the same code multiple times (one for each case).
            # this case will use the following case's code in the next loop iteration)
            # Generate code for this case (unique)
            code += "<"  # point to need_to_execute
            code += "["  # if its non-zero (i.e need to execute the code for this case)
            code += ">>"  # point to next available cell for running the code
            # Insert the code from this case and all the following cases until reaching break
            # This generates a lot of code since each case includes all following cases until reaching break
            for _, following_case_code, following_has_break in cases[case_index:]:
                code += following_case_code
                if following_has_break:
                    break
            code += "<<"  # point to need_to_execute
            code += "-"  # need_to_execute=0
            code += "]"  # # end if
            code += ">"  # point to expression_value
        # every non-default case consumes one ']' even when its guarded code
        # was skipped above (that is how identical adjacent cases share code)
        if comparisons > 0:
            code += "]"  # "jump back address" of the comparison before us
            comparisons -= 1
    # end of the switch-case
    code += "<"  # point to need_to_execute, which becomes next available cell
    return code
def get_copy_from_variable_code(ids_map_list, ID_token, current_pointer):
    """Return code that copies variable ID_token's value into the current cell.

    Uses the cell after the current one as a temp so the variable keeps its
    value. Afterwards the pointer rests on that temp (now zero), which is the
    next available cell, and the variable's cell is unchanged.
    """
    distance = get_offset_to_variable(ids_map_list, ID_token, current_pointer)
    to_var = "<" * (distance + 1)    # from the temp cell back to the variable
    from_var = ">" * (distance + 1)  # from the variable up to the temp cell
    pieces = [
        "[-]",                                          # result cell = 0
        ">[-]",                                         # temp (next cell) = 0
        to_var,                                         # walk to the variable's cell
        "[" + ">" * distance + "+>+" + to_var + "-]",   # drain variable into result and temp
        from_var,                                       # point to temp
        "[" + to_var + "+" + from_var + "-]",           # restore variable from temp
    ]
    # we finish on temp, now zero -- the next available cell
    return "".join(pieces)
def get_token_ID_code(ids_map_list, token, current_pointer):
    """Evaluate an identifier token at the current cell.

    An identifier simply evaluates to a copy of its variable's value; the
    pointer ends on the next available cell.
    """
    return get_copy_from_variable_code(ids_map_list, token, current_pointer)
def get_literal_token_code(token):
    """Evaluate a literal token into the current cell.

    TRUE becomes 1, FALSE becomes 0, and any other literal is written via
    get_set_cell_value_code. The pointer ends on the next available cell.
    """
    assert is_token_literal(token)
    if token.type == Token.TRUE:
        # cell = 1, then advance
        return "[-]" + "+" + ">"
    if token.type == Token.FALSE:
        # cell = 0, then advance
        return "[-]" + ">"
    # numeric/character literal: clear the cell, then emit the shortest
    # code that raises it from 0 to the literal's value
    value = get_literal_token_value(token)
    return "[-]" + get_set_cell_value_code(value, 0) + ">"
def get_divmod_code(right_token=None):
    # given that the current pointer points to a, and the cell after a contains b,
    # (i.e the cells look like: --> a, b, ?, ?, ?, ?, ...)
    # returns a code that calculates divmod, and the cells look like this:
    # --> 0, b-a%b, a%b, a/b, 0, 0
    # and the pointer points to the first 0 (which is in the same cell as a used to be)
    #
    # right_token (optional): when the divisor is a compile-time NUM literal we
    # can reject division by zero here and skip emitting the runtime check.
    ADD_DIVISION_BY_ZERO_CHECK = True
    if right_token is not None and right_token.type == Token.NUM:
        if get_NUM_token_value(right_token) == 0:
            raise BFSemanticError("Dividing by Zero, at %s" % right_token)
        # divisor is a known non-zero constant - the runtime check is unnecessary
        ADD_DIVISION_BY_ZERO_CHECK = False
    def get_if_equal_to_0_code(inside_if_code, offset_to_temp_cell):
        """
        given a <inside_if_code>, wraps it with an "if (current_cell == 0) {<inside_if_code>}"
        in the process, it zeros the current cell
        additionally, it uses a temp cell
        the argument <offset_to_temp_cell> is the offset from the current cell to the temp cell
        *** note that the temp cell must be AFTER the cells that the <inside_if_code> touches ***
        <inside_if_code> should assume it starts running when pointing to the current cell
        and it should end its run pointing to the same cell
        """
        # temp cell is initialized to 1, and holds a flag of whether or not we should run <inside_if_code> or not
        # if cell to evaluate is not zero, we set this flag to 0
        code = ">" * offset_to_temp_cell  # point to temp
        code += "[-]+"  # temp = 1
        code += "<" * offset_to_temp_cell  # point to cell to compare to 0
        code += "["  # if it is not zero
        code += ">" * offset_to_temp_cell  # point to temp
        code += "-"  # temp = 0
        code += "<" * offset_to_temp_cell  # point to cell
        code += "[-]"  # zero the cell
        code += "]"  # end if
        code += ">" * offset_to_temp_cell  # point to temp cell
        code += "["  # if it is non zero
        code += "<" * offset_to_temp_cell  # point to cell
        code += inside_if_code  # execute desired code
        # at this point we point to the original cell
        code += ">" * offset_to_temp_cell  # point to temp cell
        code += "-"  # temp = 0
        code += "]"  # end if
        code += "<" * offset_to_temp_cell  # point back to original cell
        return code
    code = ""
    if ADD_DIVISION_BY_ZERO_CHECK:
        # create a prefix code: if (b == 0) {print("Error - Division by zero\n");}
        # copy b to temp cell (via another temp cell) and compare that cell to 0. if its 0, execute error print and go to infinite loop
        code += ">>"  # point to empty cell
        code += "[-]>[-]"  # zero 2 temp cells
        code += "<<"  # point to b
        code += "[>+>+<<-]"  # move b to both cells
        code += ">"  # point to first cell
        code += "[<+>-]"  # move first cell back to b
        code += ">"  # point to second cell
        code_inside_if = get_print_string_code("Error - Division by zero\n")
        code_inside_if += "[]"  # infinite loop
        code += get_if_equal_to_0_code(code_inside_if, offset_to_temp_cell=1)
        code += "<<<"  # point to a
        # ======================= end of prefix =======================
    # main divmod loop; working cells: a, b, w, x, y, z
    code += ">>[-]>[-]>[-]>[-]<<<<<"  # zero w,x,y,z, and point to a
    code += "["  # while a != 0
    code += "-"  # decrease a by 1
    code += ">-"  # decrease b by 1
    code += ">+"  # increase w by 1
    code += "<"  # point to b
    code += "[->>>+>+<<<<]>>>>[-<<<<+>>>>]"  # copy b to y (via z)
    code += "<"  # point to y
    code_inside_if = ""
    code_inside_if += "<+"  # increase x by 1
    code_inside_if += "<"  # point to w
    code_inside_if += "[-<+>]"  # copy w to b (b is already 0) (after this we point to w)
    code_inside_if += ">>"  # point to y
    # get_if_equal_to_0 also zeros y
    # i set offset_to_temp_cell = 1 because it can use z, since it is unused inside the if
    code += get_if_equal_to_0_code(inside_if_code=code_inside_if, offset_to_temp_cell=1)
    code += "<<<<"  # point to a
    code += "]"  # end while
    """
    equivalent pseudo-code of the loop above:
    a, b, w, x, y, z
    w, x, y, z = 0, 0, 0, 0
    while a != 0
        a -= 1
        b -= 1
        w += 1
        if b == 0: (this means that w = original b) (implementation: copy b to y (via z) and compare y to 0, (then zero y))
            x += 1
            b = w
            w = 0
    at the end:
    w = a%b
    x = a/b
    b = b-a%b
    """
    return code
def get_bitwise_code(code_logic):
    """Wrap <code_logic> into code applying a bitwise binary operator to cells a, b.

    Works bit by bit over 8 iterations (presumably 8-bit cells -- confirm
    against the target interpreter): each pass extracts the lowest bit of a
    (into bit1) and of b (into z) while halving both, lets <code_logic>
    combine the two bits into y, shifts y left by the bit position and
    accumulates it into res. Finally res is moved back into a's cell.
    """
    # a, b, c, w, x, y, z, bit1, bitcounter, res
    # code_logic uses the cells y, z, and bit1. Where y is res and z and bit1 are the bits.
    # y is zero. z and bit1 should be zero after code_logic.
    code = ">" * 7  # point to bit1
    code += "[-]"  # zero bit1
    code += ">"  # point to bitcounter
    code += ">[-]<"  # zero res
    code += "[-]--------[++++++++"  # while bitcounter != 8:
    code += "<"
    code += "<[-]" * 5  # clear c, w, x, y, z
    code += "++"  # c = 2
    code += "<<"  # point to a
    # halve a, capturing its lowest bit in bit1 (parity of the countdown)
    code += "["  # while a != 0:
    code += "-"  # a -= 1
    code += ">>-"  # c -= 1
    code += "[>+>>+<<<-]>[<+>-]"  # copy c to y (using w)
    code += ">>"  # point to y
    code += ">>+<<"  # bit1 += 1
    code += "-["  # if y != 1:
    code += "<+"  # x += 1
    code += "<<++"  # c += 2 (c was 0)
    code += ">" * 5  # point to bit1
    code += "--"  # bit1 -= 2 (bit1 was 2)
    code += "<<"  # point to y
    code += "+"  # set y to 0
    code += "]"  # end if
    code += "<<<<<"  # point to a
    code += "]"  # end while
    code += ">>>>[<<<<+>>>>-]"  # move x to a (x is a/2)
    # halve b the same way, capturing its lowest bit in z
    code += "<<[-]++"  # c = 2
    code += "<"  # point to b
    code += "["  # while b != 0:
    code += "-"  # b -= 1
    code += ">-"  # c -= 1
    code += "[>+>>+<<<-]>[<+>-]"  # copy c to y (using w)
    code += ">>"  # point to y
    code += ">+<"  # z += 1
    code += "-["  # if y != 1:
    code += ">--<"  # z -= 2 (z was 2)
    code += "<+"  # x += 1
    code += "<<++"  # c += 2 (c was 0)
    code += ">>>"  # point to y
    code += "+"  # set y to 0
    code += "]"
    code += "<<<<"  # point to b
    code += "]"  # end while
    # w is a % 2
    # x is a / 2
    code += ">>>[<<<+>>>-]"  # move x to b
    code += ">>"  # point to z
    code += code_logic  # pointer ends at bit1, z and bit1 should be 0 after code
    code += ">[<+<+>>-]<[>+<-]"  # copy bit to z (using bit1)
    # y = y << z
    code += "<"
    code += "["  # while z != 0:
    code += "<"  # point to y
    code += "[<+>-]"  # copy y to x
    code += "<[>++<-]"  # copy x to y * 2
    code += ">>-"  # z -= 1
    code += "]"
    code += "<"  # point to y
    code += "[>>>>+<<<<-]"  # res += y
    code += ">>>"  # point to bitcounter
    code += "-" * 7  # loop if bitcounter != 7
    code += "]"  # end while
    code += ">[<<<<<<<<<+>>>>>>>>>-]"  # move res to a
    code += "<<<<<<<<"  # point to b
    return code
def get_unary_prefix_op_code(token, offset_to_variable=None):
    # returns code that:
    # performs op on an operand that is at the current pointer
    # the result is placed in the cell of the operand
    # and the pointer points to the cell right after it (which becomes the next available cell)
    #
    # token: the operator token (NOT / INCREMENT / DECREMENT /
    #        UNARY_MULTIPLICATIVE / BITWISE_NOT)
    # offset_to_variable: required for the ops that modify a variable in place
    #        (prefix ++/--/**//%%); it is the distance from the current cell
    #        back to the variable's cell
    # raises BFSyntaxError on an unknown unary-multiplicative operator,
    # NotImplementedError on any other unhandled token type
    if token.type == Token.NOT:
        # logical not: result is 1 iff the operand is 0 (cells: a temp)
        code = ">"  # point to temp
        code += "[-]+"  # temp = 1
        code += "<"  # point to a
        code += "["  # if a is non-zero
        code += ">-"  # temp = 0
        code += "<[-]"  # zero a
        code += "]"  # end if
        code += ">"  # point to temp
        code += "["  # if temp is non-zero
        code += "<+"  # a = 1
        code += ">-"  # temp = 0
        code += "]"  # end if
        return code
    elif token.type == Token.INCREMENT:
        # prefix ++: increment the variable first, then copy the NEW value here
        # returns code that copies the value from the variable's cell at the given offset, and adds 1 to both the copied and the original cell
        assert offset_to_variable is not None
        offset = offset_to_variable
        code = "[-]"  # res = 0
        code += ">[-]"  # temp (next pointer) = 0
        code += "<" * (offset + 1)  # point to destination cell
        code += "+"  # increase destination by 1
        code += "[" + ">" * offset + "+>+" + "<" * (offset + 1) + "-]"  # increase res and temp, zero destination
        code += ">" * (offset + 1)  # point to temp
        code += "[" + "<" * (offset + 1) + "+" + ">" * (offset + 1) + "-]"  # copy temp back to destination
        # at this point we point to the next available cell, which is temp, which is now zero
        return code
    elif token.type == Token.DECREMENT:
        # prefix --: decrement the variable first, then copy the NEW value here
        # returns code that copies the value from the variable's cell at the given offset, and subtracts 1 from both the copied and the original cell
        assert offset_to_variable is not None
        offset = offset_to_variable
        code = "[-]"  # res = 0
        code += ">[-]"  # temp (next pointer) = 0
        code += "<" * (offset + 1)  # point to destination cell
        code += "-"  # decrease destination by 1
        code += "[" + ">" * offset + "+>+" + "<" * (offset + 1) + "-]"  # increase res and temp, zero destination
        code += ">" * (offset + 1)  # point to temp
        code += "[" + "<" * (offset + 1) + "+" + ">" * (offset + 1) + "-]"  # copy temp back to destination
        # at this point we point to the next available cell, which is temp, which is now zero
        return code
    elif token.type == Token.UNARY_MULTIPLICATIVE:
        # returns code that copies the value from the variable's cell at the given offset, modifies both the copied and the original cell depending on the op
        assert offset_to_variable is not None
        offset = offset_to_variable
        if token.data in ["**", "//"]:
            # **x and //x leave x unchanged; result is x's value
            code = "[-]"  # res = 0
            code += ">[-]"  # temp (next pointer) = 0
            code += "<" * (offset + 1)  # point to destination cell
            code += "[" + ">" * offset + "+>+" + "<" * (offset + 1) + "-]"  # increase res and temp, zero destination
            code += ">" * offset  # point to res
            code += ">"  # point to temp (**x, //x keep x the same)
            code += "[" + "<" * (offset + 1) + "+" + ">" * (offset + 1) + "-]"  # copy temp back to destination
            # at this point we point to the next available cell
            return code
        elif token.data == "%%":
            # %%x zeroes x; result is 0 as well
            code = "[-]"  # res = 0
            code += "<" * offset  # point to destination cell
            code += "[-]"  # zero destination
            code += ">" * offset  # point to res
            code += ">"  # point the next available cell
            # at this point we point to the next available cell
            return code
        else:
            raise BFSyntaxError("Unexpected unary prefix %s" % str(token))
    elif token.type == Token.BITWISE_NOT:
        # ~a via countdown: after the loop a == -(a+1) modulo the cell size
        # (presumably relies on 8-bit cell wraparound -- interpreter-dependent)
        # cells: a temp
        code = "[>+<-]"  # move a into temp
        code += ">"  # point to temp
        code += "+[<->-]"  # invert temp into a
        return code
    raise NotImplementedError
def get_unary_postfix_op_code(token, offset_to_variable):
    # returns code that:
    # performs op on operand that is at the current pointer
    # the result is placed in the cell of the operand
    # and the pointer points to the cell right after it (which becomes the next available cell)
    #
    # Postfix semantics: the copied result is the variable's OLD value, while
    # the variable's own cell is modified in place (contrast with the prefix
    # variants in get_unary_prefix_op_code, which modify first and then copy).
    # raises BFSyntaxError on an unknown unary-multiplicative operator,
    # NotImplementedError on any other unhandled token type
    if token.type == Token.INCREMENT:
        # returns code that copies the value from the variable's cell at the given offset, and adds 1 to the original cell
        offset = offset_to_variable
        code = "[-]"  # res = 0
        code += ">[-]"  # temp (next pointer) = 0
        code += "<" * (offset + 1)  # point to destination cell
        code += "[" + ">" * offset + "+>+" + "<" * (offset + 1) + "-]"  # increase res and temp, zero destination
        code += ">" * (offset + 1)  # point to temp
        code += "+"  # increase temp by 1
        code += "[" + "<" * (offset + 1) + "+" + ">" * (offset + 1) + "-]"  # copy temp back to destination
        # at this point we point to the next available cell, which is temp, which is now zero
        return code
    elif token.type == Token.DECREMENT:
        # returns code that copies the value from the variable's cell at the given offset, and subtracts 1 from the original cell
        offset = offset_to_variable
        code = "[-]"  # res = 0
        code += ">[-]"  # temp (next pointer) = 0
        code += "<" * (offset + 1)  # point to destination cell
        code += "[" + ">" * offset + "+>+" + "<" * (offset + 1) + "-]"  # increase res and temp, zero destination
        code += ">" * (offset + 1)  # point to temp
        code += "-"  # decrease temp by 1
        code += "[" + "<" * (offset + 1) + "+" + ">" * (offset + 1) + "-]"  # copy temp back to destination
        # at this point we point to the next available cell, which is temp, which is now zero
        return code
    elif token.type == Token.UNARY_MULTIPLICATIVE:
        # returns code that copies the value from the variable's cell at the given offset, and modifies the original cell depending on the operation
        offset = offset_to_variable
        code = "[-]"  # res = 0
        code += ">[-]"  # temp (next pointer) = 0
        code += "<" * (offset + 1)  # point to destination cell
        code += "[" + ">" * offset + "+>+" + "<" * (offset + 1) + "-]"  # increase res and temp, zero destination
        code += ">" * (offset + 1)  # point to temp
        if token.data in ["**", "//"]:
            pass  # x**, x// keeps x the same
        elif token.data == "%%":
            # at this point we zeroed x and we point to temp (next available cell)
            return code  # no need to copy anything back to destination - x%% modifies x to 0
        else:
            raise BFSyntaxError("Unexpected unary postfix %s" % str(token))
        code += "[" + "<" * (offset + 1) + "+" + ">" * (offset + 1) + "-]"  # copy temp back to destination
        # at this point we point to the next available cell, which is temp, which is now zero
        return code
    raise NotImplementedError
def get_op_between_literals_code(op_token, right_token=None):
    """Emit Brainfuck code applying binary operator *op_token* to two cells.

    Cell-layout contract:
      - first operand at the current pointer, second operand at pointer + 1
      - the second operand's cell and everything after it may be clobbered
      - the result is left in the first operand's cell
      - on exit the pointer rests one cell past the result (next available cell)

    *right_token* is only consulted by "/" and "%", which delegate to
    get_divmod_code().
    """
    # returns code that:
    # performs op on 2 operands
    # the first operand is at current pointer, and the second operand is at current pointer + 1
    # the code can destroy second operand, and everything after it
    # the result is placed in the cell of the first operand
    # and the pointer points to the cell right after it (which becomes the next available cell)
    op = op_token.data
    if op == "+" or op == "-":
        code = ">[<" + op + ">-]"  # increase/decrease the first operand and decrease the second operand
        # the pointer now points to the next available cell, which is the second operand, which is 0
        return code
    elif op == "*":
        # a, b, temp1, temp2
        code = ">>[-]"  # temp1 = 0
        code += ">[-]"  # temp2 = 0
        code += "<<<"  # point to first operand
        code += "[>>>+<<<-]"  # move first operand to temp2
        code += ">>>"  # point to temp2
        # do in a loop: as long as temp2 != 0
        code += "["
        code += "<<"  # point to second operand
        code += "[<+>>+<-]"  # add it to first operand and temp1
        code += ">"  # point to temp1
        code += "[<+>-]"  # move it to second operand
        # end loop
        code += ">"  # point back to temp2
        code += "-"  # decrease temp2
        code += "]"
        code += "<<"  # point back to next available cell (second operand)
        return code
    elif op == "/":
        code = get_divmod_code(right_token)
        code += ">>>"  # point to a/b
        code += "[<<<+>>>-]"  # copy a/b to current cell
        code += "<<"  # point to next available cell
        return code
    elif op == "%":
        code = get_divmod_code(right_token)
        code += ">>"  # point to a%b
        code += "[<<+>>-]"  # copy a%b to current cell
        code += "<"  # point to next available cell
        return code
    # relops
    elif op == "==":
        # a, b
        code = "[->-<]"  # a = 0, b = b - a
        code += "+"  # a = 1. will hold the result. if a!=b, this is unchanged
        code += ">"  # point to b
        code += "["  # if b == 0, enter the following code
        code += "<->[-]"  # a = 0, b=0
        code += "]"  # end of "loop"
        return code
    elif op == "!=":
        # a, b
        code = "[->-<]"  # a = 0, b = b - a
        # a will hold the result. if a != b, this is unchanged
        code += ">"  # point to b
        code += "["  # if b == 0, enter the following code
        code += "<+>[-]"  # a = 1, b=0
        code += "]"  # end of "loop"
        return code
    elif op == ">":
        # a, b, c, d
        code = ">>[-]"  # c = 0 (will hold res)
        code += ">[-]"  # d = 0
        code += "<<<"  # point to a
        code += "["  # while a != 0
        code += ">>[-]"  # c = 0
        code += "<"  # point to b
        code += "[>+>+<<-]>[<+>-]"  # copy b to d (via c)
        code += "+"  # c = 1 (will hold res)
        code += ">"  # point to d
        code += "["  # if d != 0
        code += "[-]"  # d = 0
        code += "<-"  # c = 0
        code += "<-"  # b -= 1
        code += ">>"  # point to d
        code += "]"  # end if
        code += "<<<"  # point to a
        code += "-"  # a -= 1
        code += "]"  # end while
        # move c to a
        code += ">>"  # point to c
        code += "[<<+>>-]"  # move c to a
        code += "<"  # point to b (next available cell)
        """
        x > y?
        res = 0
        while x != 0:
            res = 1
            if y != 0:
                res = 0
                y -= 1
            x -= 1
        """
        return code
    elif op == "<":
        # similar to >
        # a, b, c, d
        code = ">>[-]"  # c = 0 (will hold res)
        code += ">[-]"  # d = 0
        code += "<<"  # point to b
        code += "["  # while b != 0
        code += ">[-]"  # c = 0
        code += "<<"  # point to a
        code += "[>>+>+<<<-]>>[<<+>>-]"  # copy a to d (via c)
        code += "+"  # c = 1 (will hold res)
        code += ">"  # point to d
        code += "["  # if d != 0
        code += "[-]"  # d = 0
        code += "<-"  # c = 0
        code += "<<-"  # a -= 1
        code += ">>>"  # point to d
        code += "]"  # end if
        code += "<<"  # point to b
        code += "-"  # b -= 1
        code += "]"  # end while
        # move c to a
        code += "<"  # point to a
        code += "[-]"  # a = 0
        code += ">>"  # point to c
        code += "[<<+>>-]"  # move c to a
        code += "<"  # point to b (next available cell)
        """
        x < y?
        res = 0
        while y != 0:
            res = 1
            if x != 0:
                res = 0
                x -= 1
            y -= 1
        """
        return code
    elif op == "<=":
        # a, b, c, d
        code = ">>[-]+"  # c = 1 (will hold res)
        code += ">[-]"  # d = 0
        code += "<<<"  # point to a
        code += "["  # while a != 0
        code += ">>[-]"  # c = 0
        code += "<"  # point to b
        code += "[>+>+<<-]>[<+>-]"  # copy b to d (via c)
        code += ">"  # point to d
        code += "["  # if d != 0
        code += "[-]"  # d = 0
        code += "<+"  # c = 1
        code += "<-"  # b -= 1
        code += ">>"  # point to d
        code += "]"  # end if
        code += "<<<"  # point to a
        code += "-"  # a -= 1
        code += "]"  # end while
        # move c to a
        code += ">>"  # point to c
        code += "[<<+>>-]"  # move c to a
        code += "<"  # point to b (next available cell)
        """
        x <= y?
        res = 1
        while x != 0:
            res = 0
            if y != 0:
                res = 1
                y -= 1
            x -= 1
        """
        return code
    elif op == ">=":
        # similar to <=
        # a, b, c, d
        code = ">>[-]+"  # c = 1 (will hold res)
        code += ">[-]"  # d = 0
        code += "<<"  # point to b
        code += "["  # while b != 0
        code += ">[-]"  # c = 0
        code += "<<"  # point to a
        code += "[>>+>+<<<-]>>[<<+>>-]"  # copy a to d (via c)
        code += ">"  # point to d
        code += "["  # if d != 0
        code += "[-]"  # d = 0
        code += "<+"  # c = 1
        code += "<<-"  # a -= 1
        code += ">>>"  # point to d
        code += "]"  # end if
        code += "<<"  # point to b
        code += "-"  # b -= 1
        code += "]"  # end while
        # move c to a
        code += "<"  # point to a
        code += "[-]"  # a = 0
        code += ">>"  # point to c
        code += "[<<+>>-]"  # move c to a
        code += "<"  # point to b (next available cell)
        """
        x >= y?
        res = 1
        while y != 0:
            res = 0
            if x != 0:
                res = 1
                x -= 1
            y -= 1
        """
        return code
    elif op == "<<":
        # left shift: repeatedly double a, once per unit of b
        # a, b, temp
        code = ">>[-]"  # zero temp
        code += "<"  # point to b
        code += "["  # while b != 0
        code += "<"  # point to a
        code += "[>>+<<-]"  # copy a to temp
        code += ">>"  # point to temp
        code += "[<<++>>-]"  # multiply temp by 2 and store result in a
        code += "<-"  # point to b and b -= 1
        code += "]"  # end while
        return code
    elif op == ">>":
        # right shift: repeatedly halve a (integer division by 2), once per unit of b
        # a, b, c, x, y, z
        code = ">"  # point to b
        code += ">[-]" * 4  # clear 4 cells
        code += "<" * 4  # point to b
        code += "["  # while b != 0
        code += ">++"  # set c to 2
        code += "<<"  # point to a
        code += "["  # while a != 0
        code += "-"  # a -= 1
        code += ">>-"  # c -= 1
        code += "[>>+>+<<<-]>>>[<<<+>>>-]"  # copy c to y (via z)
        code += "<"  # point to y
        code += "-["  # if y == 0
        code += "<+"  # x += 1
        code += "<++"  # set c to 2
        code += ">>"
        code += "+"  # zero y
        code += "]"  # end if
        code += "<<<<"  # point to a
        code += "]"  # end while
        code += ">>>"  # point to x
        code += "[<<<+>>>-]"  # move x to a
        code += "<[-]"  # zero c
        code += "<-"  # b -= 1
        code += "]"  # end while
        return code
    elif op_token.type == Token.BITWISE_AND:
        code = get_bitwise_code("[->[-<<+>>]<]>[-]")
        return code
    elif op_token.type == Token.BITWISE_OR:
        code = get_bitwise_code("[>+<-]>[[-]<<+>>]")
        return code
    elif op_token.type == Token.BITWISE_XOR:
        code = get_bitwise_code("[>-<-]>[[-]<<+>>]")
        return code
    raise NotImplementedError
def get_op_boolean_operator_code(node, current_pointer):
    """Emit short-circuiting Brainfuck code for a boolean AND / OR node.

    The result (0 or 1) is left at the current cell; on exit the pointer is
    one past the result cell. The right operand is evaluated only when the
    left operand does not already decide the outcome.
    """
    # short-circuit evaluation of AND and OR
    assert node.token.type in [Token.AND, Token.OR]
    if node.token.type == Token.AND:
        # result, operand
        code = "[-]"  # zero result
        code += ">"  # point to next cell
        code += node.left.get_code(current_pointer + 1)  # evaluate first operand
        code += "<"  # point to first operand
        code += "["  # if it is non-zero
        code += "[-]"  # zero first operand
        code += node.right.get_code(current_pointer + 1)  # evaluate second operand
        code += "<"  # point to second operand
        code += "["  # if it is non-zero
        code += "<+>"  # result = 1
        code += "[-]"  # zero second operand
        code += "]"  # end if
        code += "]"  # end if
        # now we point to one after result (next available cell)
        return code
    elif node.token.type == Token.OR:
        # result, check_second_operand/second_operand, first_operand
        code = "[-]"  # zero result
        code += ">"  # point to check_second_operand
        code += "[-]+"  # check_second_operand = 1
        code += ">"  # point to next cell
        code += node.left.get_code(current_pointer + 2)  # evaluate first operand
        code += "<"  # point to first operand
        code += "["  # if it is non-zero
        code += "<<+"  # result = 1
        code += ">-"  # check_second_operand = 0
        code += ">[-]"  # zero first operand
        code += "]"  # end if
        code += "<"  # point to check_second_operand
        code += "["  # if check_second_operand
        code += node.right.get_code(current_pointer + 1)  # evaluate second operand
        code += "<"  # point to second operand
        code += "["  # if it is non-zero
        code += "<+>"  # result = 1
        code += "[-]"  # zero second operand
        code += "]"  # end if
        code += "]"  # end if
        # now we point to one after result (next available cell)
        return code
    raise NotImplementedError
def get_print_string_code(string):
    """Emit Brainfuck code that prints *string*.

    Uses the current cell to hold each character value and the next cell as
    scratch for get_set_cell_value_code; each character is set relative to
    the previous one, which is cheaper than setting it from zero every time.
    Ends with the pointer on the character cell.
    """
    code = "[-]"  # zero the current cell
    code += ">[-]"  # zero the next cell (will be used for loop counts)
    code += "<"  # point to the original cell ("character" cell)
    prev_value = 0
    # Iterate the characters directly instead of indexing via range(len()).
    for char in string:
        current_value = ord(char)
        code += get_set_cell_value_code(current_value, prev_value, zero_next_cell_if_necessary=False)
        code += "."  # output the cell as one character
        prev_value = current_value
    return code
def get_move_right_index_cells_code(current_pointer, node_index):
    """Emit code that evaluates *node_index*, then walks the pointer right
    by that many cells, carrying the index value along.

    Final layout: 0 index next_available_cell (pointer on the last).
    """
    # used for arrays
    # returns a code that evaluates the index, then moves the pointer right, <index> amount of cells
    # at the end of execution, the layout is:
    # 0 index next_available_cell (point to next available cell)
    # index, steps_taken_counter
    code = node_index.get_code(current_pointer)  # index
    code += "[-]"  # counter = 0
    code += "<"  # point to index
    code += "["  # while index != 0
    code += ">>"  # point to new_counter (one after current counter)
    code += "[-]+"  # zero new_counter then add 1 to the new_counter
    code += "<"  # move to old counter
    code += "[>+<-]"  # add old counter to new counter
    code += "<"  # point to old index
    code += "-"  # sub 1 from old index
    code += "[>+<-]"  # move old index to new index
    code += ">"  # point to new index
    code += "]"  # end while
    # old_index=0 new_index res (pointing to old index)
    code += ">>"  # point to res
    return code
def get_move_left_index_cell_code():
    """Emit code that walks <index> cells left, dragging <value> along.

    Complement of get_move_right_index_cells_code (used for arrays).
    Assumed layout: value, index (pointer on index). Afterwards the value
    sits <index> cells to the left and the pointer rests on the cell right
    after it (the new next-available cell).
    """
    fragments = (
        "[",        # while index != 0
        "<",        #   point to value
        "[<+>-]",   #   shift value one cell left
        ">",        #   point back to index
        "-",        #   index -= 1
        "[<+>-]",   #   shift index one cell left
        "<",        #   follow index to its new cell
        "]",        # end while
    )
    # value now rests in the target cell; pointer is on the next available cell
    return "".join(fragments)
# =================
# General
# =================
def get_literal_token_value(token):
    """Return the compile-time value of a literal token.

    NUM tokens parse through get_NUM_token_value, CHAR tokens become their
    ordinal, TRUE/FALSE become 1/0.
    """
    # known at compilation time
    assert is_token_literal(token)
    if token.type == Token.CHAR:
        return ord(token.data)
    if token.type == Token.NUM:
        return get_NUM_token_value(token)
    if token.type in (Token.TRUE, Token.FALSE):
        return 1 if token.type == Token.TRUE else 0
def get_NUM_token_value(token):
    """Parse a NUM token's text: hexadecimal when prefixed with '0x', else decimal."""
    text = token.data
    base = 16 if text.startswith("0x") else 10
    return int(text, base)
def get_variable_from_ID_token(ids_map_list, ID_token):
    """Return the variable bound to *ID_token*, searching scopes in order.

    *ids_map_list* is a sequence of scope objects, each exposing an
    ``IDs_dict`` mapping name -> variable; the first scope containing the
    name wins. Raises BFSemanticError when the name is not bound anywhere.
    """
    ID = ID_token.data
    # Iterate the scopes directly instead of indexing via range(len()).
    for ids_map_entry in ids_map_list:
        ids_map = ids_map_entry.IDs_dict
        if ID in ids_map:
            return ids_map[ID]
    raise BFSemanticError("'%s' does not exist" % str(ID_token))
def dimensions_to_size(dimensions):
    """Total number of cells occupied by an array with the given dimensions."""
    return reduce(lambda acc, dim: acc * dim, dimensions)
def get_variable_dimensions_from_token(ids_map_list, ID_token):
    """Dimensions list of the (array) variable named by *ID_token*."""
    return get_variable_from_ID_token(ids_map_list, ID_token).dimensions
def get_id_index(ids_map_list, ID_token):
    """Cell index of the variable named by *ID_token*."""
    return get_variable_from_ID_token(ids_map_list, ID_token).cell_index
def get_offset_to_variable(ids_map_list, ID_token, current_pointer):
    """Distance from the current pointer back to the named variable's cell."""
    return current_pointer - get_id_index(ids_map_list, ID_token)
def is_token_literal(token):
    """True for tokens whose value is known at compile time."""
    return token.type in (Token.TRUE, Token.FALSE, Token.NUM, Token.CHAR)
|
# Legend: plant code (as written in the garden diagram) -> plant name.
_plants = {
    'C': 'Clover',
    'G': 'Grass',
    'R': 'Radishes',
    'V': 'Violets'
}
# Default class roster; cups are assigned to children in alphabetical order.
_children = [
    'Alice', 'Bob', 'Charlie', 'David',
    'Eve', 'Fred', 'Ginny', 'Harriet',
    'Ileana', 'Joseph', 'Kincaid', 'Larry'
]
class Garden:
    """A kindergarten garden: two rows of plants, two cups per child per row."""

    def __init__(self, garden, students=_children):
        # Each line of the diagram becomes one string of plant codes.
        self.rows = garden.split()
        # Cup positions follow the students' alphabetical order.
        self.students = sorted(students)

    def plants(self, child):
        """Return the four plant names belonging to *child*."""
        start = self.students.index(child) * 2
        stop = start + 2
        codes = self.rows[0][start:stop] + self.rows[1][start:stop]
        return [_plants[code] for code in codes]
if __name__ == '__main__':
    # Quick manual check: Bob owns the 3rd and 4th cup of each row.
    garden = Garden("VVCCGG\nVVCCGG")
    print(garden.plants("Bob"))
|
#!/usr/bin/env python
# Function: run `apt-get upgrade` non-interactively by piping the sudo password.
# Filename: (helper script)
# Assuming the password is 123, the shell commands that feed the password
# automatically would be:
#   echo "123" | sudo -S apt-get upgrade
#   echo "123" | sudo -S apt-get install vim
# See `sudo --help` for the -S flag (read the password from stdin).
# SECURITY NOTE(review): the password is hard-coded in plain text and the
# command runs through a shell pipeline (shell=True) - never use this
# outside a throwaway environment.
import subprocess
subprocess.Popen("echo '123' | sudo -S apt-get upgrade", shell=True)
from flask import request
from gateway.app import app
from gateway.http_client import filemanager_http_client
from gateway.utils.handle_api import (
get_client_username,handle_request_response
)
@app.route('/file/create', methods=['POST'])
@handle_request_response
@get_client_username
def file_create(client_username: str):
    """Forward a file-creation request to the filemanager service.

    The JSON request body is passed through unchanged; the upstream status
    code and response body are returned to the decorator chain.
    """
    payload = request.json
    status_code, resp_body = filemanager_http_client.post(
        'file/create', client_username, json=payload
    )
    return status_code, resp_body
import time
import os
import torch
import math
import logging
from conf.train.train_conf_expandnet import get_config
from utils import utils
from models import create_model
from data import create_dataset, create_dataloader
def main():
    """Run the full ExpandNet training loop.

    Loads the config, sets up logging / GPU visibility / tensorboard, builds
    the dataset and model, optionally resumes from a checkpoint, then trains
    for conf.epoch epochs, logging every print_freq steps and checkpointing
    every save_freq steps.
    """
    conf = get_config()
    utils.mkdir_experiments(conf.experiments_dir)
    utils.setup_logger(None, conf.experiments_dir, 'train', level=logging.INFO, screen=True)
    logger = logging.getLogger('base')
    # set gpu
    gpu_list = ','.join(str(x) for x in conf.gpu_ids)
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list
    print('export CUDA_VISIBLE_DEVICES=' + gpu_list)
    # set tensorboard
    if conf.use_tb_logger:
        from tensorboardX import SummaryWriter
        utils.mkdir(conf.log_dir)
        tb_logger = SummaryWriter(log_dir=conf.log_dir)
    # BUGFIX: attribute was misspelled `benckmark`, which silently created a
    # new attribute and never actually enabled cudnn autotuning.
    torch.backends.cudnn.benchmark = True
    # set dataset
    train_dataset = create_dataset(conf)
    train_dataloader = create_dataloader(train_dataset, conf)
    train_size = int(math.ceil(len(train_dataset) / conf.batch_size))
    print('Number of train images: {:,d}, iters: {:,d} per epoch'.format(len(train_dataset), train_size))
    print('Total iters {:,d} for epochs: {:d} '.format(conf.epoch*train_size, conf.epoch))
    if conf.resume:
        resume_state = torch.load(conf.resume)
    else:
        resume_state = None
    # set model
    model = create_model(conf)
    if resume_state:
        start_epoch = resume_state['epoch']
        current_step = resume_state['iter']
        print('Resuming training from epoch: {}, iter: {}.'.format(
            resume_state['epoch'], resume_state['iter']))
        model.resume_training(resume_state)  # handle optimizers and schedulers
    else:
        current_step = 0
        start_epoch = 0
    print('Start training from epoch: {:d}, iter: {:d}'.format(start_epoch, current_step))
    iter_time = time.time()
    for epoch in range(start_epoch, conf.epoch):
        for _, train_data in enumerate(train_dataloader):
            current_step += 1
            model.update_learning_rate()
            # training
            model.feed_data(train_data)
            model.optimize_parameters(current_step)
            # log
            if current_step % conf.print_freq == 0:
                logs = model.get_current_log()
                message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(
                    epoch, current_step, model.get_current_learning_rate())
                for k, v in logs.items():
                    message += '{:s}: {:.4e} '.format(k, v)
                    # tensorboard logger (one scalar per logged metric)
                    if conf.use_tb_logger:
                        tb_logger.add_scalar(k, v, current_step)
                message += '200 iters time: %4.4f' % (time.time() - iter_time)
                iter_time = time.time()
                logger.info(message)
            if current_step % conf.save_freq == 0:
                print('Saving models and training states.')
                model.save(current_step)
                model.save_training_state(epoch, current_step)
    logger.info('Saving the final model.')
    model.save('latest')
    logger.info('End of training.')
if __name__ == '__main__':
    # Script entry point: run the full training loop.
    main()
from rest_framework import generics, status, permissions
from rest_framework.response import Response
from rest_framework.views import APIView
from django.shortcuts import get_object_or_404
from .models import Profile
from authors.apps.authentication.models import User
from authors.apps.articles.models import Article
from .renderers import ProfileRenderer
from .serializers import ProfileSerializer
from .exceptions import ProfileDoesNotExist
from .permissions import IsOwnerOrReadOnly
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
class ProfileRetrieveUpdateView(generics.GenericAPIView):
    """
    Retrieve and update a user profile, looked up by the `username` URL kwarg.
    """
    permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)
    renderer_classes = (ProfileRenderer,)
    serializer_class = ProfileSerializer

    def get(self, request, *args, **kwargs):
        # Function to retrieve user profile
        # NOTE(review): the lookup filters Profile by `user=<username string>`,
        # which implies Profile.user stores the username - confirm against the model.
        serializer = self.serializer_class(
            get_object_or_404(Profile, user=self.kwargs.get("username"))
        )
        return Response(serializer.data, status=status.HTTP_200_OK)

    def put(self, request, *args, **kwargs):
        # Function to update user profile
        try:
            user = get_user_model().objects.get(
                username=self.kwargs.get("username")
            )
        except ObjectDoesNotExist:
            raise Http404()
        # NOTE(review): raises Profile.DoesNotExist (500) when the user exists
        # but has no profile row - consider get_object_or_404 here as well.
        profile = Profile.objects.get(user=user.username)
        # Only the profile owner may update it.
        # NOTE(review): compares profile.user.pk to request.user.id - assumes
        # profile.user resolves to a related User; verify against the model.
        if not profile.user.pk == request.user.id:
            return Response(
                {"detail": "You don't have permissions to update this user"},
                status=status.HTTP_403_FORBIDDEN,
            )
        # Partial update: only the submitted "profile" fields are changed.
        serializer = self.serializer_class(
            profile, data=request.data["profile"], partial=True
        )
        if serializer.is_valid(raise_exception=True):
            serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
class ListProfiles(APIView):
    """
    List the profiles of every user who has authored at least one article.
    """
    permission_classes = (permissions.IsAuthenticated,)

    def get(self, request, *args, **kwargs):
        """Return serialized profiles of all distinct article authors."""
        authored = Article.objects.distinct("author").all()
        author_names = [article.author.username for article in authored]
        profiles = Profile.objects.filter(user_id__in=author_names)
        serialized = ProfileSerializer(profiles, many=True)
        return Response(
            data={"authors": serialized.data}, status=status.HTTP_200_OK
        )
|
"""
Definition of forms.
"""
from django import forms
from app.models import WellInstance, WellInfo, GeoInfo, RiskProfile
class WellForm(forms.ModelForm):
    """Form for identifying a WellInstance by location and name."""
    class Meta:
        model = WellInstance
        fields = ['Country', 'State', 'City', 'Well']
class WellInfoForm(forms.ModelForm):
    """Form for a well's operational and technical details."""
    class Meta:
        model = WellInfo
        fields = ['NumberOfEstablishedPlantsNearby','NumberOfDevelopmentalPlantsNearby','OperatorName', 'WellStatus', 'ConversionTechnology', 'CoolingType', 'AgeOfWellInYears','WellDepthInMeters']
class GeoInfoForm(forms.ModelForm):
    """Form for a well's geochemical measurements."""
    class Meta:
        model = GeoInfo
        fields = ['Tempreature','Ph','CO2']
class RiskProfileForm(forms.ModelForm):
    """Form for a well's risk assessment results."""
    class Meta:
        model = RiskProfile
        fields = ['High','Medium','Low','Score','Notes']
|
#!/usr/bin/python2.7
# Standard library imports.
from contextlib import closing
from sqlite3 import connect
# The path of the database.
db_path = r"/mnt/c/Users/Amanda/Desktop/spring-2018/is211/pets.db"
# The query used to query people.
select_person = """
SELECT first_name, last_name, age
FROM person
WHERE id = ?
"""
# The query used to query pets.
select_pet = """
SELECT name, breed, dead, age
FROM pet
JOIN person_pet
ON pet_id = id
WHERE person_id = ?
"""
# The function to be executed repeatedly.
def perform_query():
# Fetch the user input.
try:
id = int(raw_input("Please enter the person ID you wish to search for: "))
except ValueError:
print "Invalid input. Please try again."
return perform_query()
# Exit as necessary.
if id == -1:
return
# Execute the necessary insert statements.
with closing(connect(db_path, isolation_level=None)) as conn:
cursor = conn.cursor()
# Retrieve any matching people.
result = cursor.execute(select_person, [id]).fetchone()
if not result:
print "No people exist for the given ID."
return perform_query()
print "%s %s, %d years old" % result
# Retrieve any match pets.
for name, breed, dead, age in cursor.execute(select_pet, [id]):
print "%s %s %s %s, a %s that %s %d years old." % (
result[0], result[1], "owned" if dead else "owns", name,
breed, "was" if dead else "is", age)
perform_query()
if __name__ == "__main__":
    # Run the interactive query loop.
    perform_query()
|
import pytest
from autumn.settings import Models
from autumn.core.project.project import _PROJECTS, get_project
# Build (project_name, model_name) pairs for parametrizing the calibration
# smoke tests below - one list per model family.
COVID_PROJECTS = list(_PROJECTS[Models.COVID_19].keys())
COVID_CALIBS = list(zip(COVID_PROJECTS, [Models.COVID_19] * len(COVID_PROJECTS)))
TB_PROJECTS = list(_PROJECTS[Models.TB].keys())
TB_CALIBS = list(zip(TB_PROJECTS, [Models.TB] * len(TB_PROJECTS)))
SM_SIR_PROJECTS = list(_PROJECTS[Models.SM_SIR].keys())
SM_SIR_CALIBS = list(zip(SM_SIR_PROJECTS, [Models.SM_SIR] * len(SM_SIR_PROJECTS)))
@pytest.mark.github_only
@pytest.mark.nightly_only
@pytest.mark.calibrate_models
@pytest.mark.parametrize("project_name, model_name", COVID_CALIBS)
def test_calibration_covid19(project_name, model_name):
    """
    Calibration smoke test - make sure everything can run for 10 seconds without exploding.
    """
    project = get_project(model_name, project_name)
    # Single chain with a 10-second budget: exercises the pipeline wiring,
    # not convergence.
    project._calibrate(max_seconds=10, chain_idx=1, num_chains=1)
@pytest.mark.github_only
@pytest.mark.nightly_only
@pytest.mark.calibrate_models
@pytest.mark.parametrize("project_name, model_name", TB_CALIBS)
def test_calibration_tb(project_name, model_name):
    """
    Calibration smoke test - make sure everything can run for 10 seconds without exploding.
    """
    project = get_project(model_name, project_name)
    # Single chain with a 10-second budget: exercises the pipeline wiring,
    # not convergence.
    project._calibrate(max_seconds=10, chain_idx=1, num_chains=1)
@pytest.mark.github_only
@pytest.mark.calibrate_models
# NOTE(review): unlike the covid/tb variants this test has no
# `nightly_only` marker, so it also runs in regular CI - confirm intentional.
@pytest.mark.parametrize("project_name, model_name", SM_SIR_CALIBS)
def test_calibration_sm_sir(project_name, model_name):
    """
    Calibration smoke test - make sure everything can run for 10 seconds without exploding.
    """
    project = get_project(model_name, project_name)
    # Single chain with a 10-second budget: exercises the pipeline wiring,
    # not convergence.
    project._calibrate(max_seconds=10, chain_idx=1, num_chains=1)
|
import os
from glob import glob
import random
import numpy as np
from PIL import Image
def Ramen_Dataset():
    """Collect train/validation PNG file paths from the dataset directory.

    Expects <this file's dir>/dataset/{train,val}/<class>/<image>.png and
    returns (train_files, val_files) as lists of paths.
    """
    root_dir = os.path.dirname(os.path.realpath(__file__))
    # BUGFIX: the pattern used a hard-coded backslash ('*\\*.png'), which only
    # matched on Windows; os.path.join makes the glob portable.
    train_path = os.path.join(root_dir, 'dataset', 'train', '*', '*.png')
    test_path = os.path.join(root_dir, 'dataset', 'val', '*', '*.png')
    train_file = glob(train_path)
    test_file = glob(test_path)
    return train_file, test_file
def read_image(path):
    """Load the image at *path*, resized to 600x600, as a numpy array."""
    resized = Image.open(path).resize((600, 600))
    return np.asarray(resized)
def DataLoader(files, batch_size=32):
    """Yield (images, labels) batches from a list of image file paths.

    Labels are taken from the name of the class directory containing each
    file. Note: shuffles *files* in place, as the original did.

    BUGFIX: the arrays were always allocated at `batch_size`, so the final,
    smaller batch was padded with all-zero images and labels; arrays are now
    sized to the actual batch. Label extraction also used a hard-coded
    backslash split ('\\'), which only worked on Windows paths; os.path
    handles both separators.
    """
    random.shuffle(files)
    for i in range(0, len(files), batch_size):
        batch_data = np.array(files[i:i + batch_size])
        count = len(batch_data)  # last batch may be smaller than batch_size
        batch_labels = np.zeros(count)
        batch_images = np.zeros((count, 600, 600, 3))
        for n, file in enumerate(batch_data):
            batch_images[n, :, :, :] = read_image(file)
            # label = name of the immediate parent (class) directory
            batch_labels[n] = os.path.basename(os.path.dirname(file))
        yield batch_images, batch_labels
# NOTE(review): this runs at import time - the whole dataset is loaded and
# printed whenever the module is imported. Consider guarding it with
# `if __name__ == '__main__':`.
tr, te = Ramen_Dataset()
train_Loader = DataLoader(tr, 32)
for batch in train_Loader:
    print(batch)
|
from bocadillo import App,view
# Application instance shared by all route registrations below.
app = App()

@app.route("/")
async def index(req, res):
    # Root endpoint: replies with an empty text body.
    res.text = ""
@app.route("/user")
@view(methods=["post"])
async def greet(req, res):
    # POST-only endpoint; currently replies with an empty body.
    res.text = ""
@app.route("/user/{id}")
async def user_info(req, res, id):
    # Echo the `id` path parameter back as the response body.
    res.text = id
|
import pytest
import pytest_check as ck
@pytest.mark.p0
@pytest.mark.api
def test_query_fuel_card_normal(api,data):
    '''Normal fuel-card query: expect a successful response.'''
    request_data = data.get('test_query_fuel_card_normal')
    res_dict = api.request_all(request_data).json()
    print(f'响应数据{res_dict}')
    # Response assertions (soft-check style: all failures are collected)
    ck.equal(200, res_dict.get("code"))
    ck.equal("成功返回", res_dict.get("msg"))
    ck.is_true(res_dict.get('success'))
@pytest.mark.p1
@pytest.mark.api
def test_query_fuel_card_norma2(api,data):
    '''Query with no matching information: expect a 400 business code.'''
    request_data = data.get('test_query_fuel_card_norma2')
    res_dict = api.request_all(request_data).json()
    print(f'响应数据{res_dict}')
    # Response assertions (soft-check style: all failures are collected)
    ck.equal(400, res_dict.get("code"))
    ck.equal("无查询信息", res_dict.get("msg"))
    ck.is_false(res_dict.get('success'))
@pytest.mark.p2
@pytest.mark.api
def test_query_fuel_card_norma3(api,data):
    '''Malformed-request case: the call is expected to fail without crashing the suite.'''
    request_data3 = data.get('test_query_fuel_card_norma3')
    try:
        res_dict = api.request_all(request_data3).json()
        print(f'响应数据{res_dict}')
    except Exception as exc:
        # Was a bare `except: pass`, which also swallowed KeyboardInterrupt
        # and hid the failure reason; stay tolerant but log what happened.
        print(f'request failed: {exc}')
@pytest.mark.p2
@pytest.mark.api
def test_query_fuel_card_norma4(api,data):
    '''Malformed-request case: the call is expected to fail without crashing the suite.'''
    try:
        request_data4 = data.get('test_query_fuel_card_norma4')
        res_dict = api.request_all(request_data4).json()
        print(f'响应数据{res_dict}')
    except Exception as exc:
        # Was a bare `except: pass`, which also swallowed KeyboardInterrupt
        # and hid the failure reason; stay tolerant but log what happened.
        print(f'request failed: {exc}')
@pytest.mark.p2
@pytest.mark.api
def test_query_fuel_card_norma5(api,data):
    '''Malformed-request case: the call is expected to fail without crashing the suite.'''
    try:
        request_data5 = data.get('test_query_fuel_card_norma5')
        res_dict = api.request_all(request_data5).json()
        print(f'响应数据{res_dict}')
    except Exception as exc:
        # Was a bare `except: pass`, which also swallowed KeyboardInterrupt
        # and hid the failure reason; stay tolerant but log what happened.
        print(f'request failed: {exc}')
if __name__ == "__main__":
    # NOTE(review): hard-coded absolute Windows path - this only works on the
    # original author's machine; prefer a relative path or plain `pytest.main(["-s"])`.
    pytest.main(["-s", r"D:\TestTool\Python\Location\longteng17_1\longteng17\test_cases\api_test\test_query_fuel_card.py"])
|
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from youtube.services import YoutubeService
from youtube.models import Video
from core.utils import db_table_exists
class Command(BaseCommand):
    help = 'Get MostViewed Videos from Youtube and adds them to database'

    def add_arguments(self, parser):
        """Register the positional `username` argument."""
        parser.add_argument('username', type=str, default='Google')

    def handle(self, *args, **options):
        """Fetch the user's videos, prune expired rows, store the new batch."""
        if not db_table_exists('youtube_video'):
            raise CommandError('There are no youtube_video table in database, migrate first!')
        service = YoutubeService()
        videos = service.get_my_videos(options['username'])
        if not videos:
            raise CommandError('No Videos for that username!')
        # Drop stale rows before inserting the fresh batch.
        Video.objects.filter(expiration_date__lte=timezone.now()).delete()
        Video.objects.bulk_create(videos)
        self.stdout.write(self.style.SUCCESS('Successfully added Your Videos to database!'))
|
# pylint: disable=duplicate-code, too-many-statements
''' Unit test for user model integration '''
import unittest
import logging
from test.common import async_test, UserModelTestCase
from PIL import Image
from utils import image_to_file
# Initialize loggers
logging.basicConfig(level=logging.WARNING)
class TestTrainPredict(UserModelTestCase):
''' Test user model '''
    @async_test
    async def test_no_permission(self):
        ''' Test with no permission '''
        # With an empty database the bot must ignore every command and photo.
        self.database.drop_testing_database()  # Clear database
        user = self.user
        for cmd in ['help', 'start', 'user', 'admin', 'train', 'done',
                    'addadmin', 'adduser']:
            await user.send_message('/{}'.format(cmd))
            message = await user.get_message()
            # No reply at all is expected for an unauthorized user.
            self.assertEqual(message, None)
        # Send photo with no face; it must also be ignored.
        no_face_image = Image.new('RGB', (30, 30), color='white')
        no_face_photo = image_to_file(no_face_image)
        await user.send_photo(no_face_photo)
        message = await user.get_message(10)
        self.assertEqual(message, None)
    @async_test
    async def test_user_permission(self):
        ''' Test with user permission '''
        # Setup permission: register this account with the plain "user" role.
        self.database.drop_testing_database()  # Clear database
        if '@' not in self.user.user_id:
            self.user.user_id = '@' + self.user.user_id
        self.database.add_user(self.user.user_id, 'user')
        # Test forbidden commands - all admin-level commands must be denied.
        user = self.user
        for cmd in ['user', 'admin', 'train', 'done', 'addadmin', 'adduser']:
            await user.send_message('/{}'.format(cmd))
            message = await user.get_message()
            self.assertTrue('Permission denied' in message.text)
        # Send photo with no face; prediction is allowed but should find nothing.
        no_face_image = Image.new('RGB', (30, 30), color='white')
        no_face_photo = image_to_file(no_face_image)
        await user.send_photo(no_face_photo)
        message = await user.get_message(10)
        self.assertTrue('No model' in message.text
                        or 'No face found' in message.text)
    @async_test
    async def test_admin_permission(self):
        ''' Test with admin permission '''
        # Setup permission: register this account with the "admin" role.
        self.database.drop_testing_database()  # Clear database
        if '@' not in self.user.user_id:
            self.user.user_id = '@' + self.user.user_id
        self.database.add_user(self.user.user_id, 'admin')
        # Test forbidden commands - root-admin-only commands must be denied.
        user = self.user
        for cmd in ['admin', 'addadmin']:
            await user.send_message('/{}'.format(cmd))
            message = await user.get_message()
            self.assertTrue('Permission denied' in message.text)
        # Run predict on a photo with no face.
        no_face_image = Image.new('RGB', (30, 30), color='white')
        no_face_photo = image_to_file(no_face_image)
        await user.send_photo(no_face_photo)
        message = await user.get_message(10)
        self.assertTrue('No model' in message.text
                        or 'No face found' in message.text)
        # Run /train with label
        tag = 'testlabel1'
        await user.send_message('/train {}'.format(tag))
        message = await user.get_message()
        self.assertTrue(tag in message.text)
        # Send photo with one face
        one_face_photo = open('./test/media/wong_1.jpg', 'rb')
        await user.send_photo(one_face_photo)
        one_face_photo.close()
        # Check message: bot acknowledges the label and asks for more photos.
        message = await user.get_message()
        self.assertTrue(tag in message.text)
        self.assertTrue('more' in message.text)
        await user.send_message('/done')
        message = await user.get_message()
        self.assertTrue('Done' in message.text)
        self.assertTrue(tag in message.text)
        # List users
        await user.send_message('/user')
        message = await user.get_message()
        self.assertTrue('List of users' in message.text)
        # Add user (the bot account itself is used as a convenient dummy user)
        if '@' not in self.user.bot_id:
            self.user.bot_id = '@' + self.user.bot_id
        await user.send_message('/adduser {}'.format(self.user.bot_id))
        message = await user.get_message()
        self.assertTrue('Added user' in message.text)
        self.assertTrue(self.user.bot_id in message.text)
        # Cancel on removing user
        await user.send_message('/user')
        message = await user.get_message()
        self.assertTrue('List of users' in message.text)
        self.assertEqual(message.button_count, 1)
        self.assertTrue(self.user.bot_id in message.buttons[0][0].text)
        # Click on user
        await message.click(0)
        # Display Remove and cancel button
        message = await user.get_message(last=True)
        self.assertEqual(message.button_count, 2)
        await message.click(1)  # Cancel
        message = await user.get_message(last=True)
        self.assertEqual(message.button_count, 1)
        self.assertTrue(self.user.bot_id in message.buttons[0][0].text)
        # Remove user (same flow, but confirm the removal this time)
        await user.send_message('/user')
        message = await user.get_message()
        self.assertTrue('List of users' in message.text)
        self.assertEqual(message.button_count, 1)
        self.assertTrue(self.user.bot_id in message.buttons[0][0].text)
        # Click on user
        await message.click(0)
        # Display Remove and cancel button
        message = await user.get_message(last=True)
        self.assertEqual(message.button_count, 2)
        await message.click(0)  # Remove
        message = await user.get_message(last=True)
        self.assertEqual(message.button_count, 0)
@async_test
async def test_root_admin_permission(self):
    '''End-to-end exercise of the bot as a user with the root_admin role.

    Walks through predict, /train + /done, the full /user add/cancel/remove
    cycle, and (unique to root_admin) the full /admin add/cancel/remove
    cycle.  The statement order mirrors a live conversation with the bot,
    so the awaits must not be reordered.

    NOTE(review): relies on the async_test decorator, image_to_file helper,
    and self.user / self.database fixtures defined earlier in this file.
    '''
    # Setup permission
    self.database.drop_testing_database() # Clear database
    if '@' not in self.user.user_id:
        self.user.user_id = '@' + self.user.user_id
    self.database.add_user(self.user.user_id, 'root_admin')
    user = self.user
    # Run predict: a blank image has no face, and no model is trained yet
    no_face_image = Image.new('RGB', (30, 30), color='white')
    no_face_photo = image_to_file(no_face_image)
    await user.send_photo(no_face_photo)
    message = await user.get_message(10)
    self.assertTrue('No model' in message.text
                    or 'No face found' in message.text)
    # Run /train with label
    tag = 'testlabel1'
    await user.send_message('/train {}'.format(tag))
    message = await user.get_message()
    self.assertTrue(tag in message.text)
    # Send photo with one face
    one_face_photo = open('./test/media/wong_1.jpg', 'rb')
    await user.send_photo(one_face_photo)
    one_face_photo.close()
    # Check message: bot echoes the label and asks for more samples
    message = await user.get_message()
    self.assertTrue(tag in message.text)
    self.assertTrue('more' in message.text)
    await user.send_message('/done')
    message = await user.get_message()
    self.assertTrue('Done' in message.text)
    self.assertTrue(tag in message.text)
    # List users
    await user.send_message('/user')
    message = await user.get_message()
    self.assertTrue('List of users' in message.text)
    # Add user (the bot's own id is used as a throwaway account)
    if '@' not in self.user.bot_id:
        self.user.bot_id = '@' + self.user.bot_id
    await user.send_message('/adduser {}'.format(self.user.bot_id))
    message = await user.get_message()
    self.assertTrue('Added user' in message.text)
    self.assertTrue(self.user.bot_id in message.text)
    # Cancel on removing user
    await user.send_message('/user')
    message = await user.get_message()
    self.assertTrue('List of users' in message.text)
    self.assertEqual(message.button_count, 1)
    self.assertTrue(self.user.bot_id in message.buttons[0][0].text)
    # Click on user
    await message.click(0)
    # Display Remove and cancel button
    message = await user.get_message(last=True)
    self.assertEqual(message.button_count, 2)
    await message.click(1) # Cancel
    message = await user.get_message(last=True)
    self.assertEqual(message.button_count, 1)
    self.assertTrue(self.user.bot_id in message.buttons[0][0].text)
    # Remove user
    await user.send_message('/user')
    message = await user.get_message()
    self.assertTrue('List of users' in message.text)
    self.assertEqual(message.button_count, 1)
    self.assertTrue(self.user.bot_id in message.buttons[0][0].text)
    # Click on user
    await message.click(0)
    # Display Remove and cancel button
    message = await user.get_message(last=True)
    self.assertEqual(message.button_count, 2)
    await message.click(0) # Remove
    message = await user.get_message(last=True)
    self.assertEqual(message.button_count, 0)
    # List admins (root_admin-only command)
    await user.send_message('/admin')
    message = await user.get_message()
    self.assertTrue('List of admins' in message.text)
    # Add admin
    if '@' not in self.user.bot_id:
        self.user.bot_id = '@' + self.user.bot_id
    await user.send_message('/addadmin {}'.format(self.user.bot_id))
    message = await user.get_message()
    self.assertTrue('Added admin' in message.text)
    self.assertTrue(self.user.bot_id in message.text)
    # Cancel on removing admin
    await user.send_message('/admin')
    message = await user.get_message()
    self.assertTrue('List of admins' in message.text)
    self.assertEqual(message.button_count, 1)
    self.assertTrue(self.user.bot_id in message.buttons[0][0].text)
    # Click on admin
    await message.click(0)
    # Display Remove and cancel button
    message = await user.get_message(last=True)
    self.assertEqual(message.button_count, 2)
    await message.click(1) # Cancel
    message = await user.get_message(last=True)
    self.assertEqual(message.button_count, 1)
    self.assertTrue(self.user.bot_id in message.buttons[0][0].text)
    # Remove admin
    await user.send_message('/admin')
    message = await user.get_message()
    self.assertTrue('List of admins' in message.text)
    self.assertEqual(message.button_count, 1)
    self.assertTrue(self.user.bot_id in message.buttons[0][0].text)
    # Click on admin
    await message.click(0)
    # Display Remove and cancel button
    message = await user.get_message(last=True)
    self.assertEqual(message.button_count, 2)
    await message.click(0) # Remove
    message = await user.get_message(last=True)
    self.assertEqual(message.button_count, 0)
# Run the test suite when this file is executed directly.
if __name__ == '__main__': # pragma: no cover
    unittest.main()
|
# 727. Minimum Window Subsequence
'''
Given strings S and T, find the minimum (contiguous) substring W of S, so that T is a subsequence of W.
If there is no such window in S that covers all characters in T, return the empty string "". If there are multiple such minimum-length windows, return the one with the left-most starting index.
'''
# Basic idea: DP
class Solution:
    def minWindow(self, S: str, T: str) -> str:
        """Return the minimum contiguous substring W of S such that T is a
        subsequence of W.

        Returns '' when no window covers T (or when S or T is empty); ties
        in length are broken by the left-most starting index.

        Iterative bottom-up DP, O(m*n) time and O(n) extra space.  This
        replaces the recursive lru_cache formulation, which exceeded
        Python's recursion limit for long S and raised IndexError when S
        was empty, and it finds the minimum in one pass instead of sorting
        all candidates.
        """
        m, n = len(S), len(T)
        if m == 0 or n == 0:
            # Original behavior: empty T yields ''; empty S previously
            # crashed with IndexError -- now also returns ''.
            return ''
        INF = float('inf')
        # prev[j] = length of the shortest window ending at the previous
        # character of S that contains T[:j+1] as a subsequence.
        prev = [INF] * n
        best_len, best_end = INF, -1
        for i, ch in enumerate(S):
            cur = [INF] * n
            for j in range(n):
                if ch == T[j]:
                    # The window must end on a match of T[j]; extend the
                    # best window covering T[:j] (length 0 when j == 0).
                    cur[j] = (prev[j - 1] if j else 0) + 1
                elif prev[j] != INF:
                    # Stretch the window already covering T[:j+1] by one.
                    cur[j] = prev[j] + 1
            # Strict '<' keeps the left-most window among equal lengths.
            if cur[n - 1] < best_len:
                best_len, best_end = cur[n - 1], i
            prev = cur
        if best_len == INF:
            return ''
        return S[best_end - best_len + 1:best_end + 1]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
#------------------------------------------------------------------------------
# Input Shaping Module - InputShaping.py
#
# Python module for the input shaping toolbox
# - Adapted from MATLAB input shaping toolbox
#
# Created: 2/18/13 - Joshua Vaughan - joshua.vaughan@louisiana.edu
#
# Modified:
# * 2/19/13 - JEV - joshua.vaughan@louisiana.edu
# - Added positive ZV-type input shapers
# - Added positive EI-type input shapers
# * 2/20/13 - JEV
# - Added UM-ZV-type input shapers
# - Added UM-EI-type input shapers
# * 2/26/13 - JEV
# - Added sensplot
# * 3/26/13 - JEV
# - began adding proper docstrings for use with help(___) or ___?
# * 09/19/14 - JEV
# - fixed numpy namespace
# * 12/27/14 - JEV
# - some improved formatting to be more idiomatic, still more to do
# * 01/07/15 - JEV
# - began move to class based structure
# * 02/01/15 - JEV
# - began work on two mode shapers
# - added EI shaper and parent tolerable level shaper class
# * 02/16/15 - JEV
# - Finished class conversion for all "common" shapers
# - Added common input types as functions
# - Added functional shaped-command formulation
#------------------------------------------------------------------------------
"""
import numpy as np
import warnings
import matplotlib.pyplot as plt
from abc import ABCMeta, abstractmethod
# Let's also improve the printing of NumPy arrays.
np.set_printoptions(suppress=True, formatter={'float': '{: 0.4f}'.format})

# Define a few constants
HZ_to_rads = 2.0 * np.pi  # conversion factor: frequency in Hz -> rad/s
class Shaper(object):
    ''' Parent class for all shapers

    Attributes:
        shaper : exact representation of the shaper (Nx2 array [time, amp])
        digitized_shaper : digitized version of the shaper
        duration : duration of the shaper (s)
        type : type of shaper
        amp_type : type of amplitude constraints (Positive, Negative, or SNA)
        design_freq : the frequency used to design the shaper (Hz)
        design_damping : the damping ratio used to design the Shaper
    '''
    # Python 2 style abstract-class declaration (kept for compatibility).
    __metaclass__ = ABCMeta

    def __init__(self, frequency, zeta, deltaT = 0.01):
        """ Shaper Initialization function

        Parses the user inputs and calls the solve_for_shaper method
        (supplied by each concrete subclass) to get amplitudes and times.

        Arguments:
            frequency : the design frequency for the shaper (Hz)
            zeta : the design damping ratio for the shaper
            deltaT : the sampling time (s), used for the digitized version, default = 0.01
        """
        self.design_freq = frequency
        self.design_damping = zeta
        self.design_deltaT = deltaT
        self.shaper, self.digitized_shaper = self.solve_for_shaper(frequency, zeta, deltaT)
        self.times = self.shaper[:,0]   # impulse times (s), column 0
        self.amps = self.shaper[:,1]    # impulse amplitudes, column 1
        self.duration = self.times[-1]  # duration = time of the last impulse

    def __str__(self):
        """ Set up pretty printing of the shaper """
        # (local name 'type' shadows the builtin; harmless in this scope)
        type = 'Shaper Type \t \t \t {}\n'.format(self.type)
        designfreq = 'Design Frequency \t \t {:0.4f}\t Hz\n'.format(self.design_freq)
        designdamp = 'Design Damping Ratio \t \t {:0.4f}\n'.format(self.design_damping)
        duration = 'Duration \t \t \t {:0.4f}\t s \n'.format(self.duration)
        shaper = '\n' + ' ti Ai \n{}\n'.format(self.shaper)
        return '\n' + type + designfreq + designdamp + duration + shaper

    def plot_sensitivity(self):
        """ Plot the sensitivity curve for the shaper between 0 and
        2x the design frequency, using the module-level sensplot function.

        NOTE(review): sensplot is expected to be defined elsewhere in this
        module -- confirm it exists before calling.
        """
        sensplot(self.shaper, 0.0, 2.0*self.design_freq,
                 self.design_damping, numpoints = 2000, plotflag = 1)

    @abstractmethod
    def solve_for_shaper(self, *args):
        """ Return the shaper impulse amplitudes and times"""
        pass
#----- Positive ZV-Form Shapers (ZV, ZVD, ZVDD, ...) --------------------------
class ZV(Shaper):
    """Zero Vibration (ZV) shaper: two positive impulses spaced half a
    damped period apart."""

    type = 'ZV'
    amp_type = 'Positive'
    isPositive = True

    def solve_for_shaper(self, frequency, zeta, deltaT):
        """Compute the ZV shaper impulse times and amplitudes.

        Arguments:
            frequency : design frequency for the shaper (Hz)
            zeta : design damping ratio for the shaper
            deltaT : sampling time (s) for the digitized version

        Returns:
            (exact shaper as an Nx2 [time, amplitude] array,
             its digitized counterpart sampled at deltaT)
        """
        omega_n = frequency * HZ_to_rads
        decay = np.exp(-zeta*np.pi / (np.sqrt(1-zeta**2)))
        # The two impulses are separated by half the damped period.
        half_damped_period = np.pi / (omega_n*np.sqrt(1-(zeta)**2))
        impulse_times = np.array([[0.0], [half_damped_period]])
        # Amplitudes are normalized so they sum to one.
        total = 1 + decay
        impulse_amps = np.array([[1.0 / total], [decay / total]])
        exact = np.hstack((impulse_times, impulse_amps))
        return exact, digseq(exact, deltaT)
class ZVD(Shaper):
    """ Class describing a ZVD shaper """
    type = 'ZVD'
    amp_type = 'Positive'
    isPositive = True

    def solve_for_shaper(self, frequency, zeta, deltaT):
        """ Return the shaper impulse amplitudes and times

        Arguments:
            frequency : the design frequency for the shaper (Hz)
            zeta : the design damping ratio for the shaper
            deltaT : the sampling time (s), used for the digitized version

        Returns:
            shaper : The shaper solution
            digitized_shaper : the digitized version of the shaper
        """
        wn = frequency * HZ_to_rads  # natural frequency (rad/s)
        # Decay term; ZVD amplitudes follow the binomial weights (1, 2K, K^2)
        K = np.exp(-zeta*np.pi / (np.sqrt(1-zeta**2)))
        # Set up the impulse time spacing: half the damped period
        shaperdeltaT = np.pi / (wn*np.sqrt(1-(zeta)**2))
        # Define the impulse times
        times = np.array([[0.0], [shaperdeltaT], [2.0*shaperdeltaT]])
        # Define the shaper impulse amplitudes (normalized to sum to 1)
        amps = np.array([[1.0 / (1 + 2*K + K**2)],
                         [2.0*K / (1 + 2*K + K**2)],
                         [K**2 / (1 + 2*K + K**2)]])
        shaper = np.hstack((times, amps))
        digitized_shaper = digseq(shaper, deltaT)
        return shaper, digitized_shaper
class ZVDD(Shaper):
    """ Class describing a ZVDD shaper """
    type = 'ZVDD'
    amp_type = 'Positive'
    isPositive = True

    def solve_for_shaper(self, frequency, zeta, deltaT):
        """ Return the shaper impulse amplitudes and times

        Arguments:
            frequency : the design frequency for the shaper (Hz)
            zeta : the design damping ratio for the shaper
            deltaT : the sampling time (s), used for the digitized version

        Returns:
            shaper : The shaper solution
            digitized_shaper : the digitized version of the shaper
        """
        wn = frequency * HZ_to_rads  # natural frequency (rad/s)
        # Decay term; ZVDD amplitudes follow binomial weights (1, 3K, 3K^2, K^3)
        K = np.exp(-zeta*np.pi / (np.sqrt(1-zeta**2)))
        # Set up the impulse time spacing: half the damped period
        shaperdeltaT = np.pi / (wn*np.sqrt(1-(zeta)**2))
        # Define the impulse times
        times = np.array([[0.0], [shaperdeltaT], [2.0*shaperdeltaT], [3.0*shaperdeltaT]])
        # Define the shaper impulse amplitudes (normalized to sum to 1)
        amps = np.array([[1.0 / (1 + 3*K + 3*K**2 + K**3)],
                         [3.0*K / (1 + 3*K + 3*K**2 + K**3)],
                         [3*K**2 / (1 + 3*K + 3*K**2 + K**3)],
                         [K**3 / (1 + 3*K + 3*K**2 + K**3)]])
        shaper = np.hstack((times, amps))
        digitized_shaper = digseq(shaper, deltaT)
        return shaper, digitized_shaper
class ZVDDD(Shaper):
    """ Class describing a ZVDDD shaper """
    type = 'ZVDDD'
    amp_type = 'Positive'
    isPositive = True

    def solve_for_shaper(self, frequency, zeta, deltaT):
        """ Return the shaper impulse amplitudes and times

        Arguments:
            frequency : the design frequency for the shaper (Hz)
            zeta : the design damping ratio for the shaper
            deltaT : the sampling time (s), used for the digitized version

        Returns:
            shaper : The shaper solution
            digitized_shaper : the digitized version of the shaper
        """
        wn = frequency * HZ_to_rads  # natural frequency (rad/s)
        # Decay term; ZVDDD amplitudes follow binomial weights (1,4K,6K^2,4K^3,K^4)
        K = np.exp(-zeta*np.pi / (np.sqrt(1-zeta**2)))
        # Set up the impulse time spacing: half the damped period
        shaperdeltaT = np.pi / (wn*np.sqrt(1-(zeta)**2))
        # Define the impulse times
        times = np.array([[0.0], [shaperdeltaT], [2.0*shaperdeltaT],
                          [3.0*shaperdeltaT], [4.0*shaperdeltaT]])
        # Define the shaper impulse amplitudes (normalized to sum to 1)
        amps = np.array([[1.0 / (1 + 4*K + 6*K**2 + 4*K**3 + K**4)],
                         [4.0*K / (1 + 4*K + 6*K**2 + 4*K**3 + K**4)],
                         [6.0*K**2 / (1 + 4*K + 6*K**2 + 4*K**3 + K**4)],
                         [4.0*K**3 / (1 + 4*K + 6*K**2 + 4*K**3 + K**4)],
                         [K**4 / (1 + 4*K + 6*K**2 + 4*K**3 + K**4)]])
        shaper = np.hstack((times, amps))
        digitized_shaper = digseq(shaper, deltaT)
        return shaper, digitized_shaper
#----- UM-ZV-Form Shapers (ZV, ZVD, ZVDD, ...) --------------------------------
class UMZV(Shaper):
    """ Class describing a UM-ZV shaper """
    type = 'UM-ZV'
    amp_type = 'Negative'
    isPositive = False

    def solve_for_shaper(self, frequency, zeta, deltaT):
        """ Return the shaper impulse amplitudes and times

        Impulse times come from a polynomial curve fit in zeta; the fit is
        only trustworthy for small damping, hence the warning.

        Arguments:
            frequency : the design frequency for the shaper (Hz)
            zeta : the design damping ratio for the shaper
            deltaT : the sampling time (s), used for the digitized version

        Returns:
            shaper : The shaper solution
            digitized_shaper : the digitized version of the shaper
        """
        wn = frequency * HZ_to_rads  # (computed but unused in this method)
        tau = 1.0 / frequency        # undamped vibration period (s)
        if zeta > 0.4:
            warnings.warn('\n \nWARNING: Damping Ratio is probably too large.\n')
        # Define the impulse times (curve-fit coefficients in zeta)
        times = np.array([[0.0],
                          [(0.16658 + 0.29277 * zeta + 0.075438 * zeta**2 + 0.21335 * zeta**3) * tau],
                          [(0.33323 + 0.0053322 * zeta + 0.17914 * zeta**2 + 0.20125 * zeta**3) * tau]])
        # Define the shaper impulse amplitudes (unity-magnitude, alternating sign)
        amps = np.array([[1.0],[-1.0],[1.0]])
        shaper = np.hstack((times, amps))
        digitized_shaper = digseq(shaper, deltaT)
        return shaper, digitized_shaper
class UMZVD(Shaper):
    """ Class describing a UM-ZVD shaper """
    type = 'UM-ZVD'
    amp_type = 'Negative'
    isPositive = False

    def solve_for_shaper(self, frequency, zeta, deltaT):
        """ Return the shaper impulse amplitudes and times

        Impulse times come from a polynomial curve fit in zeta; the fit is
        only trustworthy for small damping, hence the warning.

        Arguments:
            frequency : the design frequency for the shaper (Hz)
            zeta : the design damping ratio for the shaper
            deltaT : the sampling time (s), used for the digitized version

        Returns:
            shaper : The shaper solution
            digitized_shaper : the digitized version of the shaper
        """
        wn = frequency * HZ_to_rads  # (computed but unused in this method)
        tau = 1.0 / frequency        # undamped vibration period (s)
        if zeta > 0.4:
            warnings.warn('\n \nWARNING: Damping Ratio is probably too large.\n')
        # Define the impulse times (curve-fit coefficients in zeta)
        times = np.array([[0.0],
                          [(0.08945 + 0.28411 * zeta + 0.23013*zeta**2 + 0.16401*zeta**3) * tau],
                          [(0.36613 - 0.08833 * zeta + 0.24048*zeta**2 + 0.17001*zeta**3) * tau],
                          [(0.64277 + 0.29103 * zeta + 0.23262*zeta**2 + 0.43784*zeta**3) * tau],
                          [(0.73228 + 0.00992 * zeta + 0.49385*zeta**2 + 0.38633*zeta**3) * tau]])
        # Define the shaper impulse amplitudes (unity-magnitude, alternating sign)
        amps = np.array([[1.0],[-1.0],[1.0],[-1.0],[1.0]])
        shaper = np.hstack((times, amps))
        digitized_shaper = digseq(shaper, deltaT)
        return shaper, digitized_shaper
#----- Positive EI-Form Shapers (EI, 2-Hump EI, ...) --------------------------
class Tolerable_Level_Shapers(Shaper):
    """ Parent class for all tolerable vibration shapers (EI, SI, etc) """

    def __init__(self, frequency, zeta, Vtol=0.05, deltaT = 0.01):
        """ Shaper Initialization function

        Overrides the Shaper class __init__ to add Vtol.
        Parses the user inputs and calls the solve_for_shaper method
        to get amplitudes and times.

        Arguments:
            frequency : the design frequency for the shaper (Hz)
            zeta : the design damping ratio for the shaper
            Vtol : the tolerable level of vibration, default is 5% = 0.05
            deltaT : the sampling time (s), used for the digitized version, default = 0.01
        """
        self.design_freq = frequency
        self.design_damping = zeta
        self.design_deltaT = deltaT
        self.Vtol = Vtol
        # Subclasses receive Vtol in addition to the base-class arguments
        self.shaper, self.digitized_shaper = self.solve_for_shaper(frequency, zeta, Vtol, deltaT)
        self.times = self.shaper[:,0]   # impulse times (s), column 0
        self.amps = self.shaper[:,1]    # impulse amplitudes, column 1
        self.duration = self.times[-1]  # duration = time of the last impulse

    def __str__(self):
        """ Set up pretty printing of the shaper """
        type = 'Shaper Type \t \t \t {}\n'.format(self.type)
        designfreq = 'Design Frequency \t \t {:0.4f}\t Hz\n'.format(self.design_freq)
        designdamp = 'Design Damping Ratio \t \t {:0.4f}\n'.format(self.design_damping)
        designVtol = 'Design Vtol \t \t \t {:0.4f}\t % \n'.format(self.Vtol*100)
        duration = 'Duration \t \t \t {:0.4f}\t s \n'.format(self.duration)
        shaper = '\n' + ' ti Ai \n{}\n'.format(self.shaper)
        return '\n' + type + designfreq + designdamp + designVtol + duration + shaper
class EI(Tolerable_Level_Shapers):
    """ Class describing a EI shaper """
    type = 'EI'
    amp_type = 'Positive'
    isPositive = True

    def solve_for_shaper(self, frequency, zeta, Vtol, deltaT):
        """ Return the shaper impulse amplitudes and times

        Arguments:
            frequency : the design frequency for the shaper (Hz)
            zeta : the design damping ratio for the shaper
            Vtol : the tolerable level of vibration, 0.05 = 5%
            deltaT : the sampling time (s), used for the digitized version

        Returns:
            shaper : The shaper solution
            digitized_shaper : the digitized version of the shaper
        """
        wn = frequency * HZ_to_rads   # natural frequency (rad/s)
        wd = wn * np.sqrt(1-zeta**2)  # damped frequency (rad/s)
        # Set up the impulse time spacing
        # (NOTE(review): shaperdeltaT is computed but never used below)
        shaperdeltaT = np.pi / (wn*np.sqrt(1-(zeta)**2))
        # Define the impulse times (middle time from a curve fit in Vtol, zeta)
        times = np.array([[0.0],
                          [2.0*np.pi*(0.499899+0.461586*Vtol*zeta + 4.26169*Vtol*zeta**2 + 1.75601*Vtol*zeta**3 + 8.57843*Vtol**2*zeta - 108.644*Vtol**2*zeta**2 + 336.989*Vtol**2*zeta**3) / wd],
                          [2.0 * np.pi/wd]])
        # Define the shaper impulse amplitudes (outer two from curve fits)
        amps = np.array([[0.249684 + 0.249623*Vtol + 0.800081*zeta + 1.23328*Vtol*zeta + 0.495987*zeta**2 + 3.17316*Vtol*zeta**2],
                         [0.0],
                         [0.251489 + 0.21474*Vtol - 0.832493*zeta + 1.41498*Vtol*zeta + 0.851806*zeta**2 - 4.90094*Vtol*zeta**2]])
        # Now add the 2nd impulse: residual so that the amplitudes sum to 1
        amps[1] = 1.0 - (amps[0] + amps[2])
        shaper = np.hstack((times, amps))
        digitized_shaper = digseq(shaper, deltaT)
        return shaper, digitized_shaper
class EI2HUMP(Tolerable_Level_Shapers):
    """ Class describing a Two-hump EI shaper """
    type = 'Two-Hump EI'
    amp_type = 'Positive'
    isPositive = True

    def solve_for_shaper(self, frequency, zeta, Vtol, deltaT):
        """ Return the shaper impulse amplitudes and times

        Uses an exact closed-form solution for the undamped case
        (zeta == 0) and polynomial curve fits otherwise.

        Arguments:
            frequency : the design frequency for the shaper (Hz)
            zeta : the design damping ratio for the shaper
            Vtol : the tolerable level of vibration, 0.05 = 5%
            deltaT : the sampling time (s), used for the digitized version

        Returns:
            shaper : The shaper solution
            digitized_shaper : the digitized version of the shaper
        """
        wn = frequency * HZ_to_rads   # natural frequency (rad/s)
        wd = wn * np.sqrt(1-zeta**2)  # (computed but unused in this method)
        # Set up the impulse time spacing
        tau = 1.0 / frequency         # undamped vibration period (s)
        if zeta == 0.0:
            # Exact undamped solution
            X = (Vtol**2 * (np.sqrt(1-Vtol**2)+1))**(1.0/3)
            # Define the impulse times (evenly spaced at half periods)
            times = np.array([[0.0],
                              [0.5 * tau],
                              [tau],
                              [1.5 * tau]])
            # Define the shaper impulse amplitudes (symmetric outer/inner pairs)
            amps = np.array([[(3*X**2 + 2*X + 3*Vtol**2) / (16*X)],
                             [0.5 - (3*X**2 + 2*X + 3*Vtol**2) / (16*X)],
                             [0.5 - (3*X**2 + 2*X + 3*Vtol**2) / (16*X)],
                             [(3*X**2 + 2*X + 3*Vtol**2) / (16*X)]])
        else:
            # Damped case: curve-fit coefficients in zeta
            # (NOTE(review): these fits appear to assume Vtol = 0.05 -- confirm)
            # Define the impulse times
            times = np.array([[0.0],
                              [(0.4989+0.1627*zeta-0.54262*zeta**2+6.1618*zeta**3) * tau],
                              [(0.99748+0.18382*zeta-1.5827*zeta**2+8.1712*zeta**3) * tau],
                              [(1.4992-0.09297*zeta-0.28338*zeta**2+1.8571*zeta**3) * tau]])
            # Define the shaper impulse amplitudes
            amps = np.array([[0.16054+0.76699*zeta+2.2656*zeta**2-1.2275*zeta**3],
                             [0.33911+0.45081*zeta-2.5808*zeta**2+1.7365*zeta**3],
                             [0.34089-0.61533*zeta-0.68765*zeta**2+0.42261*zeta**3],
                             [0.0]])
            # Last impulse is the residual so the amplitudes sum to 1
            amps[3] = 1.0 - amps[0] - amps[1] - amps[2]
        shaper = np.hstack((times, amps))
        digitized_shaper = digseq(shaper, deltaT)
        return shaper, digitized_shaper
class EI3HUMP(Tolerable_Level_Shapers):
    """ Class describing a Three-hump EI shaper """
    type = 'Three-Hump EI'
    amp_type = 'Positive'
    isPositive = True

    def solve_for_shaper(self, frequency, zeta, Vtol, deltaT):
        """ Return the shaper impulse amplitudes and times

        NOTE(review): this closed form uses only Vtol, not zeta -- it looks
        like the undamped solution; confirm its validity for zeta > 0.

        Arguments:
            frequency : the design frequency for the shaper (Hz)
            zeta : the design damping ratio for the shaper
            Vtol : the tolerable level of vibration, 0.05 = 5%
            deltaT : the sampling time (s), used for the digitized version

        Returns:
            shaper : The shaper solution
            digitized_shaper : the digitized version of the shaper
        """
        wn = frequency * HZ_to_rads   # natural frequency (rad/s)
        wd = wn * np.sqrt(1-zeta**2)  # (computed but unused in this method)
        # Set up the impulse time spacing
        tau = 1.0 / frequency         # undamped vibration period (s)
        # Define the impulse times (evenly spaced at half periods)
        times = np.array([[0.0],
                          [0.5 * tau],
                          [1.0 * tau],
                          [1.5 * tau],
                          [2.0 * tau]])
        # Define the shaper impulse amplitudes (symmetric about the middle)
        amps = np.array([[(1+3*Vtol+2*np.sqrt(2*(Vtol**2+Vtol))) / 16],
                         [(1-Vtol) / 4],
                         [0.0],
                         [(1-Vtol) / 4],
                         [(1+3*Vtol+2*np.sqrt(2*(Vtol**2+Vtol))) / 16]])
        # Middle impulse is the residual so the amplitudes sum to 1
        amps[2] = 1.0 - 2 * (amps[0] + amps[1])
        shaper = np.hstack((times, amps))
        digitized_shaper = digseq(shaper, deltaT)
        return shaper, digitized_shaper
#----- Negative EI-Form Shapers (EI, 2-Hump EI, ...) --------------------------
class UMEI(Tolerable_Level_Shapers):
    """ Class describing a UM-EI shaper """
    type = 'UM-EI'
    amp_type = 'Negative'
    isPositive = False

    def solve_for_shaper(self, frequency, zeta, Vtol, deltaT):
        """ Return the shaper impulse amplitudes and times

        Curve fits exist only for Vtol of 5% or 1.25%.

        Arguments:
            frequency : the design frequency for the shaper (Hz)
            zeta : the design damping ratio for the shaper
            Vtol : The tolerable level of vibration 0.05 = 5%
            deltaT : the sampling time (s), used for the digitized version

        Returns:
            shaper : The shaper solution
            digitized_shaper : the digitized version of the shaper
        """
        wn = frequency * HZ_to_rads  # (computed but unused in this method)
        tau = 1.0 / frequency        # undamped vibration period (s)
        if zeta > 0.4:
            warnings.warn('\n \nWARNING: Damping Ratio is probably too large.\n')
        # Define the impulse times
        times = np.zeros((5,1))
        if Vtol == 0.05:
            times[0] = 0
            times[1] = (0.09374 + 0.31903 * zeta + 0.13582 * zeta**2 + 0.65274 * zeta**3) * tau
            times[2] = (0.36798 - 0.05894 * zeta + 0.13641 * zeta**2 + 0.63266 * zeta**3) * tau
            times[3] = (0.64256 + 0.28595 * zeta + 0.26334 * zeta**2 + 0.24999 * zeta**3) * tau
            times[4] = (0.73664 + 0.00162 * zeta + 0.52749 * zeta**2 + 0.19208 * zeta**3) * tau
        elif Vtol == 0.0125:
            times[0] = 0
            times[1] = (0.09051 + 0.29315 * zeta + 0.20436 * zeta**2 + 0.29053 * zeta**3) * tau
            times[2] = (0.36658 - 0.081044 * zeta + 0.21524 * zeta**2 + 0.27994 * zeta**3) * tau
            times[3] = (0.64274 + 0.28822 * zeta + 0.25424 * zeta**2 + 0.34977 * zeta**3) * tau
            times[4] = (0.73339 + 0.006322 * zeta + 0.51595 * zeta**2 + 0.29764 * zeta**3) * tau
        else:
            # NOTE(review): only warns -- execution continues with all-zero
            # impulse times, producing a degenerate shaper. Consider raising.
            warnings.warn('Only V = 0.05 or V = 0.0125 can be used at this time.\n')
        # Define the shaper impulse amplitudes (unity-magnitude, alternating sign)
        amps = np.array([[1.0],[-1.0],[1.0],[-1.0],[1.0]])
        shaper = np.hstack((times, amps))
        digitized_shaper = digseq(shaper, deltaT)
        return shaper, digitized_shaper
class UM2EI(Tolerable_Level_Shapers):
    """ Class describing a UM-Two-Hump EI shaper """
    type = 'UM-Two-Hump EI'
    amp_type = 'Negative'
    isPositive = False

    def solve_for_shaper(self, frequency, zeta, Vtol, deltaT):
        """ Return the shaper impulse amplitudes and times

        Curve fits exist only for Vtol of 5% or 1.25%.

        Arguments:
            frequency : the design frequency for the shaper (Hz)
            zeta : the design damping ratio for the shaper
            Vtol : The tolerable level of vibration 0.05 = 5%
            deltaT : the sampling time (s), used for the digitized version

        Returns:
            shaper : The shaper solution
            digitized_shaper : the digitized version of the shaper
        """
        wn = frequency * HZ_to_rads  # (computed but unused in this method)
        tau = 1.0 / frequency        # undamped vibration period (s)
        if zeta > 0.4:
            warnings.warn('\n \nWARNING: Damping Ratio is probably too large.\n')
        # Define the impulse times
        times = np.zeros((7,1))
        if Vtol == 0.05:
            times[0] = 0.0
            times[1] = (0.059696 + 0.3136 * zeta + 0.31759 * zeta**2 + 1.5872 * zeta**3) * tau
            times[2] = (0.40067 - 0.085698 * zeta + 0.14685 * zeta**2 + 1.6059 * zeta**3) * tau
            times[3] = (0.59292 + 0.38625 * zeta + 0.34296 * zeta**2 + 1.2889 * zeta**3) * tau
            times[4] = (0.78516 - 0.088283 * zeta + 0.54174 * zeta**2 + 1.3883 * zeta**3) * tau
            times[5] = (1.1264 + 0.20919 * zeta + 0.44217 * zeta**2 + 0.30771 * zeta**3) * tau
            times[6] = (1.1864 - 0.029931 * zeta + 0.79859 * zeta**2 + 0.10478 * zeta**3) * tau
        elif Vtol == 0.0125:
            times[0] = 0
            times[1] = (0.052025 + 0.25516 * zeta + 0.33418 * zeta**2 + 0.70993 * zeta**3) * tau
            times[2] = (0.39946 - 0.13396 * zeta + 0.23553 * zeta**2 + 0.59066 * zeta**3) * tau
            times[3] = (0.58814 + 0.33393 * zeta + 0.4242 * zeta**2 + 0.4844 * zeta**3) * tau
            times[4] = (0.77682 - 0.13392 * zeta + 0.61271 * zeta**2 + 0.63186 * zeta**3) * tau
            times[5] = (1.1244 + 0.21132 * zeta + 0.55855 * zeta**2 + 0.12884 * zeta**3) * tau
            times[6] = (1.1765 - 0.016188 * zeta + 0.9134 * zeta**2 - 0.068185 * zeta**3) * tau
        else:
            # NOTE(review): only warns -- execution continues with all-zero
            # impulse times, producing a degenerate shaper. Consider raising.
            warnings.warn('\n \nOnly V = 0.05 or V = 0.0125 can be used at this time.\n')
        # Define the shaper impulse amplitudes (unity-magnitude, alternating sign)
        amps = np.array([[1.0],[-1.0],[1.0],[-1.0],[1.0],[-1.0],[1.0]])
        shaper = np.hstack((times, amps))
        digitized_shaper = digseq(shaper, deltaT)
        return shaper, digitized_shaper
class UM3EI(Tolerable_Level_Shapers):
    """ Class describing a UM-Three-Hump EI shaper """
    type = 'UM-Three-Hump EI'
    amp_type = 'Negative'
    isPositive = False

    def solve_for_shaper(self, frequency, zeta, Vtol, deltaT):
        """ Return the shaper impulse amplitudes and times

        Curve fits exist only for Vtol of 5% or 1.25%.

        Arguments:
            frequency : the design frequency for the shaper (Hz)
            zeta : the design damping ratio for the shaper
            Vtol : The tolerable level of vibration 0.05 = 5%
            deltaT : the sampling time (s), used for the digitized version

        Returns:
            shaper : The shaper solution
            digitized_shaper : the digitized version of the shaper
        """
        wn = frequency * HZ_to_rads  # (computed but unused in this method)
        tau = 1.0 / frequency        # undamped vibration period (s)
        if zeta > 0.4:
            warnings.warn('\n \nWARNING: Damping Ratio is probably too large.\n')
        # Define the impulse times
        times = np.zeros((9,1))
        if Vtol == 0.05:
            times[0] = 0
            times[1] = (0.042745 + 0.31845 * zeta + 0.46272 * zeta**2 + 3.3763 * zeta**3) * tau
            times[2] = (0.42418 - 0.05725 * zeta + 0.049893 * zeta**2 + 3.9768 * zeta**3) * tau
            times[3] = (0.56353 + 0.48068 * zeta + 0.38047 * zeta**2 + 4.2431 * zeta**3) * tau
            times[4] = (0.83047 - 0.097848 * zeta + 0.34048 * zeta**2 + 4.4245 * zeta**3) * tau
            times[5] = (1.0976 + 0.38825 * zeta + 0.3529 * zeta**2 + 2.9484 * zeta**3) * tau
            times[6] = (1.2371 - 0.08706 * zeta + 0.81706 * zeta**2 + 2.8367 * zeta**3) * tau
            times[7] = (1.6189 + 0.099638 * zeta + 0.4278 * zeta**2 + 1.3151 * zeta**3) * tau
            times[8] = (1.6619 - 0.097105 * zeta + 0.80045 * zeta**2 + 1.0057 * zeta**3) * tau
        elif Vtol == 0.0125:
            times[0] = 0
            times[1] = (0.032665 + 0.23238 * zeta + 0.33164 * zeta**2 + 1.8423 * zeta**3) * tau
            times[2] = (0.42553 - 0.12863 * zeta + 0.052687 * zeta**2 + 1.7964 * zeta**3) * tau
            times[3] = (0.55502 + 0.36614 * zeta + 0.50008 * zeta**2 + 1.7925 * zeta**3) * tau
            times[4] = (0.82296 - 0.19383 * zeta + 0.45316 * zeta**2 + 2.0989 * zeta**3) * tau
            times[5] = (1.091 + 0.31654 * zeta + 0.46985 * zeta**2 + 1.2683 * zeta**3) * tau
            times[6] = (1.2206 - 0.14831 * zeta + 0.93082 * zeta**2 + 1.2408 * zeta**3) * tau
            times[7] = (1.6137 + 0.1101 * zeta + 0.68318 * zeta**2 + 0.18725 * zeta**3) * tau
            times[8] = (1.6466 - 0.063739 * zeta + 1.0423 * zeta**2 - .10591 * zeta**3) * tau
        else:
            # NOTE(review): only warns -- execution continues with all-zero
            # impulse times, producing a degenerate shaper. Consider raising.
            warnings.warn('\n \nOnly V = 0.05 or V = 0.0125 can be used at this time.\n')
        # Define the shaper impulse amplitudes (unity-magnitude, alternating sign)
        amps = np.array([[1.0],[-1.0],[1.0],[-1.0],[1.0],[-1.0],[1.0],[-1.0],[1.0]])
        shaper = np.hstack((times, amps))
        digitized_shaper = digseq(shaper, deltaT)
        return shaper, digitized_shaper
#------ 2-Mode Shapers ---------------------------------------------------------
class Two_Mode_Shaper(Shaper):
    """ Parent class for all Two-Mode Shapers """

    def __init__(self, frequency1, zeta1, frequency2, zeta2, deltaT = 0.01):
        """ Shaper Initialization function

        Overrides the Shaper class __init__ to add 2nd mode parameters.
        Parses the user inputs and calls the solve_for_shaper method
        to get amplitudes and times.

        Arguments:
            frequency1 : the design frequency for first mode (Hz)
            zeta1 : damping ratio for the first mode
            frequency2 : design frequency for the second mode (Hz)
            zeta2 : damping ratio for the second mode
            deltaT : the sampling time (s), used for the digitized version, default = 0.01
        """
        self.design_freq_1 = frequency1
        self.design_freq_2 = frequency2
        self.design_damping_1 = zeta1
        self.design_damping_2 = zeta2
        self.design_deltaT = deltaT
        self.shaper, self.digitized_shaper = self.solve_for_shaper(frequency1, zeta1, frequency2, zeta2, deltaT)
        self.times = self.shaper[:,0]   # impulse times (s), column 0
        self.amps = self.shaper[:,1]    # impulse amplitudes, column 1
        self.duration = self.times[-1]  # duration = time of the last impulse

    def __str__(self):
        """ Set up pretty printing of the shaper """
        type = 'Shaper Type \t \t \t {}\n'.format(self.type)
        designfreq1 = 'Mode 1 Design Frequency \t {:0.4f}\t Hz\n'.format(self.design_freq_1)
        designdamp1 = 'Mode 1 Damping Ratio \t \t {:0.4f}\n'.format(self.design_damping_1)
        designfreq2 = 'Mode 2 Design Frequency \t {:0.4f}\t Hz\n'.format(self.design_freq_2)
        designdamp2 = 'Mode 2 Damping Ratio \t \t {:0.4f}\n'.format(self.design_damping_2)
        duration = 'Duration \t \t \t {:0.4f}\t s \n'.format(self.duration)
        shaper = '\n' + ' ti Ai \n{}\n'.format(self.shaper)
        return '\n' + type + designfreq1 + designdamp1 + designfreq2 + designdamp2 + duration + shaper
class ZV_2mode(Two_Mode_Shaper):
    """Two-mode ZV shaper, formed by convolving one ZV shaper per mode."""

    type = 'Two-Mode ZV'
    amp_type = 'Positive'
    isPositive = True

    def solve_for_shaper(self, frequency1, zeta1, frequency2, zeta2, deltaT):
        """Build a ZV shaper for each mode, convolve them, and digitize.

        Returns:
            (exact convolved shaper, digitized version sampled at deltaT)
        """
        first_mode = ZV(frequency1, zeta1, deltaT)
        second_mode = ZV(frequency2, zeta2, deltaT)
        combined = seqconv(first_mode.shaper, second_mode.shaper)
        return combined, digseq(combined, deltaT)
class ZVD_2mode(Two_Mode_Shaper):
    """ Class describing a two-mode ZVD shaper - created by convolving two ZVD shapers"""
    type = 'Two-Mode ZVD'
    amp_type = 'Positive'
    isPositive = True

    def solve_for_shaper(self, frequency1, zeta1, frequency2, zeta2, deltaT):
        """ Return the shaper impulse amplitudes and times

        Arguments:
            frequency1/zeta1 : design frequency (Hz) and damping ratio for mode 1
            frequency2/zeta2 : design frequency (Hz) and damping ratio for mode 2
            deltaT : the sampling time (s), used for the digitized version

        Returns:
            shaper : The shaper solution (convolution of the two ZVD shapers)
            digitized_shaper : the digitized version of the shaper
        """
        zvd1 = ZVD(frequency1, zeta1, deltaT)
        zvd2 = ZVD(frequency2, zeta2, deltaT)
        shaper = seqconv(zvd1.shaper, zvd2.shaper)
        digitized_shaper = digseq(shaper, deltaT)
        return shaper, digitized_shaper
class ZVDD_2mode(Two_Mode_Shaper):
    """ Class describing a two-mode ZVDD shaper - created by convolving two ZVDD shapers"""
    type = 'Two-Mode ZVDD'
    amp_type = 'Positive'
    isPositive = True

    def solve_for_shaper(self, frequency1, zeta1, frequency2, zeta2, deltaT):
        """ Return the shaper impulse amplitudes and times

        Arguments:
            frequency1/zeta1 : design frequency (Hz) and damping ratio for mode 1
            frequency2/zeta2 : design frequency (Hz) and damping ratio for mode 2
            deltaT : the sampling time (s), used for the digitized version

        Returns:
            shaper : The shaper solution (convolution of the two ZVDD shapers)
            digitized_shaper : the digitized version of the shaper
        """
        zvdd1 = ZVDD(frequency1, zeta1, deltaT)
        zvdd2 = ZVDD(frequency2, zeta2, deltaT)
        shaper = seqconv(zvdd1.shaper, zvdd2.shaper)
        digitized_shaper = digseq(shaper, deltaT)
        return shaper, digitized_shaper
class ZVDDD_2mode(Two_Mode_Shaper):
    """ Class describing a two-mode ZVDDD shaper - created by convolving two ZVDDD shapers"""
    type = 'Two-Mode ZVDDD'
    amp_type = 'Positive'
    isPositive = True

    def solve_for_shaper(self, frequency1, zeta1, frequency2, zeta2, deltaT):
        """ Return the shaper impulse amplitudes and times

        Arguments:
            frequency1/zeta1 : design frequency (Hz) and damping ratio for mode 1
            frequency2/zeta2 : design frequency (Hz) and damping ratio for mode 2
            deltaT : the sampling time (s), used for the digitized version

        Returns:
            shaper : The shaper solution (convolution of the two ZVDDD shapers)
            digitized_shaper : the digitized version of the shaper
        """
        zvddd1 = ZVDDD(frequency1, zeta1, deltaT)
        zvddd2 = ZVDDD(frequency2, zeta2, deltaT)
        shaper = seqconv(zvddd1.shaper, zvddd2.shaper)
        digitized_shaper = digseq(shaper, deltaT)
        return shaper, digitized_shaper
class ZV_EI_2mode(Two_Mode_Shaper):
    """ Class describing a two-mode ZV-EI shaper - created by convolving a
    ZV shaper for mode 1 with an EI shaper for mode 2 """
    # BUG FIX: label was the copy-pasted value 'Two-Mode ZV' (identical to
    # ZV_2mode), which mislabeled this shaper in __str__ output.
    type = 'Two-Mode ZV-EI'
    amp_type = 'Positive'
    isPositive = True

    def solve_for_shaper(self, frequency1, zeta1, frequency2, zeta2, deltaT):
        """ Return the shaper impulse amplitudes and times

        Arguments:
            frequency1/zeta1 : design frequency (Hz) and damping ratio for mode 1 (ZV)
            frequency2/zeta2 : design frequency (Hz) and damping ratio for mode 2 (EI)
            deltaT : the sampling time (s), used for the digitized version

        Returns:
            shaper : The shaper solution (convolution of the ZV and EI shapers)
            digitized_shaper : the digitized version of the shaper
        """
        zv1 = ZV(frequency1, zeta1, deltaT)
        # BUG FIX: deltaT was previously passed positionally into EI's Vtol
        # slot (EI.__init__ is (frequency, zeta, Vtol=0.05, deltaT=0.01)),
        # so the EI stage was built with Vtol = deltaT and the default
        # sample time. Pass it by keyword to hit the intended parameter.
        ei = EI(frequency2, zeta2, deltaT=deltaT)
        shaper = seqconv(zv1.shaper, ei.shaper)
        digitized_shaper = digseq(shaper, deltaT)
        return shaper, digitized_shaper
#----- Utility Functions ------------------------------------------------------
def digseq(seq, step):
    """Map a continuous impulse sequence onto a digital timing loop.

    Original MATLAB preamble
    digseq - Whit Rappole
    DIGITIZESEQ Map a sequence onto digital timing loop
    dseq = digseq(seq,step)
    Uses a linear extrapolation to split each continuous
    impulse into two digital impulses
    Converted to Python on 2/18/13 by Joshua Vaughan (joshua.vaughan@louisiana.edu)"""
    num_samples = int(round(seq[-1, 0] / step)) + 2
    dseq = np.zeros((num_samples, 1))
    # Split each impulse across the two neighboring sample times, weighting
    # by how far into the sample interval the impulse falls.
    for row in range(len(seq)):
        sample = int(np.floor(seq[row, 0] / step))
        fraction = (seq[row, 0] - sample * step) / step
        dseq[sample + 1] += fraction * seq[row, 1]
        dseq[sample] += (1 - fraction) * seq[row, 1]
    # Trim trailing zero samples.
    while dseq[-1] == 0:
        dseq = dseq[:-1]
    return dseq
def conv(Input, Shaper, deltaT):
    """Convolve Input with Shaper and return the shaped command.

    Original MATLAB preamble
    Convolve(Input,Shaper,deltaT) -- Bill Singhose
    function [T,ShapedInput] = convolve(Input,Shaper,deltaT)
    Covolves Input and Shaper and returns the result ShapedInput.
    A time vector, T, which has the same number of rows as
    ShapedInput is also returned. T starts at zero and is incremented
    deltaT each step.
    Input can be an nxm matrix, where m is the number of inputs.
    Shaper must be a row or column vector.
    Converted to Python on 2/19/13 by Joshua Vaughan (joshua.vaughan@louisiana.edu)
    """
    # Determine whether Input is a 1-D vector or an n x m matrix.
    if np.size(Input.shape) == 1:
        columns = 0
        rows = Input.shape[0]
    else:
        (rows, columns) = Input.shape
    shlen = len(Shaper)
    # Pad the Input with shlen extra copies of its final value so the shaped
    # command holds the last value through the shaper duration.
    # BUGFIX: the original looped `for jj in range(columns)`, which never ran
    # for 1-D input (columns == 0), and its 1-D branch referenced an
    # undefined name `ones`. Leftover debug prints were also removed.
    if columns == 0:
        Input = np.append(Input, Input[-1] * np.ones(shlen))
    else:
        Input = np.append(Input, Input[rows - 1, -1] * np.ones((shlen, columns)), 0)
    # Reshape into vectors for convolution
    Input = Input.reshape(len(Input),)
    Shaper = Shaper.reshape(len(Shaper),)
    ShInput = np.convolve(Input, Shaper)
    # Delete convolution remainder
    ShapedInput = ShInput[0:rows + shlen - 1]
    # Define end of command time and round to account for numerical errors
    end_time = (len(ShapedInput)) * deltaT
    end_time = np.round(end_time, int(np.abs(np.log10(deltaT))))
    # Create the "shaped" time vector to output
    T = np.arange(0, end_time, deltaT)
    # arange may come up short from floating-point end_time; zero-pad to match.
    error = len(T) - len(ShapedInput)
    if error > 0:
        ShapedInput = np.append(ShapedInput, np.zeros(error))
    # return the "shaped" time vector and the shaped input
    return T, ShapedInput
def sensplot(seq, fmin, fmax, zeta, numpoints=2000, plotflag=0):
    """Compute (and optionally plot) the residual vibration over a frequency range.

    Original MATLAB preamble
    sensplot Plot the residual over range of frequencies
    list = sensplot(seq,fmin,fmax,zeta,numpoints,plotflag)
    seq is the shaping sequence
    fmin is the low end of the frequency range
    fmax is the high end of the frequency range
    zeta is the damping ratio of the system
    numpoints is the number of points to calculate, default is 2000
    plotflag plots the data if plotflag=1, default is 0
    Converted to Python on 2/26/13 by Joshua Vaughan (joshua.vaughan@louisiana.edu)"""
    fmax = float(fmax)  # force floating-point math for the sweep below
    df = (fmax - fmin) / numpoints
    rows, cols = np.shape(seq)
    tn = seq[-1, 0]  # time of the final impulse
    frequency = np.zeros((numpoints, 1))
    amplitude = np.zeros((numpoints, 1))
    for point in range(numpoints):
        omega = (fmin + point * df) * 2 * np.pi
        # Accumulate the sine and cosine residual terms over all impulses
        # (t(i) is seq[i,0], A(i) is seq[i,1]).
        sin_sum = 0
        cos_sum = 0
        for imp in range(rows):
            growth = seq[imp, 1] * np.exp(zeta * omega * seq[imp, 0])
            sin_sum += growth * np.sin(omega * np.sqrt(1 - zeta**2) * seq[imp, 0])
            cos_sum += growth * np.cos(omega * np.sqrt(1 - zeta**2) * seq[imp, 0])
        frequency[point, 0] = omega / 2.0 / np.pi
        amplitude[point, 0] = np.exp(-zeta * omega * tn) * np.sqrt(sin_sum**2 + cos_sum**2)
    if plotflag == 1:
        plt.plot(frequency, amplitude * 100)
        plt.xlabel(r'Frequency (Hz)', fontsize=22, weight='bold', labelpad=5)
        plt.ylabel(r'Percentage Vibration', fontsize=22, weight='bold', labelpad=8)
        plt.show()
    return frequency, amplitude
def seqconv(shaper1, shaper2):
    """ Original MATLAB preamble
    SEQUENCECONVOLVE Convolve two continuous sequences together.
    seq = seqconv(seq1,seq2)
    Convolves two sequences together.
    A Sequence is an n*2 matrix with impulse times (sec) in
    the first column and amplitudes in the second column.
    Parameters:
        shaper1, shaper2 the two sequences to convolve together.
    Returns:
        seq, the sequence resulting from the convolution.
    Converted to Python on 01/16/15 by Joshua Vaughan - joshua.vaughan@louisiana.edu
    """
    n1 = np.shape(shaper1)[0]
    n2 = np.shape(shaper2)[0]
    combined = np.zeros((n1 * n2, 2))
    # Each impulse pair produces one impulse: times add, amplitudes multiply.
    pos = 0
    for ii in range(n1):
        for jj in range(n2):
            combined[pos, 0] = shaper1[ii, 0] + shaper2[jj, 0]
            combined[pos, 1] = shaper1[ii, 1] * shaper2[jj, 1]
            pos += 1
    # Sort by time and merge coincident impulses.
    return np.asarray(seqsort(combined))
def seqsort(shaper_sequence):
    """ Function to sort a shaper sequence
    Used mainly in the solution of two-mode shapers. Following convolution
    these shapers are often mis-ordered or have multiple impulses at
    identical times. This function sorts the impulses according to time, then
    attempts to resolve any multi-impulse time locations.
    Arguments:
        shaper_sequence : A typical [ti Ai] Nx2 shaper array
    Returns:
        sorted : The properly sorted and possibly shortened version
                 of shaper_sequence
    Created: 01/16/15 - Joshua Vaughan - joshua.vaughan@louisiana.edu
    """
    # Sort the sequence according to the impulse times (in the first column)
    time_sorted = shaper_sequence[shaper_sequence[:,0].argsort()]
    # Check if the number of unique times is equal to the number of rows
    if len(np.unique(time_sorted[:,0])) != len(time_sorted[:,0]):
        # If the lengths are not equal, there is a repeated time.
        # Find it and combine the impulse amplitudes
        shortened = np.zeros((len(np.unique(time_sorted[:,0])), 2))
        row = 0   # NOTE(review): unused
        ii = 0    # NOTE(review): unused
        index = 0
        for time, amp in time_sorted:
            # `shortened` starts as all zeros, so any impulse at time 0
            # matches an existing row below; the repeated-time branch
            # therefore also handles the (single) zero-time impulse and
            # advances `index` for it via the `time == 0` check.
            if time in shortened[:, 0]:
                # Repeated time: merge amplitudes into the existing row.
                repeating_row = np.where(time==shortened[:, 0])[0][0]
                shortened[repeating_row, 1] = shortened[repeating_row, 1] + amp
                if time == 0:
                    index += 1
            else:
                # New time: append at the next free row.
                shortened[index, :] = np.array([time, amp])
                index += 1
        sorted = shortened   # NOTE(review): shadows the builtin `sorted`
    else:
        sorted = time_sorted
    return sorted
def bang_bang(CurrTime, Amax, Vmax, Distance, StartTime=0.0):
    """
    Create a bang-bang or bang-coast-bang acceleration command.

    Arguments:
        CurrTime : the current timestep (or an array of times)
        Amax : maximum acceleration of the command
        Vmax : maximum velocity of the resulting command
        Distance : how far the system would move from this command
        StartTime : when the command should begin

    Returns:
        The acceleration command at CurrTime (elementwise if an array of
        times was passed).
    """
    Distance = np.round(Distance, 2)
    # Switch times for a bang-coast-bang profile; rounding suppresses
    # floating-point noise in the time comparisons below.
    t1 = StartTime
    t2 = np.round(Vmax / Amax, 10) + t1
    t3 = np.round(np.abs(Distance) / Vmax, 10) + t1
    t4 = (t2 + t3) - t1
    end_time = t4
    # Negative moves simply flip the acceleration sign.
    if Distance < 0.:
        Amax = -Amax
    if t3 <= t2:
        # Vmax is never reached: pure bang-bang with equal accel/decel halves.
        half = np.sqrt(np.round(np.abs(Distance) / Amax, 10))
        t2 = half + t1
        t3 = 2.0 * half + t1
        end_time = t3
        return Amax * (CurrTime > t1) - 2 * Amax * (CurrTime > t2) + Amax * (CurrTime > t3)
    # Bang-coast-bang: accelerate, coast at Vmax, then decelerate.
    return Amax * (CurrTime > t1) - Amax * (CurrTime > t2) - Amax * (CurrTime > t3) + Amax * (CurrTime > t4)
def impulse(CurrTime, Amax, Vmax, StartTime=0.0):
    """
    Create a single acceleration pulse: Amax from StartTime until Vmax would
    be reached (Vmax/Amax seconds later), then zero.

    (BUGFIX: the previous docstring was copied from bang_bang and documented
    a nonexistent Distance argument and the wrong command shape.)

    Arguments:
        CurrTime : The current timestep (or an array of times)
        Amax : maximum acceleration of the command
        Vmax : maximum velocity of the resulting command
        StartTime : When the command should begin
    Returns :
        The acceleration command for the current timestep CurrTime or, if an
        array of times was passed, the array representing the input over that
        time period.
    """
    # Pulse start and end times; round to suppress floating-point noise.
    t1 = StartTime
    t2 = np.round(Vmax/Amax, 10) + t1
    accel = Amax*(CurrTime > t1) - Amax*(CurrTime > t2)
    return accel
def step_input(CurrTime, Amp, StartTime=0.0):
    """
    Generate a step input.

    Arguments:
        CurrTime : the current timestep (scalar or array)
        Amp : the size of the step
        StartTime : the time at which the step occurs
    Returns:
        The step value at CurrTime (elementwise for arrays): 0 before
        StartTime, Amp after it.
    """
    is_after_start = CurrTime > StartTime
    return Amp * is_after_start
def s_curve(CurrTime, Amp, RiseTime, StartTime):
    """
    Generate an s-curve command.

    Arguments:
        CurrTime : the current timestep or an array of times
        Amp : the magnitude of the s-curve (final setpoint)
        RiseTime : the rise time of the curve
        StartTime : the time at which the command begins
    Returns:
        The command value at CurrTime (elementwise for arrays).
    """
    # Work in time relative to the start of the command.
    tau = CurrTime - StartTime
    ramp = tau / RiseTime
    # Piecewise quadratic: concave-up half, concave-down half, then hold at 1.
    first_half = 2.0 * ramp**2 * (tau >= 0) * (tau < RiseTime / 2)
    second_half = (-2.0 * ramp**2 + 4.0 * ramp - 1.0) * (tau >= RiseTime / 2) * (tau < RiseTime)
    settled = 1.0 * (tau >= RiseTime)
    return Amp * (first_half + second_half + settled)
def jerk_reduction(CurrTime,Distance,Amax,Vmax,StartTime):
    """Generate a reduced-jerk (raised-cosine) acceleration command.

    Arguments:
        CurrTime : array of timesteps (uniform spacing is assumed; the step
                   size is taken from CurrTime[1] - CurrTime[0])
        Distance : distance the command should move the system
        Amax : maximum acceleration
        Vmax : maximum velocity
        StartTime : when the command should begin
    Returns:
        accel : array of acceleration values, same shape as CurrTime
    """
    # These are the times for a bang-coast-bang input
    # NOTE(review): t1 is 0.*StartTime, i.e. always 0 — StartTime only shifts
    # the cosine phase below; confirm that is the intended behavior.
    t1 = 0. * StartTime
    t2 = 2 * (Vmax/Amax) + t1
    t3 = 2 * (Distance - 2 * Vmax**2/Amax) / (Amax * t2) + t2
    t4 = (t2 + t3)-t1
    end_time = t4
    time_step = CurrTime[1] - CurrTime[0]
    accel = np.zeros_like(CurrTime)
    if Distance <= 2 * Vmax**2 / Amax: # command should be bang-bang, not bang-coast-bang
        # Vmax is never reached: accel and decel pulses meet at t2 == t3.
        t2 = np.sqrt(2*Distance/Amax)+t1
        t3 = t2
        t4 = np.sqrt(2*Distance/Amax)+t2
    # Convert the switch times to sample indices into CurrTime.
    t1_step = 0 #np.round(t1 / time_step).astype(int)
    t2_step = np.round(t2 / time_step).astype(int)
    t3_step = np.round(t3 / time_step).astype(int)
    t4_step = np.round(t4 / time_step).astype(int)
    # Raised-cosine acceleration pulse, then a matching deceleration pulse.
    accel[t1_step:t2_step] = (Amax/2) * (1 - np.cos(2*np.pi*(CurrTime[t1_step:t2_step] - StartTime)/t2))
    accel[t3_step:t4_step] = - (Amax/2) * (1 - np.cos(2*np.pi*(CurrTime[t3_step:t4_step] - t3 - StartTime)/t2))
    return accel
def shaped_input(unshaped_func, CurrTime, Shaper, *args):
    """
    Create a shaped input from a function producing an unshaped command.

    Arguments:
        unshaped_func : function representing the unshaped command; must
                        accept the current timestep as its first argument
        CurrTime : the current timestep to determine the input for
        Shaper : the shaper to use, in [ti Ai] form
        *args : optional arguments passed through to unshaped_func
    Returns:
        shaped : the current timestep of the shaped command
    """
    # Superpose one time-shifted, amplitude-scaled copy of the unshaped
    # command per shaper impulse.
    return sum(
        impulse_amp * unshaped_func(CurrTime - impulse_time, *args)
        for impulse_time, impulse_amp in Shaper
    )
|
import numpy as np
def loadDataSet(fileName):
    """Load a tab-separated dataset with two feature columns and a label.

    Arguments:
        fileName : path to a file whose lines are "x1<TAB>x2<TAB>label"
    Returns:
        dataMat : list of [x1, x2] float pairs
        labelMat : list of float labels
    """
    dataMat = []
    labelMat = []
    # Context manager guarantees the file is closed even on a parse error
    # (the original leaked the file handle).
    with open(fileName) as fr:
        for line in fr:
            lineArr = line.strip().split('\t')
            dataMat.append([float(lineArr[0]), float(lineArr[1])])
            labelMat.append(float(lineArr[2]))
    return dataMat, labelMat
def selectJrand(i, m):
    """Return a random alpha index j in [0, m) with j != i."""
    while True:
        candidate = int(np.random.uniform(0, m))
        if candidate != i:
            return candidate
def clipAlpha(aj, H, L):
    """Clip alpha value aj to the interval [L, H].

    Mirrors the original order of operations: cap at the upper bound H
    first, then floor at the lower bound L.
    """
    return max(min(aj, H), L)
def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
    """Simplified SMO: solve the SVM dual problem for alphas and bias b.

    Arguments:
        dataMatIn : training data, one sample per row
        classLabels : labels (+1/-1), one per sample
        C : slack penalty (upper bound on every alpha)
        toler : KKT-violation tolerance
        maxIter : consecutive sweeps with no alpha change before stopping
    Returns:
        b : scalar bias
        alphas : m x 1 matrix of Lagrange multipliers
    """
    dataMatrix = np.mat(dataMatIn)
    labelMat = np.mat(classLabels).transpose()
    b = 0
    m , n = np.shape(dataMatrix)
    alphas = np.mat(np.zeros((m, 1)))
    iter = 0
    while (iter < maxIter):
        alphaPairsChanged = 0
        for i in range(m):
            # Decision value and prediction error for sample i.
            fXi = float(np.multiply(alphas, labelMat).T * (dataMatrix * dataMatrix[i, :].T)) + b
            Ei = fXi - float(labelMat[i])
            # KKT violation check. BUGFIX: the first comparison must be
            # against -toler (as in optStruct.innerL and Platt's SMO);
            # the original compared against +toler.
            if ((labelMat[i] * Ei < -toler) and (alphas[i] < C)) or ((labelMat[i] * Ei > toler) and (alphas[i] > 0)):
                # Pick a random second alpha and compute its error.
                j = selectJrand(i, m)
                fXj = float(np.multiply(alphas, labelMat).T * (dataMatrix * dataMatrix[j, :].T)) + b
                Ej = fXj - float(labelMat[j])
                alphaIold = alphas[i].copy()
                alphaJold = alphas[j].copy()
                # Feasible box bounds for alpha j (keeps the sum constraint).
                if (labelMat[i] != labelMat[j]):
                    L = max(0, alphas[j] - alphas[i])
                    H = min(C, C + alphas[j] - alphas[i])
                else:
                    L = max(0, alphas[j] + alphas[i] - C)
                    H = min(C, alphas[j] + alphas[i])
                if L == H:
                    print('iter: {0}, i:{1}, alphaPairsChanged: {2}, L == H'.format(iter, i, alphaPairsChanged))
                    continue
                # eta is the (negative) second derivative of the objective.
                eta = 2.0 * dataMatrix[i, :] * dataMatrix[j, : ].T - dataMatrix[i, :] * dataMatrix[i, :].T - dataMatrix[j, :] * dataMatrix[j, :].T
                if eta >= 0:
                    print('iter: {0}, i:{1}, alphaPairsChanged: {2}, eta >= 0'.format(iter, i, alphaPairsChanged))
                    continue
                # Update alpha j along the constraint, then clip to [L, H].
                alphas[j] -= labelMat[j] * (Ei - Ej) / eta
                alphas[j] = clipAlpha(alphas[j], H, L)
                if (abs(alphas[j] - alphaJold) < 0.00001) :
                    print('iter: {0}, i:{1}, alphaPairsChanged: {2}, j not moving enough'.format(iter, i, alphaPairsChanged))
                    continue
                # Move alpha i by the opposite amount to preserve the constraint.
                alphas[i] += labelMat[j] * labelMat[i] * (alphaJold - alphas[j])
                # Recompute bias candidates from both updated alphas.
                b1 = b - Ei - labelMat[i] * (alphas[i] - alphaIold) * dataMatrix[i, :] * dataMatrix[i, :].T - labelMat[j] * \
                     (alphas[j] - alphaJold) * dataMatrix[i, :] * dataMatrix[j, :].T
                b2 = b - Ej - labelMat[i] * (alphas[i] - alphaIold) * dataMatrix[i, : ] * dataMatrix[j, :].T - labelMat[j] * \
                     (alphas[j] - alphaJold) * dataMatrix[j, :] * dataMatrix[j, :].T
                if (0 < alphas[i]) and (C > alphas[i]):
                    b = b1
                elif (0 < alphas[j]) and (C > alphas[j]):
                    b = b2
                else :
                    b = (b1 + b2) / 2.0
                alphaPairsChanged += 1
                print('iter: {0}, i:{1}, alphaPairsChanged: {2}, alpha changed'.format(iter, i, alphaPairsChanged))
            else:
                print('iter: {0}, i:{1}, alphaPairsChanged: {2}, not adjust'.format(iter, i, alphaPairsChanged))
        # Only count a sweep toward maxIter when nothing changed.
        if (alphaPairsChanged == 0):
            iter += 1
        else :
            iter = 0
        print('iteration number:{0}'.format(iter))
    return b, alphas
def test():
    """Run simplified SMO on testSet.txt and print the support vectors."""
    dataArr, labelArr = loadDataSet('testSet.txt')
    b, alphas = smoSimple(dataArr, labelArr, 0.6, 0.001, 40)
    print(b)
    print(alphas[alphas > 0])
    print(np.shape(alphas[alphas > 0]))
    # List the samples that ended up as support vectors.
    for idx in range(100):
        if alphas[idx] > 0.0:
            print(dataArr[idx], labelArr[idx])
class optStruct:
    """State holder for the full Platt SMO: data, hyper-parameters, alphas,
    bias, error cache, and (optionally) a precomputed kernel matrix."""
    def __init__(self, dataMatIn, classLabels, C, toler, kTup, useKernal=False):
        """Store the problem data and pre-size the SMO working arrays.

        Arguments:
            dataMatIn : m x n matrix of training samples
            classLabels : m x 1 matrix of +1/-1 labels
            C : slack penalty (upper bound on every alpha)
            toler : KKT-violation tolerance
            kTup : kernel spec tuple, e.g. ('lin', 0) or ('rbf', sigma)
            useKernal : if True, precompute the full kernel matrix K
        """
        self.X = dataMatIn
        self.labelMat = classLabels
        self.C = C
        self.tol = toler
        self.m = np.shape(dataMatIn)[0]
        self.alphas = np.mat(np.zeros((self.m, 1)))
        self.b = 0
        # eCache[k] = [is_valid_flag, Ek]
        self.eCache = np.mat(np.zeros((self.m, 2)))
        self.useKernal = useKernal
        if self.useKernal:
            # Precompute the full m x m kernel matrix once, column by column.
            self.K = np.mat(np.zeros((self.m, self.m)))
            for i in range(self.m):
                self.K[:, i] = kernelTrans(self.X, self.X[i, :], kTup)
    def calcEk(self, k):
        """Return the prediction error E_k = f(x_k) - y_k for sample k."""
        if self.useKernal:
            fXk = float(np.multiply(self.alphas, self.labelMat).T * self.K[:, k]) + self.b
        else:
            fXk = float(np.multiply(self.alphas, self.labelMat).T * (self.X * self.X[k, :].T)) + self.b
        Ek = fXk - float(self.labelMat[k])
        return Ek
    def selectJ(self, i, Ei):
        """Choose the second alpha j that maximizes |Ei - Ej| (biggest step).

        Falls back to a random j when the error cache has no other valid
        entries yet. Returns (j, Ej).
        """
        maxK = -1
        maxDeltaE = 0
        Ej = 0
        self.eCache[i] = [1, Ei]   # mark Ei as valid in the cache
        validEcacheList = np.nonzero(self.eCache[:, 0].A)[0]
        if (len(validEcacheList)) > 1:
            for k in validEcacheList:
                if k == i:
                    continue
                Ek = self.calcEk(k)
                deltaE = abs(Ei -Ek)
                if (deltaE > maxDeltaE):
                    maxK = k
                    maxDeltaE = deltaE
                    Ej = Ek
            return maxK, Ej
        else:
            # First pass: no other cached errors, pick j at random.
            j = selectJrand(i, self.m)
            Ej = self.calcEk(j)
            return j, Ej
    def updateEk(self, k):
        """Recompute E_k and store it as a valid entry in the error cache."""
        Ek = self.calcEk(k)
        self.eCache[k] = [1, Ek]
        return None
    def innerL(self, i):
        """Attempt one SMO optimization step starting from alpha i.

        Returns (1, message) if a pair of alphas was updated, otherwise
        (0, message) describing why the step was skipped.
        """
        Ei = self.calcEk(i)
        # Only optimize i if it violates the KKT conditions by more than tol.
        if ((self.labelMat[i] * Ei < -self.tol) and (self.alphas[i] < self.C)) or ((self.labelMat[i] * Ei > self.tol) and (self.alphas[i] > 0)):
            j, Ej = self.selectJ(i, Ei)
            alphaIold = self.alphas[i].copy()
            alphaJold = self.alphas[j].copy()
            # Feasible box bounds for alpha j (keeps the sum constraint valid).
            if (self.labelMat[i] != self.labelMat[j]):
                L = max(0, self.alphas[j] - self.alphas[i])
                H = min(self.C, self.C + self.alphas[j] - self.alphas[i])
            else:
                L = max(0, self.alphas[j] + self.alphas[i] - self.C)
                H = min(self.C, self.alphas[j] + self.alphas[i])
            if L == H:
                return 0, 'L == H'
            # eta is the (negative) second derivative of the dual objective.
            if self.useKernal:
                eta = 2.0 * self.K[i, j] - self.K[i,i] - self.K[j,j]
            else:
                eta = 2.0 * self.X[i, :] * self.X[j, :].T - self.X[i, :] * self.X[i, :].T - self.X[j, :] * self.X[j, :].T
            if eta >= 0:
                return 0, 'eta >= 0'
            # Update alpha j along the constraint, then clip into [L, H].
            self.alphas[j] -= self.labelMat[j] * (Ei - Ej) / eta
            self.alphas[j] = clipAlpha(self.alphas[j], H, L)
            self.updateEk(j)
            if (abs(self.alphas[j] - alphaJold) < 0.00001):
                return 0, 'j not moving enough'
            # Move alpha i by the opposite amount to preserve the constraint.
            self.alphas[i] += self.labelMat[j] * self.labelMat[i] * (alphaJold - self.alphas[j])
            self.updateEk(i)
            # Recompute bias candidates from both updated alphas.
            if self.useKernal:
                b1 = self.b - Ei - self.labelMat[i] * (self.alphas[i] - alphaIold) * self.K[i, i] - self.labelMat[j] * (self.alphas[j] - alphaJold) * self.K[i, j]
                b2 = self.b - Ej - self.labelMat[i] * (self.alphas[i] - alphaIold) * self.K[i, j] - self.labelMat[j] * (self.alphas[j] - alphaJold) * self.K[j, j]
            else:
                b1 = self.b - Ei - self.labelMat[i] * (self.alphas[i] - alphaIold) * self.X[i, :] * self.X[i, :].T - self.labelMat[j] * \
                     (self.alphas[j] - alphaJold) * self.X[i, :] * self.X[j, :].T
                b2 = self.b - Ej - self.labelMat[i] * (self.alphas[i] - alphaIold) * self.X[i, : ] * self.X[j, :].T - self.labelMat[j] * \
                     (self.alphas[j] - alphaJold) * self.X[j, :] * self.X[j, :].T
            # Use whichever alpha is strictly inside (0, C); else average.
            if (0 < self.alphas[i]) and (self.C > self.alphas[i]):
                self.b = b1
            elif (0 < self.alphas[j]) and (self.C > self.alphas[j]):
                self.b = b2
            else:
                self.b = (b1 + b2) / 2.0
            return 1, 'update alphas'
        else:
            return 0, 'not adjust'
def smoP(dataMatIn, calssLabels, C, toler, maxIter, kTup=('lin', 0)):
    """Full Platt SMO: train an SVM and return (b, alphas).

    Arguments:
        dataMatIn : training data, one sample per row
        calssLabels : labels (+1/-1); spelling kept for caller compatibility
        C : slack penalty
        toler : KKT-violation tolerance
        maxIter : maximum number of sweeps
        kTup : kernel spec tuple, e.g. ('lin', 0) or ('rbf', sigma)
    """
    oS = optStruct(np.mat(dataMatIn), np.mat(calssLabels).transpose(), C, toler, kTup, True)
    sweep = 0
    entireSet = True
    alphaPairsChanged = 0
    # Alternate between sweeping every sample and only the non-bound alphas,
    # stopping when a full pass changes nothing (or maxIter is reached).
    while sweep < maxIter and (alphaPairsChanged > 0 or entireSet):
        alphaPairsChanged = 0
        if entireSet:
            candidates = range(oS.m)
        else:
            candidates = np.nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
        for i in candidates:
            changed, message = oS.innerL(i)
            alphaPairsChanged += changed
        sweep += 1
        if entireSet:
            entireSet = False
        elif alphaPairsChanged == 0:
            entireSet = True
    return oS.b, oS.alphas
def calcWs(alphas, dataArr, classLabels):
    """Recover the primal weight vector: w = sum_i alpha_i * y_i * x_i."""
    X = np.mat(dataArr)
    labelMat = np.mat(classLabels).transpose()
    m, n = np.shape(X)
    w = np.zeros((n, 1))
    for sample in range(m):
        w += np.multiply(alphas[sample] * labelMat[sample], X[sample, :].T)
    return w
def calcWsAndB():
    """Train on testSet.txt, print the support vectors, and return (w, b)."""
    dataArr, labelArr = loadDataSet('testSet.txt')
    b, alphas = smoP(dataArr, labelArr, 0.6, 0.001, 40)
    print(b)
    print(alphas[alphas > 0])
    print(np.shape(alphas[alphas > 0]))
    # Show which samples became support vectors.
    for idx in range(100):
        if alphas[idx] > 0.0:
            print(dataArr[idx], labelArr[idx])
    ws = calcWs(alphas, dataArr, labelArr)
    print(ws)
    return ws, b
def test2():
    """Classify testSet.txt with the linear (w, b) and report the error rate."""
    dataArr, labelArr = loadDataSet('testSet.txt')
    ws, b = calcWsAndB()
    dataMat = np.mat(dataArr)
    ws = np.mat(ws)
    m = dataMat.shape[0]
    errorCount = 0
    for i in range(m):
        rawResult = dataMat[i] * ws + b
        # Sign of the decision value is the predicted class.
        result = 1.0 if rawResult >= 0 else -1.0
        print('classified result is:{0} and real result is: {1}, rawResult is: {2}'.format(result, labelArr[i], rawResult))
        if result != labelArr[i]:
            errorCount += 1
    print('error rate is {0}'.format(float(errorCount) / m))
def kernelTrans(X, A, kTup):
    """Compute the kernel column k(X[j], A) for every row of X.

    kTup is ('lin', 0) for a linear kernel or ('rbf', sigma) for a Gaussian
    kernel. Raises NameError for any other kernel name.
    """
    m, n = np.shape(X)
    K = np.mat(np.zeros((m, 1)))
    kernel_name = kTup[0]
    if kernel_name == 'lin':
        # Linear kernel: plain inner products.
        K = X * A.T
    elif kernel_name == 'rbf':
        # Gaussian kernel: exp(-||x - a||^2 / sigma^2).
        for row in range(m):
            diff = X[row, :] - A
            K[row] = diff * diff.T
        K = np.exp(K / (-1 * kTup[1] ** 2))
    else:
        raise NameError('Houston We Have a Problem -- That Kernel is not recognized')
    return K
def testRbf(k1=1.3):
    """Train an RBF-kernel SVM on testSetRBF.txt and report train/test error."""
    dataArr, labelArr = loadDataSet('testSetRBF.txt')
    b, alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, ('rbf', k1))
    datMat = np.mat(dataArr)
    labelMat = np.mat(labelArr).transpose()
    # Only the support vectors (alpha > 0) are needed for prediction.
    svInd = np.nonzero(alphas.A > 0)[0]
    sVs = datMat[svInd]
    labelSV = labelMat[svInd]
    print('there are {0} Support Vectors'.format(np.shape(sVs)[0]))
    m = np.shape(datMat)[0]
    errorCount = 0
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], ('rbf', k1))
        predict = kernelEval.T * np.multiply(labelSV, alphas[svInd]) + b
        if np.sign(predict) != np.sign(labelArr[i]):
            errorCount += 1
    print('the training error rate is:{0}'.format(float(errorCount) / m))
    # Evaluate on the held-out set using the same support vectors.
    dataArr, labelArr = loadDataSet('testSetRBF2.txt')
    datMat = np.mat(dataArr)
    m = np.shape(datMat)[0]
    errorCount = 0
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], ('rbf', k1))
        predict = kernelEval.T * np.multiply(labelSV, alphas[svInd]) + b
        if np.sign(predict) != np.sign(labelArr[i]):
            errorCount += 1
    print('the test error rate is:{0}'.format(float(errorCount) / m))
def img2vector(fileName):
    """Read a 32x32 text image of single-digit characters into a 1x1024 row.

    Arguments:
        fileName : path to a file with 32 lines of at least 32 digit chars
    Returns:
        1 x 1024 numpy array of the digit values, row-major order
    """
    returnVect = np.zeros((1, 1024))
    # Context manager closes the file (the original leaked the handle).
    with open(fileName) as fr:
        for i in range(32):
            lineStr = fr.readline()
            for j in range(32):
                returnVect[0, 32 * i + j] = int(lineStr[j])
    return returnVect
def loadImages(dirName):
    """Load every digit file in dirName; label digit 9 as -1, all others +1."""
    from os import listdir
    fileList = listdir(dirName)
    m = len(fileList)
    hwLabels = []
    trainingMat = np.zeros((m, 1024))
    for i, fileNameStr in enumerate(fileList):
        # File names look like "<digit>_<index>.txt".
        classNumStr = int(fileNameStr.split('.')[0].split('_')[0])
        hwLabels.append(-1 if classNumStr == 9 else 1)
        trainingMat[i, :] = img2vector('{0}/{1}'.format(dirName, fileNameStr))
    return trainingMat, hwLabels
def testDigits(kTup=('rbf', 10)):
    """Train an SVM digit classifier (9 vs rest) and report train/test error."""
    dataArr, labelArr = loadImages('trainingDigits')
    b, alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, kTup)
    datMat = np.mat(dataArr)
    labelMat = np.mat(labelArr).transpose()
    # Only the support vectors (alpha > 0) are needed for prediction.
    svInd = np.nonzero(alphas.A > 0)[0]
    sVs = datMat[svInd]
    labelSV = labelMat[svInd]
    print('there are {0} Support Vectors when change rate is:{1}'.format(np.shape(sVs)[0], kTup[1]))

    def error_rate(mat, labels):
        # Fraction of samples whose kernel-expansion prediction sign is wrong.
        wrong = 0
        total = np.shape(mat)[0]
        for i in range(total):
            kernelEval = kernelTrans(sVs, mat[i, :], kTup)
            predict = kernelEval.T * np.multiply(labelSV, alphas[svInd]) + b
            if np.sign(predict) != np.sign(labels[i]):
                wrong += 1
        return float(wrong) / total

    print('the training error rate is:{0}'.format(error_rate(datMat, labelArr)))
    dataArr, labelArr = loadImages('testDigits')
    print('the test error rate is:{0}'.format(error_rate(np.mat(dataArr), labelArr)))
if __name__ == '__main__':
    # Sweep the RBF kernel width to compare the resulting error rates.
    for width in (0.1, 5, 10, 50, 100):
        testDigits(('rbf', width))
|
import sys
input = sys.stdin.readline
def main():
    """Read a knapsack instance from stdin and print the best total value.

    Input: N P, then N lines of "a b" (weight a, value b).
    NOTE(review): `ans` tracks dp[P] + b before item (a, b) is packed, which
    appears to let one item ignore the capacity — confirm against the
    problem statement.
    """
    N, P = map(int, input().split())
    items = [tuple(map(int, input().split())) for _ in range(N)]
    items.sort(reverse=True)
    dp = [0] * (P + 1)
    best = 0
    for weight, value in items:
        best = max(best, dp[P] + value)
        # Standard 0/1 knapsack update, iterating capacity downwards.
        for cap in range(P, weight - 1, -1):
            dp[cap] = max(dp[cap], dp[cap - weight] + value)
    print(best)


if __name__ == '__main__':
    main()
|
# Read a number N; compute and print the series 1 + 1/2 + 1/3 + ... + 1/N.
n = int(input('digite um numero: '))
s = 0
for c in range(1, n + 1):
    print(f'1/{c}+')
    s += 1/c
print(f'={s}')
# -*- coding:utf-8 -*-
import json
from Appointment.APmodel import APmodelHandler
from BaseHandlerh import BaseHandler
# Photo-shoot companion handlers
from Database.tables import WApCompanions, WAcAuth
from FileHandler.ImageHandler import ImageHandler
class ApCompanionHandler(BaseHandler):
    """Tornado handler (Python 2) for photo-shoot companion posts:
    create a post (type 10900) or list all valid posts (type 10904)."""
    # Response envelope reused by every request.
    # NOTE(review): this is a class attribute shared across all handler
    # instances — confirm that cross-request sharing is intended.
    retjson = {'code':'', 'contents':''}
    def post(self):
        # `type` selects the sub-action of this endpoint.
        type = self.get_argument('type')
        if type == '10900': # create a new photo-shoot companion post
            ApcTitle = self.get_argument('title')
            ApOrc = self.get_argument('orgnazation')
            ApcContent = self.get_argument('content')
            ApcUrl = self.get_argument('companionUrl')
            Apcimg = self.get_arguments('companionImgs[]')
            new_ApCompanion = WApCompanions(
                WAPCname=ApcTitle,
                WAPCServeintro=ApcContent, # service description
                WAPCOrganintro=ApOrc,
                WAPCvalid=1,
                WAPCContact=ApcUrl,
            )
            self.db.merge(new_ApCompanion)
            self.db.commit()
            try:
                # Re-query the row just written to obtain its generated id,
                # then attach the uploaded images to it.
                OneCompanion = self.db.query(WApCompanions).filter(WApCompanions.WAPCname == ApcTitle,
                                                                   WApCompanions.WAPCServeintro == ApcContent,
                                                                   WApCompanions.WAPCContact == ApcUrl,
                                                                   WApCompanions.WAPCvalid == 1).one()
                image = ImageHandler()
                image.insert_companion_image(Apcimg, OneCompanion.WAPCid)
                self.db.commit()
                self.retjson['code'] = '10900'
                self.retjson['contents'] = '约拍伴侣创建成功'
            except Exception, e:
                print e
                self.retjson['code']='10901'
                self.retjson['contents']='创建失败'
        elif type == '10904':
            # Return every valid companion post, serialized by APmodelHandler.
            retdata = []
            Companion_all = self.db.query(WApCompanions).filter(WApCompanions.WAPCvalid == 1).all()
            modelhandler = APmodelHandler()
            for item in Companion_all:
                modelhandler.ApCompanion(item, retdata)
            self.retjson['code'] = '10904'
            self.retjson['contents'] = retdata
        self.write(json.dumps(self.retjson, ensure_ascii=False, indent=2))
|
def soma(n1, n2):
    """Return the sum of the two numbers."""
    return n1 + n2


retorno_soma = soma(0, 1024)
print(retorno_soma)
|
import requests
import datetime
import random
from django.db import models
from django.db.models import Sum
from django.contrib.auth.models import User
from django.template.loader import render_to_string
from django.template.defaultfilters import linebreaks
from django.urls import reverse
from django.utils import timezone
from django.utils.html import escape
from django.utils.timesince import timesince
from django.conf import settings
from PIL import Image
def randstr(length):
    """Return a random alphanumeric string of the given length."""
    alphabet = '0123456789abcdefghijklnmopqrstuvwxyzABCDEFGHIJKLNMOPQRSTUVWXYZ'
    last = len(alphabet) - 1
    return ''.join(alphabet[random.randint(0, last)] for _ in range(length))
def parsedown(text):
    """Render markdown text to HTML via the remote safe-parsedown API."""
    payload = {'md': text.encode('utf-8')}
    response = requests.post(settings.API_URL + '/api/safe-parsedown/get.php', data=payload)
    return response.text
def avatar_path(instance, filename):
    """upload_to callback: build the storage path for a user's avatar.

    Keeps the original file extension and adds a random 4-char name so a
    re-upload gets a fresh path. (The original computed datetime.now()
    here but never used it; that dead local was removed.)
    """
    ext = filename.split('.')[-1]
    return 'images/avatar/u/' + instance.user.username + '/' + randstr(4) + '.' + ext
def title_image_path(instance, filename):
    """upload_to callback: date-bucketed storage path for a thread title image.

    Layout: images/title/<year>/<month>/<day>/<username>/<hour>_<rand8>.<ext>
    BUGFIX: the original concatenated 'images/title/' + '/' and so emitted a
    double slash ('images/title//...') in every generated path.
    """
    dt = datetime.datetime.now()
    ext = filename.split('.')[-1]
    return 'images/title/' + str(dt.year) + '/' + str(dt.month) + '/' + str(dt.day) + '/' + instance.author.username + '/' + str(dt.hour) + '_' + randstr(8) + '.' + ext
def make_thumbnail(this, size, save_as=False, quality=100):
    """Resize the model instance's image in place and write it under static/.

    Arguments:
        this : a model instance with an `image` (or `avatar`) ImageField
        size : maximum width/height of the thumbnail, in pixels
        save_as : if True, write to this.get_thumbnail() instead of
                  overwriting the original image path
        quality : save quality passed through to Pillow
    """
    # Profile stores its picture on `avatar`; alias it so the code below can
    # treat every model uniformly via `image`.
    if hasattr(this, 'avatar'):
        this.image = this.avatar
    image = Image.open(this.image)
    # NOTE(review): Image.ANTIALIAS is deprecated and removed in Pillow 10;
    # newer Pillow requires Image.LANCZOS — confirm the pinned version.
    image.thumbnail((size, size), Image.ANTIALIAS)
    image.save('static/' + (str(this.image) if not save_as else this.get_thumbnail()), quality=quality)
class History(models.Model):
    """Record of a user having viewed a post (read-history feature)."""
    user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    post = models.ForeignKey('board.Post', on_delete = models.CASCADE)
    created_date = models.DateTimeField(default=timezone.now)
    def __str__(self):
        return self.user.username
class Grade(models.Model):
    """A named user grade/rank (referenced by Profile.grade)."""
    name = models.CharField(max_length=30, unique=True)
    def __str__(self):
        return self.name
class Config(models.Model):
    """Per-user settings: consent flags, Telegram link, password-recovery QnA."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    agree_email = models.BooleanField(default=False)    # consent to e-mails
    agree_history = models.BooleanField(default=False)  # consent to history tracking
    telegram_token = models.CharField(max_length=8, blank=True)
    telegram_id = models.CharField(max_length=15, blank=True)
    password_qna = models.TextField(blank=True)
    def __str__(self):
        return self.user.username
class Profile(models.Model):
    """Extra per-user data: grade/exp, bio, avatar, and social links."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Users following this profile (through-model: Follow).
    subscriber = models.ManyToManyField(User, through='Follow', related_name='following', blank=True)
    grade = models.ForeignKey('board.Grade', on_delete=models.CASCADE, blank=True, null=True)
    exp = models.IntegerField(default=0)
    bio = models.TextField(max_length=500, blank=True)
    avatar = models.ImageField(blank=True,upload_to=avatar_path)
    github = models.CharField(max_length=15, blank=True)
    twitter = models.CharField(max_length=15, blank=True)
    youtube = models.CharField(max_length=30, blank=True)
    facebook = models.CharField(max_length=30, blank=True)
    instagram = models.CharField(max_length=15, blank=True)
    homepage = models.CharField(max_length=100, blank=True)
    about_md = models.TextField()
    about_html = models.TextField()
    def thumbnail(self):
        """URL of the avatar, or the bundled default image."""
        if self.avatar:
            return self.avatar.url
        else:
            return settings.STATIC_URL + '/images/default-avatar.jpg'
    def __str__(self):
        return self.user.username
    def total_subscriber(self):
        """Number of users following this profile."""
        return self.subscriber.count()
    def save(self, *args, **kwargs):
        """On avatar change, delete the old file and regenerate the thumbnail."""
        will_make_thumbnail = False
        if not self.pk and self.avatar:
            will_make_thumbnail = True
        # BUGFIX: was a bare `except: pass`, which silently swallowed every
        # error (including DB failures). Only "row does not exist yet" is an
        # expected condition here.
        try:
            this = Profile.objects.get(id=self.id)
            if this.avatar != self.avatar:
                this.avatar.delete(save=False)
                will_make_thumbnail = True
        except Profile.DoesNotExist:
            pass
        super(Profile, self).save(*args, **kwargs)
        if will_make_thumbnail:
            make_thumbnail(self, size=500)
    def get_absolute_url(self):
        return reverse('user_profile', args=[self.user])
class Follow(models.Model):
    """Through-model for Profile.subscriber: `follower` follows `following`."""
    class Meta:
        db_table = 'board_user_follow'
        auto_created = True
    following = models.ForeignKey(Profile, on_delete=models.CASCADE)
    follower = models.ForeignKey(User, on_delete=models.CASCADE)
    created_date = models.DateTimeField(default=timezone.now)
    def __str__(self):
        # BUGFIX: the original returned self.post.title, but Follow has no
        # `post` field, so __str__ always raised AttributeError.
        return self.follower.username
class Thread(models.Model):
author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
title = models.CharField(max_length=50)
description = models.TextField(blank=True)
url = models.SlugField(max_length=50, unique=True, allow_unicode=True)
image = models.ImageField(blank=True, upload_to=title_image_path)
hide = models.BooleanField(default=False)
notice = models.BooleanField(default=False)
allow_write = models.BooleanField(default=False)
created_date = models.DateTimeField(default=timezone.now)
real_created_date = models.DateTimeField(default=timezone.now)
tag = models.CharField(max_length=50)
bookmark = models.ManyToManyField(User, related_name='bookmark_thread', blank=True)
def __str__(self):
return self.title
def thumbnail(self):
if self.image:
return self.image.url
else:
return settings.STATIC_URL + '/images/default-post.png' if not self.image else self.image.url
def total_bookmark(self):
return self.bookmark.count()
def tagging(self):
return [tag for tag in self.tag.split(',') if tag]
def today(self):
count = 0
try:
today = timezone.make_aware(datetime.datetime.now())
count = ThreadAnalytics.objects.get(date=today, thread=self).count
except:
pass
return count
def yesterday(self):
count = 0
try:
yesterday = timezone.make_aware(datetime.datetime.now() - datetime.timedelta(days=1))
count = ThreadAnalytics.objects.get(date=yesterday, thread=self).count
except:
pass
return count
def total(self):
count = ThreadAnalytics.objects.filter(thread=self).aggregate(Sum('count'))
if count['count__sum']:
return count['count__sum']
else:
return 0
def trendy(self):
seven_days_ago = timezone.make_aware(datetime.datetime.now() - datetime.timedelta(days=7))
today = timezone.make_aware(datetime.datetime.now())
count = ThreadAnalytics.objects.filter(date__range=[seven_days_ago, today], thread=self).aggregate(Sum('count'))
if count['count__sum']:
return count['count__sum']/10
else:
return 0
def get_absolute_url(self):
return reverse('thread_detail', args=[self.url])
def get_thumbnail(self):
return str(self.image) + '.minify.' + str(self.image).split('.')[-1]
def to_dict_for_analytics(self):
return {
'pk': self.pk,
'author': self.author.username,
'title': self.title,
'date': self.created_date,
'today': self.today(),
'yesterday': self.yesterday(),
'total': self.total(),
'hide': self.hide,
'total_story': self.stories.count(),
'total_bookmark': self.total_bookmark(),
'tag': self.tag,
'url': self.get_absolute_url(),
}
def save(self, *args, **kwargs):
    """Save the thread, regenerating the thumbnail when the image changes.

    A thumbnail is (re)built when this is a brand-new row with an image,
    or when the submitted image differs from the stored one (in which
    case the superseded file is deleted from storage first).
    """
    will_make_thumbnail = False
    if not self.pk and self.image:
        will_make_thumbnail = True
    try:
        this = Thread.objects.get(id=self.id)
        if this.image != self.image:
            # Remove the old image file without re-saving the stale row.
            this.image.delete(save=False)
            will_make_thumbnail = True
    except:
        # NOTE(review): bare except — presumably for Thread.DoesNotExist on
        # first save, but it hides other errors too.
        pass
    super(Thread, self).save(*args, **kwargs)
    if will_make_thumbnail:
        make_thumbnail(self, size=750, save_as=True)
class ThreadAnalytics(models.Model):
    """Per-day view counter for a Thread."""
    thread = models.ForeignKey(Thread, on_delete=models.CASCADE)
    date = models.DateField(default=timezone.now)
    count = models.IntegerField(default=0)
    referer = models.TextField()  # raw referer log — format not shown here; TODO confirm
    iptable = models.TextField()  # presumably visitor IPs for dedup — TODO confirm
    def __str__(self):
        return self.thread.title
class Story(models.Model):
    """A story (article) posted inside a Thread."""
    class Meta:
        ordering = ['-created_date']  # newest first
    thread = models.ForeignKey('board.Thread', related_name='stories', on_delete = models.CASCADE)
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=50)
    text_md = models.TextField()    # markdown source
    text_html = models.TextField()  # rendered HTML
    created_date = models.DateTimeField(default=timezone.now)
    updated_date = models.DateTimeField(default=timezone.now)
    agree = models.ManyToManyField(User, related_name='agree_story', blank=True)
    disagree = models.ManyToManyField(User, related_name='disagree_story', blank=True)
    def __str__(self):
        return self.title
    def total_disagree(self):
        """Return the number of users who disagreed."""
        return self.disagree.count()
    def total_agree(self):
        """Return the number of users who agreed."""
        return self.agree.count()
    def to_dict(self):
        """Serialize the story for JSON responses."""
        return {
            'pk': self.pk,
            'title': self.title,
            'author': self.author.username,
            'content': self.text_html,
            'agree': self.total_agree(),
            'disagree': self.total_disagree(),
            'thumbnail': self.author.profile.thumbnail(),
            'created_date': self.created_date.strftime("%Y-%m-%d %H:%M"),
            'updated_date': self.updated_date.strftime("%Y-%m-%d %H:%M"),
        }
class TempPosts(models.Model):
    """A draft post saved before publishing, addressed by a client token."""
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=50)
    token = models.CharField(max_length=50)  # handle the client uses to reload the draft
    text_md = models.TextField(blank=True)
    tag = models.CharField(max_length=50)
    created_date = models.DateTimeField(default=timezone.now)
    def to_dict(self):
        """Serialize the draft for JSON responses."""
        return {
            'title': self.title,
            'token': self.token,
            'text_md': self.text_md,
            'tag': self.tag,
            'created_date': timesince(self.created_date),
        }
    def __str__(self):
        return self.title
class Post(models.Model):
    """A blog post owned by a user, addressed by a unique slug."""
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=50)
    url = models.SlugField(max_length=50, unique=True, allow_unicode=True)
    image = models.ImageField(blank=True, upload_to=title_image_path)
    text_md = models.TextField(blank=True)  # markdown source
    text_html = models.TextField()          # rendered HTML
    hide = models.BooleanField(default=False)
    notice = models.BooleanField(default=False)
    block_comment = models.BooleanField(default=False)
    likes = models.ManyToManyField(User, through='PostLikes', related_name='like_posts', blank=True)
    tag = models.CharField(max_length=50)  # comma-separated tag string
    created_date = models.DateTimeField(default=timezone.now)
    updated_date = models.DateTimeField(default=timezone.now)
    def __str__(self):
        return self.title
    def thumbnail(self):
        """Return the image URL, or a static default when no image is set."""
        if self.image:
            return self.image.url
        else:
            return settings.STATIC_URL + '/images/default-post.png'
    def get_absolute_url(self):
        """Return the canonical URL of this post (named route 'post_detail')."""
        return reverse('post_detail', args=[self.author, self.url])
    def total_likes(self):
        """Return how many users have liked this post."""
        return self.likes.count()
    def today(self):
        """Return today's view count, or 0 when unavailable.

        NOTE(review): bare ``except`` hides more than DoesNotExist.
        """
        count = 0
        try:
            today = timezone.make_aware(datetime.datetime.now())
            count = PostAnalytics.objects.get(date=today, posts=self).count
        except:
            pass
        return count
    def yesterday(self):
        """Return yesterday's view count, or 0 when unavailable."""
        count = 0
        try:
            yesterday = timezone.make_aware(datetime.datetime.now() - datetime.timedelta(days=1))
            count = PostAnalytics.objects.get(date=yesterday, posts=self).count
        except:
            pass
        return count
    def total(self):
        """Return the all-time view count summed over analytics rows (0 if none)."""
        count = PostAnalytics.objects.filter(posts=self).aggregate(Sum('count'))
        if count['count__sum']:
            return count['count__sum']
        else:
            return 0
    def trendy(self):
        """Return a 7-day popularity score: last week's views divided by 10."""
        seven_days_ago = timezone.make_aware(datetime.datetime.now() - datetime.timedelta(days=7))
        today = timezone.make_aware(datetime.datetime.now())
        count = PostAnalytics.objects.filter(date__range=[seven_days_ago, today], posts=self).aggregate(Sum('count'))
        if count['count__sum']:
            return count['count__sum']/10
        else:
            return 0
    def tagging(self):
        """Split the comma-separated ``tag`` field into a list, dropping empties."""
        return [tag for tag in self.tag.split(',') if tag]
    def get_thumbnail(self):
        """Return the path of the pre-generated minified variant of ``image``."""
        return str(self.image) + '.minify.' + str(self.image).split('.')[-1]
    def to_dict_for_analytics(self):
        """Serialize post statistics for the analytics dashboard."""
        return {
            'pk': self.pk,
            'author': self.author.username,
            'title': self.title,
            # NOTE(review): key is 'data' here but 'date' in the Thread
            # version — looks like a typo, yet renaming would break existing
            # consumers of this dict; confirm before changing.
            'data': self.created_date,
            'today': self.today(),
            'yesterday': self.yesterday(),
            'total': self.total(),
            'hide': self.hide,
            'total_comment': self.comments.count(),
            'total_likes': self.total_likes(),
            'tag': self.tag,
            'url': self.get_absolute_url(),
        }
    def save(self, *args, **kwargs):
        """Save the post, regenerating the thumbnail when the image changes."""
        will_make_thumbnail = False
        if not self.pk and self.image:
            will_make_thumbnail = True
        try:
            this = Post.objects.get(id=self.id)
            if this.image != self.image:
                # Remove the old image file without re-saving the stale row.
                this.image.delete(save=False)
                will_make_thumbnail = True
        except:
            # NOTE(review): bare except — presumably Post.DoesNotExist on first save.
            pass
        super(Post, self).save(*args, **kwargs)
        if will_make_thumbnail:
            make_thumbnail(self, size=750, save_as=True)
class PostAnalytics(models.Model):
    """Per-day view counter for a Post."""
    posts = models.ForeignKey(Post, on_delete=models.CASCADE)
    date = models.DateField(default=timezone.now)
    count = models.IntegerField(default=0)
    referer = models.TextField()  # raw referer log — format not shown here; TODO confirm
    iptable = models.TextField()  # presumably visitor IPs for dedup — TODO confirm
    def __str__(self):
        return self.posts.title
class PostLikes(models.Model):
    """Through model for Post.likes, recording when each like happened."""
    class Meta:
        db_table = 'board_post_likes'
        # Marks the model as if auto-generated — presumably to keep the
        # pre-``through`` M2M table; TODO confirm migration history.
        auto_created = True
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    created_date = models.DateTimeField(default=timezone.now)
    def __str__(self):
        return self.post.title
class Comment(models.Model):
    """A comment attached to a Post."""
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    post = models.ForeignKey('board.Post', related_name='comments', on_delete = models.CASCADE)
    text = models.TextField(max_length=300)
    edit = models.BooleanField(default=False)   # has the comment been edited?
    heart = models.BooleanField(default=False)  # presumably an author-highlight flag — TODO confirm
    likes = models.ManyToManyField(User, related_name='like_comments', blank=True)
    created_date = models.DateTimeField(default=timezone.now)
    def __str__(self):
        return self.text
    def thumbnail(self):
        # NOTE(review): Comment defines no ``image`` field, so this raises
        # AttributeError if ever called — looks like copy-paste from Post;
        # confirm whether it is dead code before removing.
        if self.image:
            return self.image.url
        else:
            return settings.STATIC_URL + '/images/default-post.png'
    def total_likes(self):
        """Return how many users have liked this comment."""
        return self.likes.count()
    def to_dict(self):
        """Serialize the comment for JSON responses (text is escaped HTML)."""
        return {
            'pk': self.pk,
            'author': self.author.username,
            'created_date': timesince(self.created_date),
            'content': linebreaks(escape(self.text)),
            'total_likes': self.total_likes(),
            'thumbnail': self.author.profile.thumbnail(),
            'edited': 'edited' if self.edit == True else '',
        }
class Notify(models.Model):
    """A user notification pointing at a URL.

    NOTE(review): "infomation" is a misspelling of "information", but it is
    a column name and part of the to_dict payload — renaming requires a
    migration plus client changes.
    """
    user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    url = models.CharField(max_length=255)
    is_read = models.BooleanField(default=False)
    infomation = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    def __str__(self):
        return self.infomation
    def to_dict(self):
        """Serialize the notification for JSON responses."""
        return {
            'pk': self.pk,
            'user': self.user.username,
            'infomation': self.infomation,
            'created_date': timesince(self.created_date)
        }
class Series(models.Model):
    """A named, slug-addressed collection of a user's posts."""
    owner = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    name = models.CharField(max_length=50, unique=True)
    description = models.TextField(blank=True)
    url = models.SlugField(max_length=50, unique=True, allow_unicode=True)
    posts = models.ManyToManyField(Post, related_name='series', blank=True)
    created_date = models.DateTimeField(default=timezone.now)

    def __str__(self):
        return self.name

    def thumbnail(self):
        """Return the first post's thumbnail, or the static default image.

        Fetches ``posts.first()`` once — the original evaluated it twice,
        issuing two identical queries.
        """
        first_post = self.posts.first()
        if first_post:
            return first_post.thumbnail()
        return settings.STATIC_URL + '/images/default-post.png'

    def get_absolute_url(self):
        """Return the canonical URL of this series (named route 'series_list')."""
        return reverse('series_list', args=[self.owner, self.url])
import json
import numpy as np
import cv2
import matplotlib.pyplot as plt
from PIL import Image
import bit_stream_decoder
import byte_stream_generator
import channel_restorer
import histogram_generator
import huffman_code_decode_generator
import image_compressor
# --- Load the source image (OpenCV reads BGR; convert to RGB for display) ---
file_name = 'images/img.png'
original_image = cv2.imread(file_name)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
# Plot original image
plt.figure(figsize=(20, 10))
plt.imshow(original_image)
# Extracting the red screen
red_scale_image = original_image[:, :, 0]
# Plot original image
plt.figure(figsize=(15, 10))
plt.imshow(red_scale_image, cmap='gray', vmin=0, vmax=255)
# Extracting the green screen
green_scale_image = original_image[:, :, 1]
# Plot original image
plt.figure(figsize=(15, 10))
plt.imshow(green_scale_image, cmap='gray', vmin=0, vmax=255)
# Extracting the blue screen
blue_scale_image = original_image[:, :, 2]
# Plot original image
plt.figure(figsize=(15, 10))
plt.imshow(blue_scale_image, cmap='gray', vmin=0, vmax=255)
# --- Per-channel histograms and probability distributions ---
red_channel_histogram_array = histogram_generator.histogram_array_generator(red_scale_image)
green_channel_histogram_array = histogram_generator.histogram_array_generator(green_scale_image)
blue_channel_histogram_array = histogram_generator.histogram_array_generator(blue_scale_image)
# NOTE(review): 800 * 1200 hard-codes the pixel count — confirm it matches the
# actual image; shape[0] * shape[1] would be safer.
red_channel_probability_distribution = histogram_generator.probability_distribution_generator(
    red_channel_histogram_array, 800 * 1200)
green_channel_probability_distribution = histogram_generator.probability_distribution_generator(
    green_channel_histogram_array, 800 * 1200)
blue_channel_probability_distribution = histogram_generator.probability_distribution_generator(
    blue_channel_histogram_array, 800 * 1200)
# Build Huffman codebooks; a zero-probability 'separator' symbol is injected so
# channel boundaries can be marked in the bit stream.
red_channel_probability_distribution['separator'] = 0
red_huffman_coding = huffman_code_decode_generator.Huffman_Coding(red_channel_probability_distribution)
red_coded_pixels, red_reverse_coded_pixels = red_huffman_coding.compress()
green_channel_probability_distribution['separator'] = 0
green_huffman_coding = huffman_code_decode_generator.Huffman_Coding(green_channel_probability_distribution)
green_coded_pixels, green_reverse_coded_pixels = green_huffman_coding.compress()
# NOTE(review): unlike red/green, blue gets no 'separator' entry — possibly
# intentional (blue is last, no trailing separator needed), but confirm.
blue_huffman_coding = huffman_code_decode_generator.Huffman_Coding(blue_channel_probability_distribution)
blue_coded_pixels, blue_reverse_coded_pixels = blue_huffman_coding.compress()
# Persist the per-channel encode/decode tables.
with open('codes/red_channel_codes.json', 'w') as fp:
    json.dump(red_coded_pixels, fp)
with open('decodes/red_channel_decodes.json', 'w') as fp:
    json.dump(red_reverse_coded_pixels, fp)
with open('codes/green_channel_codes.json', 'w') as fp:
    json.dump(green_coded_pixels, fp)
with open('decodes/green_channel_decodes.json', 'w') as fp:
    json.dump(green_reverse_coded_pixels, fp)
with open('codes/blue_channel_codes.json', 'w') as fp:
    json.dump(blue_coded_pixels, fp)
with open('decodes/blue_channel_decodes.json', 'w') as fp:
    json.dump(blue_reverse_coded_pixels, fp)
# Encode each channel and join them into a single bit stream.
red_channel_compressed_image = image_compressor.compressor(red_scale_image, red_coded_pixels)
green_channel_compressed_image = image_compressor.compressor(green_scale_image, green_coded_pixels)
blue_channel_compressed_image = image_compressor.compressor(blue_scale_image, blue_coded_pixels)
bit_stream = byte_stream_generator.byte_stream(red_channel_compressed_image, green_channel_compressed_image,
                                               blue_channel_compressed_image, red_coded_pixels['separator'],
                                               green_coded_pixels['separator'])
# Ratio of compressed bits to raw bits (3 channels x 8 bits per pixel).
print('Compression ratio:', (len(bit_stream) / (red_scale_image.shape[0] * red_scale_image.shape[1] * 3 * 8)))
with open('bit_stream.txt', 'w') as fp:
    fp.write(bit_stream)
# image decompression
red_channel_decoder = json.load(open('./decodes/red_channel_decodes.json', 'r'))
green_channel_decoder = json.load(open('./decodes/green_channel_decodes.json', 'r'))
blue_channel_decoder = json.load(open('./decodes/blue_channel_decodes.json', 'r'))
with open('bit_stream.txt', 'r') as fr:
    bit_stream = fr.read()
pixel_stream = bit_stream_decoder.decoder(bit_stream, red_channel_decoder, green_channel_decoder, blue_channel_decoder,
                                          file_name)
with open('image_pixel_stream.txt', 'w') as fr:
    fr.write(str(pixel_stream))
# image restoring
with open('image_pixel_stream.txt', 'r') as fr:
    pixel_stream = fr.read()
# Parse the stringified Python list ("[1, 2, ...]") back into ints.
pixel_stream = pixel_stream.replace('[', '')
pixel_stream = pixel_stream.replace(']', '')
pixel_stream = pixel_stream.split(', ')
pixel_stream = [int(pixel) for pixel in pixel_stream]
# Channels were concatenated R|G|B in equal thirds.
red_channel_pixel_stream = pixel_stream[:int(len(pixel_stream) / 3)]
green_channel_pixel_stream = pixel_stream[int(len(pixel_stream) / 3):int((2 * len(pixel_stream)) / 3)]
blue_channel_pixel_stream = pixel_stream[int((2 * len(pixel_stream)) / 3):int(len(pixel_stream))]
original_image = cv2.imread(file_name)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
red_channel_image = np.reshape(red_channel_pixel_stream, (original_image.shape[0], original_image.shape[1]))
plt.figure(figsize=(20, 10))
plt.imshow(red_channel_image, cmap='gray', vmin=0, vmax=255)
green_channel_image = np.reshape(green_channel_pixel_stream, (original_image.shape[0], original_image.shape[1]))
plt.figure(figsize=(20, 10))
plt.imshow(green_channel_image, cmap='gray', vmin=0, vmax=255)
blue_channel_image = np.reshape(blue_channel_pixel_stream, (original_image.shape[0], original_image.shape[1]))
plt.figure(figsize=(20, 10))
plt.imshow(blue_channel_image, cmap='gray', vmin=0, vmax=255)
# Per-channel reconstruction error (all zeros iff the codec is lossless).
red_channel_loss = original_image[:, :, 0] - red_channel_image
green_channel_loss = original_image[:, :, 1] - green_channel_image
blue_channel_loss = original_image[:, :, 2] - blue_channel_image
total_loss = np.sum(red_channel_loss) + np.sum(green_channel_loss) + np.sum(blue_channel_loss)
print('Total loss (accross all red, green and blue channels):', total_loss)
restored_image = channel_restorer.image_restorer(red_channel_image, green_channel_image, blue_channel_image)
print('Original image dimensions:', np.array(original_image).shape)
print('Restored image dimensions:', np.array(restored_image).shape)
# Save the restored image without axes or borders.
fig = plt.figure(frameon=False, figsize=(20, 10))
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(restored_image, aspect='auto')
fig.savefig('out.png', dpi=150)
|
import json
import logging
import os
import time
import urllib
import boto3
import numpy as np
import pandas as pd
import regex as re
from util.aws.s3 import s3_exists
def _parse_transcription(handle, speaker_labels=None):
    """Parse an AWS Transcribe result document into a DataFrame.

    Args:
        handle: an open file-like object containing the Transcribe JSON,
            or an S3 path (downloaded via util.aws.s3.s3_download).
        speaker_labels: optional mapping from Transcribe speaker labels
            (e.g. "spk_0") to human-readable names.

    Returns:
        A DataFrame of transcript items with log-confidences; when speaker
        diarization data is present, one row per speaker segment with the
        segment's joined content and summed log-confidence.

    Fixes vs. original: the mutable default argument ``speaker_labels={}``
    is replaced with None, and the deprecated uppercase "S" time unit is
    replaced with "s" (identical meaning, pandas>=2.2 compatible).
    """
    if speaker_labels is None:
        speaker_labels = {}
    if isinstance(handle, str):
        from util.aws.s3 import s3_download
        handle = open(s3_download(handle))
    results = json.load(handle)

    def _to_timedelta(series):
        # Transcribe reports offsets as fractional-second strings.
        return pd.to_timedelta(series.astype(float), unit="s").dt.round("s")

    transcript_df = pd.DataFrame(results["results"]["items"])
    transcript_df["start_time"] = _to_timedelta(transcript_df["start_time"])
    transcript_df["end_time"] = _to_timedelta(transcript_df["end_time"])
    transcript_df["content"] = transcript_df["alternatives"].apply(lambda row: row[0]["content"])
    transcript_df["confidence"] = transcript_df["alternatives"].apply(lambda row: row[0]["confidence"]).astype(float)
    # Nudge into (0, 1] before the log so confidence == 0 stays finite.
    transcript_df["confidence"] = (transcript_df["confidence"] + 1e-5).clip(upper=1.000)
    transcript_df["confidence"] = np.log(transcript_df["confidence"])
    if "speaker_labels" in results["results"]:
        speaker_df = pd.DataFrame(results["results"]["speaker_labels"]["segments"])
        speaker_df["start_time"] = _to_timedelta(speaker_df["start_time"])
        speaker_df["end_time"] = _to_timedelta(speaker_df["end_time"])
        transcript_df.set_index("start_time", inplace=True)

        def _segment_content(row):
            # Items whose start falls inside [segment start, segment end).
            mask = (row["start_time"] <= transcript_df.index) & (transcript_df.index < row["end_time"])
            content = " ".join(transcript_df.loc[mask, "content"])
            confidence = transcript_df.loc[mask, "confidence"].sum()
            return content, confidence

        speaker_df[["content", "confidence"]] = speaker_df.apply(_segment_content, axis=1, result_type="expand")
        speaker_df.rename(columns={"speaker_label": "speaker"}, inplace=True)
        if speaker_labels:
            speaker_df["speaker"] = speaker_df["speaker"].apply(speaker_labels.get)
        transcript_df = speaker_df[["speaker", "content", "confidence", "start_time", "end_time"]]
    return transcript_df
def _transcribe_audio(s3_target_path, s3_source_path, name=None, speaker_ct=2,
                      language="en-US", region="us-west-1", retries=10, **kwargs):
    """Run (or reuse) an AWS Transcribe job and return the parsed transcript.

    If ``s3_target_path`` does not already exist, starts a transcription job
    for ``s3_source_path``, polls with exponential backoff, copies the
    interim output to ``s3_target_path``, then parses it with
    _parse_transcription (extra kwargs are forwarded there).

    NOTE(review): ``region`` is accepted but never used — confirm whether the
    boto3 clients should be created with ``region_name=region``.
    """
    if not s3_exists(s3_target_path):
        transcribe_client = boto3.client("transcribe")
        # Job names must consist of word characters only.
        job_name = name or re.sub(r"\W", "_", s3_target_path)
        s3_source_cmps = urllib.parse.urlparse(s3_source_path)
        s3_target_cmps = urllib.parse.urlparse(s3_target_path)
        transcribe_client.start_transcription_job(**{
            "TranscriptionJobName": job_name,
            "LanguageCode": language,
            # Media format inferred from the source file extension.
            "MediaFormat": os.path.splitext(s3_source_cmps.path)[-1][1:],
            "Media": {
                "MediaFileUri": s3_source_path,
            },
            "OutputBucketName": s3_target_cmps.netloc,
            "Settings": {
                "ShowSpeakerLabels": True,
                "MaxSpeakerLabels": speaker_ct,
            }
        })
        assert(retries >= 0)
        # Poll with exponential backoff (1s, 2s, 4s, ...) until the job
        # leaves IN_PROGRESS or retries are exhausted.
        for ix in range(retries + 1):
            job = transcribe_client.get_transcription_job(TranscriptionJobName=job_name).get("TranscriptionJob", {})
            if job.get("TranscriptionJobStatus") != "IN_PROGRESS":
                logging.info("Stopping %s job: %s", job_name, job)
                break
            sleep_s = 2.000 ** ix
            logging.debug("Retrying %s job after %.0f seconds", job_name, sleep_s)
            time.sleep(sleep_s)
        # Transcribe returns an HTTPS URL; rewrite it into an s3:// URI.
        # NOTE(review): if the job is still IN_PROGRESS after all retries,
        # TranscriptFileUri may be absent and re.sub would receive None — confirm.
        s3_interim_path = re.sub(r"https://s3\..*\.amazonaws\.com/", "s3://", job.get("Transcript", {}).get("TranscriptFileUri"))
        s3_interim_cmps = urllib.parse.urlparse(s3_interim_path)
        if job["TranscriptionJobStatus"] != "COMPLETED":
            logging.error("Couldn't complete %s job: %s [%s]: %s", job_name, job["TranscriptionJobStatus"],
                          job.get("FailureReason"), s3_interim_path)
            return None
        # Move the interim output to the requested target, then clean up.
        s3_client = boto3.client("s3")
        s3_client.copy_object(**{
            "CopySource": {
                "Bucket": s3_interim_cmps.netloc,
                "Key": s3_interim_cmps.path.lstrip("/"),
            },
            "Bucket": s3_target_cmps.netloc,
            "Key": s3_target_cmps.path.lstrip("/"),
        })
        s3_client.delete_object(Bucket=s3_interim_cmps.netloc, Key=s3_interim_cmps.path.lstrip("/"))
    transcript_df = _parse_transcription(s3_target_path, **kwargs)
    return transcript_df
|
#!/usr/bin/python
"""finding the area of a general space using only the boundry points
this will only work for solid space as of now"""
# Pick's theorem: A = I + B/2 - 1 (B = boundary points, I = interior points).
# Python 2 script (print statement / raw_input / xrange).
print "points:"
raw_points = raw_input().split(' ')  # flat "x1 y1 x2 y2 ..." input
points = []
if (len(raw_points) % 2 == 1):
    raise Exception('not a list of points')
else:
    # Pair consecutive values into (x, y) tuples.
    for i in xrange(0,len(raw_points),2):
        points.append((int(raw_points[i]), int(raw_points[i+1])))
"""figure out how to count internal points"""
I = 0  # interior point count — placeholder, see the note above
area = len(points)/2.0 + I - 1
print points
print area
|
from measurements.api.viewsets import TemperatureViewSet, HumidityViewSet, ElectricityViewSet, WaterViewSet, PollutionViewSet, ConfigViewSet
from rest_framework import routers
# Register one REST endpoint per measurement type on a default DRF router.
# NOTE(review): ``base_name`` was renamed ``basename`` in DRF 3.9 and removed
# in DRF 3.11 — confirm the pinned DRF version before upgrading.
router = routers.DefaultRouter()
router.register('temperature', TemperatureViewSet, base_name='temperature')
router.register('humidity', HumidityViewSet, base_name='humidity')
router.register('electricity', ElectricityViewSet, base_name='electricity')
router.register('water', WaterViewSet, base_name='water')
router.register('pollution', PollutionViewSet, base_name='pollution')
router.register('config', ConfigViewSet, base_name='config')
|
# print(f" Episode {episode_trajectory}")
# print(f" State Value {new_state[0]} ") |
import copy
class Polynomial:
    """Sparse polynomial stored as an {exponent: coefficient} dict."""

    def __init__(self, dictpoly=None):
        """Initialize from an {exponent: coefficient} mapping.

        Uses None as the default instead of the original shared mutable
        ``{}`` (a classic mutable-default-argument bug: every default-
        constructed Polynomial aliased the same dict).
        """
        self.dictpoly = dictpoly if dictpoly is not None else {}

    def printpoly(self, expo):
        """Return the coefficient of exponent ``expo`` (0 when absent)."""
        # dict.has_key() only exists in Python 2; ``in`` works everywhere.
        if expo in self.dictpoly:
            return self.dictpoly[expo]
        return 0

    def __str__(self):
        """Render the polynomial as a string of signed "<coef>X**<expo>" terms."""
        self.str = ""
        for key, value in self.dictpoly.items():
            if value == 0:
                continue  # zero terms are omitted
            if value > 0:
                self.str += "+"
            # negative coefficients already carry their '-' sign
            self.str += str(value) + "X**" + str(key)
        return self.str

    def __len__(self):
        """Return the highest exponent present in the polynomial."""
        return max(self.dictpoly.keys())

    def __add__(self, other):
        """Return a new Polynomial equal to self + other."""
        copydictpoly = copy.deepcopy(self.dictpoly)
        for key in other.dictpoly.keys():
            if key in self.dictpoly:
                copydictpoly[key] += other.dictpoly[key]
            else:
                copydictpoly[key] = other.dictpoly[key]
        return Polynomial(copydictpoly)

    def __sub__(self, other):
        """Return a new Polynomial equal to self - other."""
        copydictpoly = copy.deepcopy(self.dictpoly)
        for key in other.dictpoly.keys():
            if key in self.dictpoly:
                copydictpoly[key] -= other.dictpoly[key]
            else:
                copydictpoly[key] = -other.dictpoly[key]
        return Polynomial(copydictpoly)
### Importing polynomials by user ################
# Interactive entry loop (Python 2: raw_input / print statements).
dict1 = {}
decision1 = str('y')
print "enter polynomial1 terms"
while(decision1 == 'y'):
    expo1 = int(raw_input("enter exponent value \n"))
    coef1 = int(raw_input("enter coeff value \n"))
    # NOTE(review): re-entering the same exponent overwrites the old term.
    dict1[expo1]=coef1
    decision1 = raw_input("y to enter another term, n to polynomial2\n")
dict2 = {}
decision2 = str('y')
print "enter polynomial2 terms"
while(decision2 == 'y'):
    expo2 = int(raw_input("enter exponent value \n"))
    coef2 = int(raw_input("enter coeff value \n"))
    dict2[expo2]=coef2
    decision2 = raw_input("y to enter another term, press n to exit\n")
pol1 = Polynomial(dict1)
pol2 = Polynomial(dict2)
##################################################
# Demonstrate __str__, __len__, __add__ and __sub__.
print "polynomial1 =\t", pol1
print "highest exponent of polynomial: ", len(pol1)
print "polynoimal2 =\t", pol2
print "highest exponent of polynomial: ", len(pol2)
print "polynomial1 + polynomial2 =\t", pol1+pol2
print "polynomial1 - polynomial2 =\t", pol1-pol2
##################################################
|
def show_magicians(magicians_name):
    """Print each magician's name on its own line."""
    for performer in magicians_name:
        print(performer)
# Demo: print a fixed list of magician names.
magicians_name = ['aaa', 'bbb', 'ccc']
show_magicians(magicians_name)
|
import glob
import os
# Print every readable C source file in the current directory.
readable_c_files = (path for path in glob.glob("*.c") if os.access(path, os.R_OK))
for path in readable_c_files:
    print(path)
|
from nanpy import (ArduinoApi, SerialManager, Servo)
from time import sleep
# Connect to the Arduino over serial; report (but do not abort) on failure.
try:
    connection = SerialManager()
    a = ArduinoApi(connection=connection)
except Exception:
    # Was a bare except, which also swallowed KeyboardInterrupt/SystemExit.
    print("Failed to connect to Arduino")

# Collect up to six servo PWM pin numbers from the user; any non-numeric
# input (or EOF) ends entry early.
servoPins = list()
for i in range(6):
    try:
        number = int(input("Enter ServoPWM pin number: "))
    except Exception:
        print("End of user input!")
        break
    servoPins.append(number)

# Setup arduino pins like in arduino IDE: attach a Servo per pin, park at 0.
servos = []
for pin in servoPins:
    servo = Servo(pin)
    servo.write(0)
    servos.append(servo)

try:
    while True:
        try:
            # Continuously report every servo's current angle until Ctrl-C.
            while True:
                for i in range(len(servos)):
                    print("Current State: ", i, ": ", servos[i].read())
        except KeyboardInterrupt:
            # Bug fix: input() returns strings; the original indexed the
            # list with a str (TypeError) and wrote a str angle.
            serv = int(input("What servo? : "))
            change = int(input("What change? : "))
            servos[serv].write(change)
            print("Changed!")
except Exception:
    # Bug fix: the original cleared ``servos`` *before* the detach loop,
    # so no servo was ever detached.
    for servo in servos:
        servo.detach()
    servos = []
    print("Servo EXITING")
|
import json
import datetime
from yoolotto.legacy.models import Drawings as LegacyDrawings, Drawings2 as LegacyDrawings2
from yoolotto.lottery.game.manager import GameManager
from yoolotto.lottery.models import LotteryGameComponent, LotteryDraw
from yoolotto.util.serialize import dumps
class MigrateDraws(object):
    """One-shot migration of legacy draw records into LotteryDraw rows.

    NOTE(review): ``merged.keys()`` followed by ``.sort()`` only works on
    Python 2 (dict views have no ``sort()`` on Python 3) — this script
    appears to target Python 2.
    """
    def __init__(self):
        # Retrieve Components
        self._powerball = LotteryGameComponent.objects.get(identifier="Powerball")
        self._megamillions = LotteryGameComponent.objects.get(identifier="MegaMillions")
        self._megaplier = LotteryGameComponent.objects.get(identifier="Megaplier")
    def run(self):
        """Merge both legacy tables by draw date and migrate each record."""
        start = datetime.date(2010, 1, 1)
        # Create Merged Representation
        merged = {}
        for _cls in LegacyDrawings, LegacyDrawings2:
            records = _cls.objects.all()
            for record in records:
                date = datetime.datetime.fromtimestamp(record.dtime).date()
                if date < start:
                    continue  # ignore draws before 2010
                if date not in merged:
                    merged[date] = []
                merged[date].append(record)
        # Sort (process draws in chronological order)
        keys = merged.keys()
        keys.sort()
        for key in keys:
            for record in merged[key]:
                _cls = None  # NOTE(review): unused — refactor leftover?
                if record.type == "powerball":
                    self._handle_powerball(key, record)
                elif record.type == "megamillions":
                    self._handle_megamillions(key, record)
                else:
                    raise RuntimeError()
    def _handle_powerball(self, date, record):
        """Validate and persist one Powerball draw for ``date``."""
        existing = LotteryDraw.objects.filter(component=self._powerball,
                                              date=date)
        result = dumps(json.loads(record.winnings))
        # Check Data Conflicts
        if existing:
            existing = existing[0]
            if existing.result and existing.result != result:
                raise RuntimeError("Data Mismatch -%s- -%s-" % (existing.result, result))
        # Check Data
        # NOTE(review): this keys GameManager by parent.name while
        # _handle_megamillions uses parent.code — confirm which is intended.
        handler = GameManager.get(self._powerball.parent.name)
        _result = handler.decode(record.winnings)
        handler.validate_numbers(json.loads(record.winnings))
        # Create
        try:
            draw = LotteryDraw.objects.get(component=self._powerball, date=date)
        except LotteryDraw.DoesNotExist:
            draw, created = LotteryDraw.objects.get_or_create(
                component=self._powerball, date=date)
        finally:
            # ``finally`` runs in both branches: update and save either way.
            draw.result = _result
            draw.official = True
            draw.save()
    def _handle_megamillions(self, date, record):
        """Validate and persist one MegaMillions draw plus its Megaplier."""
        winnings = json.loads(record.winnings)
        # The last number is the Megaplier; the rest are the base draw.
        _base = winnings[:-1]
        _mega = winnings[-1:]
        base = dumps(_base)
        mega = dumps(_mega)
        # Check Existing
        existing = LotteryDraw.objects.filter(component=self._megamillions,
                                              date=date)
        if existing:
            existing = existing[0]
            if existing.result and existing.result != base:
                raise RuntimeError("Data Mismatch -%s- -%s-" % (existing.result, base))
        # Check Existing (Megaplier)
        existing = LotteryDraw.objects.filter(component=self._megaplier,
                                              date=date)
        if existing:
            existing = existing[0]
            if existing.result and existing.result != mega:
                raise RuntimeError("Data Mismatch -%s- -%s-" % (existing.result, mega))
        # Check Data
        handler = GameManager.get(self._megamillions.parent.code)
        handler.validate_numbers(_base)
        result_base = handler.decode(base)
        result_mega = handler.decode(mega, "Megaplier")
        # Create
        try:
            draw = LotteryDraw.objects.get(component=self._megamillions, date=date)
        except LotteryDraw.DoesNotExist:
            draw, created = LotteryDraw.objects.get_or_create(
                component=self._megamillions, date=date)
        finally:
            draw.result = result_base
            draw.official = True
            draw.save()
        # Create Megaplier
        try:
            draw = LotteryDraw.objects.get(component=self._megaplier, date=date)
        except LotteryDraw.DoesNotExist:
            draw, created = LotteryDraw.objects.get_or_create(
                component=self._megaplier, date=date)
        finally:
            draw.result = result_mega
            draw.official = True
            draw.save()
# Layered settings: prefer pglocal, fall back to sqlite, then overlay
# production settings when present.
# Fix: the original bare ``except`` clauses also swallowed real errors
# *inside* the settings modules (syntax errors, bad config); narrowing to
# ImportError keeps the fallback behavior while surfacing genuine bugs.
try:
    from .pglocal import *
except ImportError:
    try:
        from .sqlite import *
    except ImportError:
        pass
try:
    from .production import *
except ImportError:
    pass
#encoding=utf-8
import pymongo,os
def connect_mongodb():
    """Exploratory MongoDB connectivity test (Python 2, legacy pymongo API).

    NOTE(review): the bare ``return`` makes this function return None; the
    ``return client.test`` below it is unreachable, so callers expecting a
    db handle (see str_process/all_file_process) receive None — confirm
    which return was intended.
    """
    # servers="mongodb://localhost:27017"
    # conn = pymongo.Connection(servers)
    # print conn.database_names()
    # db = conn.my_mongodb  # connect to the database
    client = pymongo.MongoClient("localhost", 27017)
    print client.database_names()
    db=client.test
    print db.collection_names()
    # Recreate the "docs" collection from scratch.
    if "docs" in db.collection_names():
        db.drop_collection("docs")
    db.create_collection("docs",)
    docs=db.docs
    for doc in docs.find():
        print doc
    restaurants=db.restaurants
    print restaurants.find().count()
    restaurant={"sb":"test"}
    restaurants.insert(restaurant)
    restaurants.find_one({u"sb":u"test"}) # please use "u" for the unicode
    print restaurants.find().count()
    return
    return client.test
    #return db
def str_process(string,db):
    """Parse one space-separated log line and insert it into db.user.

    Expected format: "<projectcode> <pagename> <pageview> <bytes>\n".
    Returns None (blank lines are skipped).
    """
    d={}
    if string == '\n':
        return  # skip blank lines
    string2=str(string)
    print '-----'+string
    string2=string2.split(' ')
    print '---------------'
    print string2
    for i in string2:
        print i
    print '------------'
    # NOTE(review): this split result is discarded; the trailing newline is
    # actually stripped by the [:-1] slice below — confirm this line is dead.
    string2[3].split('\n')
    d['projectcode']=string2[0]
    d['pagename']=string2[1]
    d['pageview']=string2[2]
    d['bytes']=string2[3][:-1]
    db.user.insert(d)
def file_process(source_file,db):
    """Feed every line of ``source_file`` through str_process."""
    string2=''
    f=open(source_file,'r')
    print 'file name :'+source_file
    while True:
        string2=f.readline()
        if string2 == '':
            break  # EOF
        string2=str_process(string2,db)
        print string2  # str_process returns None, so this prints None
def get_dir_list(dir):
    """Return the names of all entries (files and subdirs) inside ``dir``."""
    return [entry for entry in os.listdir(dir)]
def all_file_process():
    """Prompt for a directory and process every file in it (except this script)."""
    dir_file_name=''
    dir_list=[]
    dir_file_name=raw_input('please input the dir name:')  # Python 2 input
    dir_list=get_dir_list(dir_file_name)
    print dir_list
    db=connect_mongodb()  # NOTE(review): connect_mongodb() returns None — see above
    for i in dir_list:
        if str(i) != 'log_file_process.py':
            # NOTE(review): passes the bare filename, not joined with the
            # directory — only works when run from inside that directory.
            file_process(str(i),db)
# NOTE(review): module-level call — runs at import time, before main().
all_file_process()
def main():
    """Entry point: run the MongoDB connectivity test."""
    connect_mongodb()
if __name__=="__main__":
    main()
from flask import Flask,render_template,request,redirect,url_for
import pymysql
# Flask application object; the route handlers below register against it.
app=Flask(__name__)
@app.route("/update_action/<int:movie_id>", methods=['GET','POST'])
def update_action(movie_id):
    """Apply a submitted edit to one movie row, then redirect to the index.

    NOTE(review): a GET request falls through and returns None, which Flask
    rejects at runtime — confirm GET should be allowed on this route.
    """
    if request.method=="POST":
        # Parameterized UPDATE (values are escaped by the driver).
        sql="update new_movies.my_movies set movie_name=(%s), timing=(%s), location=(%s) where movie_id=(%s)"
        n=request.form['movie_name']
        print(n)  # debug output
        args=(request.form['movie_name'], request.form['timing'], request.form['location'],movie_id)
        cur=con.cursor()  # ``con`` is the module-level connection defined below
        cur.execute(sql,args)
        cur.execute('commit')  # NOTE(review): con.commit() is the idiomatic form
        return redirect(url_for('index'))
    #return render_template('index1.html')
@app.route("/update_movie/<int:movie_id>", methods=['GET','POST'])
def update_movie(movie_id):
    """Render the edit form pre-filled with one movie's current row.

    NOTE(review): returns None for GET requests (Flask error) — confirm the
    list page always POSTs here.
    """
    if request.method=="POST":
        sql="select * from new_movies.my_movies where movie_id=(%s)"
        # NOTE(review): ``(movie_id)`` is just an int, not a 1-tuple; pymysql
        # accepts a scalar, but ``(movie_id,)`` would be clearer.
        args=(movie_id)
        ulist = []
        cur=con.cursor()
        cur.execute(sql, args)
        #ulist=cur.fetchone()
        for item in cur:
            ulist.append(item)
        return render_template('update.html', ulist=ulist)
@app.route("/delete_movie/<int:movie_id>", methods=["POST"])
def delete_movie(movie_id):
    """Delete one movie row and render a confirmation page with its name."""
    cur=con.cursor()
    # Fetch the name first so it can still be shown after deletion.
    sql1="select movie_name from new_movies.my_movies where movie_id=(%s)"
    cur.execute(sql1, movie_id)
    movie_name=cur.fetchone()
    sql="Delete from new_movies.my_movies where movie_id=(%s)"
    cur.execute(sql,movie_id)
    cur.execute("commit")
    #return "<h3>Contact Deleted Successfully</h3>"
    # NOTE(review): fetchone() returns None for an unknown id, making the
    # subscript below raise TypeError — confirm ids are always valid.
    print(movie_name[0])
    return render_template('delete.html', movie_name=movie_name[0])
@app.route('/',methods=['GET','POST'])
def index():
    """List all movies; on POST, first insert the submitted movie."""
    if(request.method=="POST"):
        #movie_id=request.form["movie_id"]
        movie_name=request.form["movie_name"]
        timing=request.form["timing"]
        location=request.form["location"]
        print(timing)  # debug output
        # Parameterized INSERT (values are escaped by the driver).
        sql="INSERT INTO new_movies.my_movies(movie_name,timing,location) VALUES(%s,%s,%s)"
        args=(movie_name,timing,location)
        print(args)  # debug output
        cur=con.cursor()
        cur.execute(sql,args)
        cur.execute("commit")
    # Always (GET and POST) fetch and render the full movie list.
    sql='select * from new_movies.my_movies'
    cur=con.cursor()
    cur.execute(sql)
    #print(cur)
    mylist=[]
    for c in cur:
        mylist.append(c)
    # print(mylist)
    return render_template('index.html', mylist=mylist)
# Module-level MySQL connection shared by all request handlers.
# NOTE(review): credentials are hard-coded — move to configuration/env vars.
con=pymysql.connect(host='localhost',
                    port=3306,
                    user='root',
                    password='admin',
                    db='new_movies'
                    )
if __name__=='__main__':
    # NOTE(review): debug=True must not be enabled in production.
    app.run(debug=True)
import argparse
import pathlib
from common import copytree, get_package_root
def main(directory, source_folder, target_root):
    """
    Copies the source folder and its subfolders to the package, under the
    desired target_root. This will not verify the target_root is proper.

    Raises:
        FileNotFoundError: if the source folder or the package root is missing.
    """
    source_folder = pathlib.Path(source_folder)
    if not source_folder.exists():
        raise FileNotFoundError(
            "Source folder not found: {}".format(source_folder))
    package_root = get_package_root(directory)
    if not package_root.exists():
        raise FileNotFoundError(
            "Root folder for the package was not found: {}".format(directory))
    target_root_dir = pathlib.Path(package_root, 'data', target_root)
    # Fix: create missing parents too (a bare mkdir() raised FileNotFoundError
    # when the 'data' directory did not exist yet); exist_ok avoids the
    # check-then-create race of the original exists() test.
    target_root_dir.mkdir(parents=True, exist_ok=True)
    # NOTE(review): joinpath with an *absolute* source_folder discards
    # target_root_dir entirely (pathlib semantics) — confirm callers always
    # pass a relative path, or switch to source_folder.name.
    copytree(source_folder, target_root_dir.joinpath(source_folder))
parser = argparse.ArgumentParser()
parser.add_argument('-d',
'--directory',
help="""Root directory to create for this package. You can
also set the PKG_ROOT_DIRECTORY environment variable. This
directory must be an absolute path.""",
required=False,
default=None)
parser.add_argument('-s',
'--source_folder',
help="""Path to the directory you would like to add to
your package.""",
required=True)
parser.add_argument('-t',
'--target_root',
help="""Installation target root. See NIPM documentation
for options""",
required=True)
if __name__ == '__main__':
    # Parse CLI arguments and run the copy.
    args = parser.parse_args()
    main(args.directory, args.source_folder, args.target_root)
|
# Reads a number as a string and rewrites one digit so the digit sum becomes
# divisible by 3 — presumably maximizing the result; TODO confirm the exact
# contest statement this solves.
n = input ()
l = len (n)
s = 0
# s = digit sum of n
for i in range (l): s += int (n [i])
# s1 in {1, 2, 3}: increment needed to reach the next multiple of 3
s1 = 3 - s % 3
for i in range (l):
    p = int (n [i]) + s1
    if p < 10:
        # Bump the digit as high as possible while keeping the same
        # residue mod 3, then substitute it and stop.
        p += (9 - p) // 3 * 3
        n = n [:i] + str (p) + n [i + 1:]
        print (n)
        break
    elif i == l - 1:
        # No digit could be increased: adjust the last digit instead.
        # NOTE(review): the quadratic maps s1 -> offset; unverified — confirm.
        p = int (p - 1.5 * s1 ** 2 + 4.5 * s1 - 6)
        n = n [:-1] + str (p)
        print (n)
# spiral.py
# COMP9444, CSE, UNSW
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import math
class PolarNet(torch.nn.Module):
    """Classifier that maps (x, y) input to polar (r, theta) before a
    Linear(2, num_hid) -> tanh -> Linear(num_hid, 1) -> sigmoid stack.
    """
    def __init__(self, num_hid):
        super(PolarNet, self).__init__()
        input_nodes = 2
        output_nodes = 1
        self.layer_one = nn.Linear(input_nodes, num_hid)
        self.tanh = nn.Tanh()
        self.layer_two = nn.Linear(num_hid, output_nodes)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input):
        # Convert Cartesian (x, y) to polar (r, theta) with torch ops.
        # Fix: the original went through input.numpy() and wrote the polar
        # values back into that array, which (a) mutated the caller's input
        # tensor in place via the shared buffer and (b) broke autograd /
        # non-CPU tensors. This version is pure and differentiable.
        r = torch.sqrt(input[:, 0] ** 2 + input[:, 1] ** 2)
        theta = torch.atan2(input[:, 1], input[:, 0])
        polar = torch.stack((r, theta), dim=1)
        hidden = self.tanh(self.layer_one(polar))
        output = self.sigmoid(self.layer_two(hidden))
        # Exposed for the assignment's hidden-unit plotting hook.
        self.hidden_layers = [hidden]
        return output
class RawNet(torch.nn.Module):
    """Two hidden tanh layers of ``num_hid`` units each, sigmoid output.

    Works directly on raw (x, y) coordinates. Both post-tanh hidden
    activations are stashed in ``self.hidden_layers`` on every forward
    pass so they can be plotted afterwards.
    """

    def __init__(self, num_hid):
        super(RawNet, self).__init__()
        in_dim = 2
        out_dim = 1
        self.layer_one = nn.Linear(in_dim, num_hid)
        self.tanh = nn.Tanh()
        self.layer_two = nn.Linear(num_hid, num_hid)
        self.output_layer = nn.Linear(num_hid, out_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input):
        first_hidden = self.tanh(self.layer_one(input))
        second_hidden = self.tanh(self.layer_two(first_hidden))
        prediction = self.sigmoid(self.output_layer(second_hidden))
        self.hidden_layers = [first_hidden, second_hidden]
        return prediction
class ShortNet(torch.nn.Module):
    """Widening network: num_hid units, then 2*num_hid units, sigmoid out.

    ``self.hidden_layers`` is refreshed each forward pass with the first
    (post-tanh) hidden activation and the second layer's PRE-tanh output,
    matching the original implementation exactly.
    """

    def __init__(self, num_hid):
        super(ShortNet, self).__init__()
        in_dim = 2
        out_dim = 1
        first_width = num_hid
        second_width = first_width * 2
        self.layer_one = nn.Linear(in_dim, first_width)
        self.tanh = nn.Tanh()
        self.layer_two = nn.Linear(first_width, second_width)
        self.output_layer = nn.Linear(second_width, out_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input):
        hid_a = self.tanh(self.layer_one(input))
        pre_b = self.layer_two(hid_a)
        hid_b = self.tanh(pre_b)
        prediction = self.sigmoid(self.output_layer(hid_b))
        # NOTE: second stored tensor is pre-tanh, as in the original.
        self.hidden_layers = [hid_a, pre_b]
        return prediction
## hid1 = 9
#hid2 = 18
#init = 0.25
def graph_hidden(net, layer, node):
    # Visualise the activation region of a single hidden unit:
    # `layer` is 1-based into net.hidden_layers, `node` indexes the unit.
    xrange = torch.arange(start=-7,end=7.1,step=0.01,dtype=torch.float32)
    yrange = torch.arange(start=-6.6,end=6.7,step=0.01,dtype=torch.float32)
    # Cartesian product of the two axes -> one (x, y) row per grid cell.
    xcoord = xrange.repeat(yrange.size()[0])
    ycoord = torch.repeat_interleave(yrange, xrange.size()[0], dim=0)
    grid = torch.cat((xcoord.unsqueeze(1),ycoord.unsqueeze(1)),1)
    with torch.no_grad(): # suppress updating of gradients
        net.eval()
        # Forward pass populates net.hidden_layers as a side effect.
        output = net(grid)
        # print(output.size()) # toggle batch norm, dropout
        output = net.hidden_layers[layer - 1][:, node]
        # print(output.size()) # toggle batch norm, dropout
        net.train() # toggle batch norm, dropout back again
        # Threshold the (tanh) activation at zero to get a binary region.
        pred = (output >= 0.0).float()
        # plot function computed by model
        plt.clf()
        plt.pcolormesh(xrange,yrange,pred.cpu().view(yrange.size()[0],xrange.size()[0]), cmap='Wistia')
|
#### -*- coding:utf-8 -*- #######
import socket
from protocol import PackagedConnection, ClientDisconnected, BrokenPackage
from server import PING, PONG, CLOSE
from threading import Thread
def main():
    """Open one connection to the local test server, run the PING/PONG
    handshake, stream the fixed payloads, then request a close."""
    payloads = [
        b"SOME DATA",
        b"B" * 5000,
        b"",
        b"1234567890",
        b"LOL",
    ]
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(('127.0.0.1', 7777))
    conn = PackagedConnection(sock)
    # Simplified handshake: any failure on the client just raises.
    conn.send(PING)
    server_pong = conn.recv()
    assert server_pong == PONG, server_pong
    for payload in payloads:
        conn.send(payload)
    conn.send(CLOSE)
    conn.close()
if __name__ == "__main__":
    # Hammer the server with many simultaneous client connections and
    # count how many of them completed without raising.
    count = 0

    def execute():
        main()
        # Only reached when main() raised nothing.
        global count
        count += 1

    threads = []
    for _ in range(1000):
        threads.append(Thread(target=execute))
    for worker in threads:
        worker.start()
    for worker in threads:
        worker.join()
    print('Finished successfuly', count)
|
import socketio
sio = socketio.Client()
def sendMessage(event_name, data, namespaceSIO):
    """Emit *data* as *event_name* on the given Socket.IO namespace."""
    #callback function
    sio.emit(event_name, data, namespace=namespaceSIO)
    print("message emitted")
@sio.on('responsepi', namespace='/socket')
def responsepi(msg):
    """Handler for 'responsepi' events pushed by the Pi's server."""
    print('I received a message!', msg['data'])
def connectToPi():
    """Connect to the Raspberry Pi's Socket.IO server and report our sid."""
    sio.connect('http://raspberrypi.local:5050/', namespaces=['/socket'])
    print('my sid is', sio.sid)
|
#!/usr/bin/env python
import math
import pickle
from random import randint
from itertools import permutations
from sets import Set
MAX_SEARCH_LEN=2
C1=1.95
C2=1.95
class node():
    # Tree node for the Monte-Carlo-style search over region sequences.
    # (Python 2 module: print statements are intentional.)
    def __init__(self,parent,val):
        self.val=val              # the region this node represents
        self.parent=parent        # None for the root
        self.nextDict=None        # region -> child node, filled by explore()
        self.completed=False      # True once the whole subtree is exhausted
        self.explored=False
    def explore(self,e):
        # Lazily create one child per region reachable from self.val in env e.
        self.explored=True
        self.nextDict={}
        for sr in get_available_regions(self.val,e):
            newNode=node(self,sr)
            self.nextDict[sr]=newNode
    def get_next_and_explore(self,r,e):
        # Descend to child r, exploring it first if it has not been expanded.
        if self.nextDict.get(r) is None:
            print "ERROR",r
        if self.nextDict[r].explored is True:
            return self.nextDict[r]
        self.nextDict[r].explore(e)
        return self.nextDict[r]
    def check_parent_node_completion(self):
        # Mark this node completed once every child is, then recurse upward.
        for k,v in self.nextDict.items():
            if v.completed is False:
                return
        self.completed=True
        if self.parent is None:
            return
        self.parent.check_parent_node_completion()
    def check_and_complete_node(self):
        # Force-complete this leaf (dropping its children) and propagate.
        self.completed=True
        self.nextDict={}
        if self.parent is None:
            return
        self.parent.check_parent_node_completion()
    def printValues(self):
        # Debug dump of the subtree.
        print self.val,self.completed
        if self.nextDict is None:
            return
        for k,v in self.nextDict.items():
            v.printValues()
    def getSize(self):
        # Count of explored nodes in this subtree (unexplored counts as 0).
        val=1
        if self.nextDict is None:
            return 0
        for k,v in self.nextDict.items():
            val+=v.getSize()
        return val
def get_available_regions(init_r,e):
    """List the regions of *e* within MAX_SEARCH_LEN Manhattan steps of *init_r*."""
    return [r for r in e.region if _dist(init_r, r) <= MAX_SEARCH_LEN]
def _check_init(values,s,t):
if values['N'].get(s) == None:
values['N'][s]=0.
if values['Na'].get(s) == None:
values['Na'][s]={}
values['Na'][s][t]=0.
elif values['Na'][s].get(t)==None:
values['Na'][s][t]=0.
if values['Q'].get(s) == None:
values['Q'][s]={}
values['Q'][s][t]=0.
elif values['Q'][s].get(t)==None:
values['Q'][s][t]=0.
def _step_v(v,s,t):
v['N'][s]+=1
v['Na'][s][t]+=1
def _ucb(r,n,na):
    """UCB1-style score: value *r* plus a C1-weighted exploration bonus."""
    exploration = math.sqrt(math.log(n + 1) / (na + 1))
    return r + C1 * exploration
def get_candidate_task(v,objectives,s,explore=True):
    """Choose a task for state *s*.

    Never-tried tasks are returned immediately; otherwise the task with
    the highest UCB score (or plain Q value when explore is off) wins.
    Visit counters for the chosen task are incremented before returning.
    """
    best_val = None
    best_task = None
    for task in objectives:
        _check_init(v, s, task)
        if explore == True:
            score = _ucb(v['Q'][s][task], v['N'][s], v['Na'][s][task])
        else:
            score = v['Q'][s][task]
        # First visit to (s, task) -> take it unconditionally.
        if v['Na'][s][task] == 0:
            _step_v(v, s, task)
            return task
        if best_val is None or score > best_val:
            best_val = score
            best_task = task
    _step_v(v, s, best_task)
    return best_task
def task_value_update(v,s,t,r):
    """Incremental-mean update of Q[s][t] toward the new reward *r*."""
    delta = r - v['Q'][s][t]
    v['Q'][s][t] += delta / (v['Na'][s][t])
def _dist(r1,r2):
return abs(r1[0]-r2[0])+abs(r1[1]-r2[1])
def get_sub_env_state(init_loc,env,sub_env):
    # Encode an ordered tuple of regions as a hashable state. For each
    # region we record (per-objective observations at that cell, Manhattan
    # distance travelled from the previous region) plus a "repeated" index
    # pointing at an earlier occurrence of the same region (0 otherwise).
    environment_state=env.getObs()#[x][y][obj]
    # NOTE(review): num_obj is computed but never used.
    num_obj=len(env.objectives)
    sub_env_state=()
    temp_r_list=[]
    for r in sub_env:
        r_state=()
        for k,v in env.objectives.items():
            r_state=r_state+(environment_state[r[0]][r[1]][k],)
        regional_state=(r_state,_dist(init_loc,r))
        repeated=0
        for i in range(len(temp_r_list)):
            if r==temp_r_list[i]:
                repeated=i
                break
        #sub_env_state=sub_env_state + (regional_state,)
        sub_env_state=sub_env_state + (regional_state,)+(repeated,)
        # Distances are measured leg by leg along the visit order.
        init_loc=r
        temp_r_list.append(r)
    return sub_env_state
def printV(v):
    # Debug helper: dump every Qe (environment-value) entry.
    if v['Qe'] is not None:
        for k,a in v['Qe'].items():
            print 'Qe',k,a
def update_full_sub_envs(env_dict,sub_env,r,mt):
    """Keep, per sub-environment key, the best (reward, task) pair seen."""
    current = env_dict.get(sub_env)
    # Original carried a '#BIG TYPO?' note on this comparison; behavior kept.
    if current is None or current[0] < r:
        env_dict[sub_env] = (r, mt)
def _init_env_values(v,s):
if v['Ne'].get(s) is None:
v['Ne'][s]=0
if v['M2'].get(s) is None:
v['M2'][s]=0
if v['Qe'].get(s) is None:
v['Qe'][s]=0
def sub_env_value_update(v,s,r):
    """Welford-style running mean/variance update for state *s*.

    The update is applied to *s* and, recursively, to every proper prefix
    of the tuple, so prefix statistics stay in sync with their extensions.
    """
    if len(s) < 1:
        return
    # Inline of _init_env_values: make sure the counters exist.
    for table in ('Ne', 'M2', 'Qe'):
        if v[table].get(s) is None:
            v[table][s] = 0
    v['Ne'][s] += 1
    delta = r - v['Qe'][s]
    v['Qe'][s] += delta / (v['Ne'][s])
    delta2 = r - v['Qe'][s]
    v['M2'][s] += delta * delta2
    sub_env_value_update(v, s[:-1], r)
def get_var(v,s):
    """Sample variance of rewards observed at *s* (0 with one sample)."""
    # Inline of _init_env_values: make sure the counters exist.
    for table in ('Ne', 'M2', 'Qe'):
        if v[table].get(s) is None:
            v[table][s] = 0
    if v['Ne'][s] == 1:
        return 0
    return v['M2'][s] / (v['Ne'][s] - 1)
def sub_mod_search(v,env):
    # Greedily grow a set of up to 4 regions (within Manhattan distance 8
    # of the agent) whose best permutation-state Q value is largest.
    sub_env_set=Set()
    r_list=[]
    max_state=None
    for r in env.region:
        if _dist(env.region_position,r) <= 8:
            r_list.append(r)
    while len(sub_env_set)<4:
        _max_val=0
        _region=None
        for r in r_list:
            if r in sub_env_set:
                continue
            temp_r_set=Set()
            temp_r_set.add(r)
            # Score candidate r by the best Q over all orderings of the
            # enlarged set.
            for p in list(permutations(sub_env_set|temp_r_set)):
                s=get_sub_env_state(env.region_position,env,p)
                if v['Q'].get(s) is None:
                    val=0
                else:
                    val=max(v['Q'][s].values())
                if val>_max_val:
                    if _region not in sub_env_set:
                        _max_val=val
                        _region=r
                        max_state=s
        if _region is None:
            return 0,''
        sub_env_set.add(_region)
    return _max_val,max_state
    # NOTE(review): everything below is unreachable (dead code after the
    # return above). It also compares against _max_val while assigning
    # _max_value -- an apparent typo; confirm intent before reviving it.
    _max_state=None
    _max_value=0
    for p in list(permutations(sub_env_set)):
        s=get_sub_env_state(env.region_position,env,p)
        if v['Q'].get(s) is None:
            val=0
        else:
            val=max(v['Q'][s].values())
        if val>_max_val:
            _max_value=val
            _max_state=s
    print "max: ", _max_value, _max_state
    print len(p)
    return _max_val,_max_state
def get_candidate_region(v,env,sub_env,explore,completion_node,num_step):
    # Pick the next region to append to *sub_env* among the completion
    # tree's unfinished children: exploration mode uses a variance-based
    # UCB score with an epsilon-greedy random kick; otherwise the best
    # known Q value wins.
    _max_val=None
    _region=None
    # print completion_node
    for r,node in completion_node.nextDict.items():
        if node.completed is True:
            continue
        # Sanity guards: children should always be reachable from the tail
        # of the current path (or from the agent when the path is empty).
        if len(sub_env)>0:
            if _dist(sub_env[-1],r)>MAX_SEARCH_LEN:
                print sub_env[-1],r
                print "THIS SHOULD NEVER HAPPEN"
                continue
        elif _dist(env.region_position,r)>MAX_SEARCH_LEN:
            print "THIS ALSO SHOULDNT HAPPEN"
            continue
        s=get_sub_env_state(env.region_position,env,sub_env+(r,))
        _init_env_values(v,s)
        if explore is True:
            # Unvisited states are taken immediately.
            if v['Ne'][s]==0:
                return r
            # Random-score chance shrinks as num_step grows.
            if randint(0,100)>5*(1-1/math.exp(num_step/100.)):
                val=v['Qe'][s]+C2*get_var(v,s)/math.sqrt(1+v['Ne'][s])
            else:
                val=randint(0,100)
        else:
            if v['Q'].get(s) is None:
                val=0
            else:
                val=max(v['Q'][s].values())
        if _max_val is None:
            _max_val=val
            _region=r
        elif _max_val < val:
            _max_val=val
            _region=r
    return _region
def get_best_full_sub_envs(sub_env_dict):
    """Return (key, best_reward, best_task) for the highest-reward entry."""
    best_key = None
    best_val = None
    best_task = None
    for key, entry in sub_env_dict.items():
        if best_val is None or entry[0] > best_val:
            best_key = key
            best_val = entry[0]
            best_task = entry[1]
    return best_key, best_val, best_task
def save_data(filename,v):
    """Pickle *v* to *filename* (binary mode, default protocol)."""
    with open(filename, 'wb') as handle:
        pickle.dump(v, handle)
|
import os
import re
class IncludeParser:
    """Extracts #include dependencies from a C/C++ source file.

    In strict mode only angle-bracket includes are reported as standard
    headers; quoted includes are resolved to absolute paths. With
    strict=False every include's basename is treated as standard.
    """

    def __init__(self, strict=True):
        # strict=True: only <...> includes count as standard headers.
        self.__strict = strict

    def get_includes(self, filename, include_dirs, args):
        """Return standard-header basenames plus resolved user-include paths.

        :param filename: source file to scan
        :param include_dirs: extra directories for resolving "..." includes
        :param args: unused; kept for interface compatibility
        :raises IOError: when a user include cannot be resolved to a file
        """
        filedir = os.path.dirname(os.path.abspath(filename))

        def list_includes(path):
            """Split the file's #include lines into (standard, user) lists."""
            standard = []
            user = []
            reg_comp = re.compile(r'^#include\s+(<|")(\S+)(>|")')
            # Fix: the original opened the file without ever closing it;
            # the context manager guarantees the handle is released.
            with open(path) as hfile:
                for line in hfile:
                    reg = reg_comp.match(line)
                    if reg is None:
                        continue
                    include = reg.group(2).strip()
                    if not self.__strict or reg.group(1) == '<':
                        standard.append(os.path.basename(include))
                    else:
                        user.append(include)
            return standard, user

        def get_abs(include, dirs):
            """Resolve a user include against the file's directory, then *dirs*."""
            resolved_path = os.path.realpath(include)
            if not os.path.isabs(include):
                for directory in [filedir] + dirs:
                    resolved_path = os.path.realpath(os.path.join(directory, include))
                    if os.path.isfile(resolved_path):
                        break
            if not os.path.isfile(resolved_path):
                raise IOError(include + " not found")
            return resolved_path

        standard, user = list_includes(filename)
        return standard + [get_abs(include, include_dirs) for include in user]
if __name__ == "__main__":
    # Smoke test: resolve the includes of a local dummy translation unit.
    parser = IncludeParser(strict=False)
    search_dirs = ["/mnt/c/Users/lenno/sandbox", "/usr/include"]
    for inc in parser.get_includes("dummy.cpp", search_dirs, []):
        print(inc)
if __name__ == "__main__":
    # Greet every known user; the administrator gets an extra question.
    usernames = ['beheerder', 'kjevo', 'user345', 'user89', 'user092']
    for username in usernames:
        print("Hello! " + username)
        if username == "beheerder":
            print("Do you want a status rapport? \n")
""" For use in dumping single frame ground truths of EuRoc Dataset
Adapted from https://github.com/ClementPinard/SfmLearner-Pytorch/blob/0caec9ed0f83cb65ba20678a805e501439d2bc25/data/kitti_raw_loader.py
You-Yi Jau, yjau@eng.ucsd.edu, 2019
Rui Zhu, rzhu@eng.ucsd.edu, 2019
"""
from __future__ import division
import numpy as np
from pathlib import Path
from tqdm import tqdm
import scipy.misc
from collections import Counter
from pebble import ProcessPool
import multiprocessing as mp
# Fraction of the machine's cores available for the parallel dump workers.
ratio_CPU = 0.8
default_number_of_process = int(ratio_CPU * mp.cpu_count())
import os, sys
# Make the repository root importable regardless of the current directory.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
import traceback
import coloredlogs, logging
# Colored INFO-level logging on the root logger.
logging.basicConfig()
logger = logging.getLogger()
coloredlogs.install(level="INFO", logger=logger)
import cv2
from kitti_tools.utils_kitti import (
load_velo_scan,
rectify,
read_calib_file,
transform_from_rot_trans,
scale_intrinsics,
scale_P,
)
import dsac_tools.utils_misc as utils_misc
# from utils_good import *
from glob import glob
from dsac_tools.utils_misc import crop_or_pad_choice
from utils_kitti import load_as_float, load_as_array, load_sift, load_SP
import yaml
DEEPSFM_PATH = "/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils/deepSfm"
sys.path.append(DEEPSFM_PATH)
import torch
from models.model_wrap import PointTracker
from models.model_wrap import SuperPointFrontend_torch
from kitti_odo_loader import KittiOdoLoader
from kitti_odo_loader import *
coloredlogs.install(level="INFO", logger=logger)
# coloredlogs.install(level="DEBUG", logger=logger)
class euroc_seq_loader(KittiOdoLoader):
    """Adapts KittiOdoLoader to the EuRoC MAV dataset layout.

    Collects the train/test sequences under ``dataset_dir`` (each under a
    ``mav0/`` subfolder), reads the cam0 image list with pre-matched
    timestamps (``data_f.txt``), the ``sensor.yaml`` calibration and
    KITTI-format poses, and builds per-scene dictionaries for dumping.
    """

    def __init__(
        self,
        dataset_dir,
        img_height=375,
        img_width=1242,
        cam_ids=["00"],  # NOTE(review): mutable default argument, shared across instances
        get_X=False,
        get_pose=False,
        get_sift=False,
        get_SP=False,
        sift_num=2000,
        if_BF_matcher=False,
        save_npy=True,
    ):
        # depth_size_ratio=1):
        # dir_path = Path(__file__).realpath().dirname()
        self.dataset_dir = Path(dataset_dir)
        self.img_height = img_height
        self.img_width = img_width
        self.cam_ids = cam_ids  # ['cam0/data']
        logging.info(f"cam id: {cam_ids}")
        # assert self.cam_ids == ['02'], 'Support left camera only!'
        self.cid_to_num = {"00": 0, "01": 1}
        # NOTE(review): debug is hard-coded True, so only MH_01_easy is used.
        self.debug = True  # True
        if self.debug:
            self.train_seqs = ["MH_01_easy"]
            self.test_seqs = ["MH_01_easy"]
        else:
            self.train_seqs = [
                "MH_01_easy",
                "MH_02_easy",
                "MH_04_difficult",
                "V1_01_easy",
                "V1_02_medium",
                "V1_03_difficult",
            ]
            self.test_seqs = [
                "MH_02_easy",
                "MH_05_difficult",
                "V2_01_easy",
                "V2_02_medium",
                "V2_03_difficult",
            ]
        # to_2darr = lambda x: np.array(x)
        # Each sequence's data lives under its mav0/ subdirectory.
        self.test_seqs = np.char.add(self.test_seqs, "/mav0")
        self.train_seqs = np.char.add(self.train_seqs, "/mav0")
        # self.train_seqs = [4]
        # self.test_seqs = []
        # self.train_seqs = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        # self.test_seqs = []
        # self.map_to_raw = {
        #     "00": "2011_10_03_drive_0027",
        #     "01": "2011_10_03_drive_0042",
        #     "02": "2011_10_03_drive_0034",
        #     "03": "2011_09_26_drive_0067",
        #     "04": "2011_09_30_drive_0016",
        #     "05": "2011_09_30_drive_0018",
        #     "06": "2011_09_30_drive_0020",
        #     "07": "2011_09_30_drive_0027",
        #     "08": "2011_09_30_drive_0028",
        #     "09": "2011_09_30_drive_0033",
        #     "10": "2011_09_30_drive_0034",
        # }
        self.get_X = get_X
        self.get_pose = get_pose
        self.get_sift = get_sift
        self.get_SP = get_SP
        self.save_npy = save_npy
        if self.save_npy:
            logging.info("+++ Dumping as npy")
        else:
            logging.info("+++ Dumping as h5")
        if self.get_sift:
            self.sift_num = sift_num
            self.if_BF_matcher = if_BF_matcher
            # Very low contrast threshold so SIFT yields up to sift_num keypoints.
            self.sift = cv2.xfeatures2d.SIFT_create(
                nfeatures=self.sift_num, contrastThreshold=1e-5
            )
            # self.bf = cv2.BFMatcher(normType=cv2.NORM_L2)
            # FLANN_INDEX_KDTREE = 0
            # index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
            # search_params = dict(checks = 50)
            # self.flann = cv2.FlannBasedMatcher(index_params, search_params)
            # self.sift_matcher = self.bf if BF_matcher else self.flann
        self.scenes = {"train": [], "test": []}
        if self.get_SP:
            # Inherited from KittiOdoLoader ('prapare' typo is upstream).
            self.prapare_SP()
        # no need two functions
        self.collect_train_folders()
        self.collect_test_folders()

    def read_images_files_from_folder(self, drive_path, scene_data, folder="rgb"):
        """Return the sorted image paths listed in <drive_path>/<folder>/data_f.txt."""
        # print(f"cid_num: {scene_data['cid_num']}")
        # img_dir = os.path.join(drive_path, "cam%d" % scene_data["cid_num"])
        # img_files = sorted(glob(img_dir + "/data/*.png"))
        print(f"drive_path: {drive_path}")
        ## given that we have matched time stamps
        arr = np.genfromtxt(
            f"{drive_path}/{folder}/data_f.txt", dtype="str"
        )  # [N, 2(time, path)]
        img_files = np.char.add(str(drive_path) + f"/{folder}/data/", arr[:, 1])
        img_files = [Path(f) for f in img_files]
        img_files = sorted(img_files)
        print(f"img_files: {img_files[0]}")
        return img_files

    def collect_train_folders(self):
        # Resolve each training sequence to an absolute scene directory.
        for seq in self.train_seqs:
            seq_dir = os.path.join(self.dataset_dir, seq)
            self.scenes["train"].append(seq_dir)

    def collect_test_folders(self):
        # Resolve each test sequence to an absolute scene directory.
        for seq in self.test_seqs:
            seq_dir = os.path.join(self.dataset_dir, seq)
            self.scenes["test"].append(seq_dir)

    def load_image(self, scene_data, tgt_idx, show_zoom_info=True):
        """Load frame *tgt_idx*; return (resized_img, (zoom_x, zoom_y), original_img).

        Returns (None, None, None) when the file is missing.
        """
        # use different image filename
        img_file = Path(scene_data["img_files"][tgt_idx])
        if not img_file.is_file():
            logging.warning("Image %s not found!" % img_file)
            return None, None, None
        # NOTE(review): scipy.misc.imread/imresize were removed in SciPy 1.3;
        # this module requires an old SciPy (or porting to imageio/cv2).
        img_ori = scipy.misc.imread(img_file)
        if [self.img_height, self.img_width] == [img_ori.shape[0], img_ori.shape[1]]:
            # Already at the target size: no rescale needed.
            return img_ori, (1.0, 1.0), img_ori
        else:
            zoom_y = self.img_height / img_ori.shape[0]
            zoom_x = self.img_width / img_ori.shape[1]
            if show_zoom_info:
                logging.warning(
                    "[%s] Zooming the image (H%d, W%d) with zoom_yH=%f, zoom_xW=%f to (H%d, W%d)."
                    % (
                        img_file,
                        img_ori.shape[0],
                        img_ori.shape[1],
                        zoom_y,
                        zoom_x,
                        self.img_height,
                        self.img_width,
                    )
                )
            img = scipy.misc.imresize(img_ori, (self.img_height, self.img_width))
            return img, (zoom_x, zoom_y), img_ori

    # def collect_scene_from_drive(self, drive_path):
    def collect_scene_from_drive(self, drive_path, split="train"):
        """Gather images, calibration and poses for one EuRoC sequence.

        Returns a list with one scene dict per camera id (empty on failure).
        """
        # adapt for Euroc dataset
        train_scenes = []
        logging.info("Gathering info for %s..." % drive_path)
        for c in self.cam_ids:
            scene_data = {
                "cid": c,
                "cid_num": self.cid_to_num[c],
                "dir": Path(drive_path),
                "rel_path": str(Path(drive_path).parent.name) + "_" + c,
            }
            # img_dir = os.path.join(drive_path, 'image_%d'%scene_data['cid_num'])
            # scene_data['img_files'] = sorted(glob(img_dir + '/*.png'))
            scene_data["img_files"] = self.read_images_files_from_folder(
                drive_path, scene_data, folder="cam0"
            )
            scene_data["N_frames"] = len(scene_data["img_files"])
            assert scene_data["N_frames"] != 0, "No file found for %s!" % drive_path
            scene_data["frame_ids"] = [
                "{:06d}".format(i) for i in range(scene_data["N_frames"])
            ]
            img_shape = None
            zoom_xy = None
            show_zoom_info = True
            # read images
            for idx in tqdm(range(scene_data["N_frames"])):
                img, zoom_xy, _ = self.load_image(scene_data, idx, show_zoom_info)
                # print(f"zoom_xy: {zoom_xy}")
                show_zoom_info = False
                if img is None and idx == 0:
                    logging.warning("0 images in %s. Skipped." % drive_path)
                    return []
                else:
                    # All frames of a sequence must share one shape.
                    if img_shape is not None:
                        assert img_shape == img.shape, (
                            "Inconsistent image shape in seq %s!" % drive_path
                        )
                    else:
                        img_shape = img.shape
            # print(img_shape)
            scene_data["calibs"] = {
                "im_shape": [img_shape[0], img_shape[1]],
                "zoom_xy": zoom_xy,
                "rescale": True if zoom_xy != (1.0, 1.0) else False,
            }
            # Get geo params from the RAW dataset calibs
            calib_file = os.path.join(
                drive_path, "cam%d" % scene_data["cid_num"], "sensor.yaml"
            )
            # calib_file = f"{scene_data['img_files'][0].str()}/../../sensor.yaml"
            P_rect_ori, cam2body_mat = self.get_P_rect(calib_file, scene_data["calibs"])
            P_rect_ori_dict = {c: P_rect_ori}
            # K is the left 3x3 of the 3x4 projection matrix.
            intrinsics = P_rect_ori_dict[c][:, :3]
            logging.debug(f"intrinsics: {intrinsics}, cam2body_mat: {cam2body_mat}")
            calibs_rects = self.get_rect_cams(intrinsics, cam2body_mat[:3]) ##### need validation
            # calibs_rects = {"Rtl_gt": cam2body_mat}
            cam_2rect_mat = intrinsics
            # drive_in_raw = self.map_to_raw[drive_path[-2:]]
            # date = drive_in_raw[:10]
            # seq = drive_in_raw[-4:]
            # calib_path_in_raw = Path(self.dataset_dir)/'raw'/date
            # imu2velo_dict = read_calib_file(calib_path_in_raw/'calib_imu_to_velo.txt')
            # velo2cam_dict = read_calib_file(calib_path_in_raw/'calib_velo_to_cam.txt')
            # cam2cam_dict = read_calib_file(calib_path_in_raw/'calib_cam_to_cam.txt')
            # velo2cam_mat = transform_from_rot_trans(velo2cam_dict['R'], velo2cam_dict['T'])
            # imu2velo_mat = transform_from_rot_trans(imu2velo_dict['R'], imu2velo_dict['T'])
            # cam_2rect_mat = transform_from_rot_trans(cam2cam_dict['R_rect_00'], np.zeros(3))
            # EuRoC has no velodyne; placeholder for KITTI-compatible keys.
            velo2cam_mat = None
            scene_data["calibs"].update(
                {
                    "K": intrinsics,
                    "P_rect_ori_dict": P_rect_ori_dict,
                    "cam_2rect": cam_2rect_mat,
                    "velo2cam": velo2cam_mat,
                    "cam2body_mat": cam2body_mat,
                }
            )
            scene_data["calibs"].update(calibs_rects)
            # Get pose
            # NOTE(review): .format() on "data_f.kitti" has no placeholders,
            # so the argument is ignored -- likely leftover from KITTI code.
            poses = (
                np.genfromtxt(Path(drive_path) / "data_f.kitti".format(drive_path[-2:]))
                .astype(np.float32)
                .reshape(-1, 3, 4)
            )
            assert scene_data["N_frames"] == poses.shape[0], (
                "scene_data[N_frames]!=poses.shape[0], %d!=%d"
                % (scene_data["N_frames"], poses.shape[0])
            )
            scene_data["poses"] = poses
            # ground truth rt for camera
            scene_data["Rt_cam2_gt"] = scene_data["calibs"]["Rtl_gt"]
            train_scenes.append(scene_data)
        return train_scenes

    def get_P_rect(self, calib_file, calibs):
        """Build the (optionally rescaled) 3x4 projection matrix and the 4x4
        body-to-camera transform from a sensor.yaml calibration file."""
        # calib_file = scene_data['dir'].parent/'calib_cam_to_cam.txt'
        calib_data = loadConfig(calib_file)
        height, width, K, D = load_intrinsics(calib_data)
        transformation_base_camera = load_extrinsics(calib_data)
        # P = [K | 0]
        P_rect = np.concatenate((K, [[0], [0], [0]]), axis=1)
        if calibs["rescale"]:
            P_rect = scale_P(P_rect, calibs["zoom_xy"][0], calibs["zoom_xy"][1])
        return P_rect, transformation_base_camera

    @staticmethod
    def load_velo(scene_data, tgt_idx, calib_K=None):
        """
        create point clouds from depth image, return array of points
        return:
            np [N, 3] (3d points)
        """
        # Not supported for EuRoC (no lidar/depth source available here).
        logging.error(f"Not implemented error!! Turn off --with_X")
        return None
def load_intrinsics(calib_data):
    """Extract (height, width, K, D) from an EuRoC sensor.yaml dict."""
    width, height = calib_data["resolution"]
    distortion = np.array(calib_data["distortion_coefficients"])
    focal_u, focal_v, center_u, center_v = calib_data["intrinsics"]
    camera_matrix = np.array(
        [[focal_u, 0, center_u],
         [0, focal_v, center_v],
         [0, 0, 1]]
    )
    return height, width, camera_matrix, distortion
# parse camera calibration yaml file
def load_extrinsics(calib_data):
    """Return the 4x4 homogeneous body-to-camera transform T_BS."""
    flat = np.array(calib_data["T_BS"]["data"])
    return flat.reshape((4, 4))
def loadConfig(filename):
    """Parse a YAML calibration file (e.g. EuRoC sensor.yaml) into a dict.

    Uses yaml.safe_load: calling yaml.load without an explicit Loader is
    unsafe on untrusted input and raises/warns on PyYAML >= 5.1. The
    sensor.yaml files contain plain scalars/lists, so safe_load suffices.
    """
    import yaml

    with open(filename, "r") as f:
        config = yaml.safe_load(f)
    return config
# calib_file = '/data/euroc/mav0/cam0/sensor.yaml'
# calib_data = loadConfig(calib_file)
# intrinsics = load_intrinsics(calib_data)
# transformation_base_camera = load_extrinsics(calib_data)
# print(f"height, width, K, D = {intrinsics}")
# print(f"transformation_base_camera: {transformation_base_camera}")
if __name__ == "__main__":
    # Import-only module for now; no standalone CLI behaviour.
    pass
|
import math
import multiprocessing

from matplotlib import pyplot as plt
import numpy as np
def merge_by_y(left, right):
    """Merge two arrays of 2-D points, each sorted by the y (index-1)
    coordinate, into one y-sorted list of points.

    :param left: first sorted array
    :param right: second sorted array
    :return: merged list of points
    """
    merged = []
    i = j = 0
    n_left, n_right = len(left), len(right)
    while i < n_left and j < n_right:
        if left[i, 1] <= right[j, 1]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # At most one of these remainders is non-empty.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def distance(p1, p2):
    """Return the Euclidean distance between two 2-D points.

    Fix: the original called np.math.sqrt; np.math was merely an alias
    for the stdlib math module, was deprecated, and is removed in
    NumPy 2.0 — math.sqrt is the direct equivalent.
    """
    return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
# Brute force method to calculate distance
def _brute_closest_pair_finder(X):
min_dist = distance(X[0], X[1])
p1 = X[0]
p2 = X[1]
len_X = len(X)
if len_X == 2:
return p1, p2, min_dist
for i in range(len_X - 1):
for j in range(i + 1, len_X):
d = distance(X[i, :], X[j, :])
if d < min_dist: # Update min_dist and points
min_dist = d
p1, p2 = X[i, :], X[j, :]
return p1, p2, min_dist
def _boundary_merge(X, distances, point_pairs, xm):
    """Find the closest pair in a point set merged from two recursion halves.

    :param X: points merged and sorted by y (axis 1)
    :param distances: smallest distances found in the left/right halves
    :param point_pairs: point pairs corresponding to *distances*
    :param xm: median x value used to split the halves
    :return: (point_a, point_b, min_distance)
    """
    best_so_far = min(distances)
    # Only points within best_so_far of the dividing line can beat it.
    in_strip = (X[:, 0] >= (xm - best_so_far)) & (X[:, 0] <= (xm + best_so_far))
    strip = X[np.where(in_strip)]
    a, b, strip_dist = _brute_closest_pair_finder(strip)
    if strip_dist not in distances:
        distances.append(strip_dist)
        point_pairs.append((a, b))
        print("Point pairs after boundary merge\n", point_pairs)
    else:
        print("The minimum distance is not on boundary.")
    best_so_far = min(distances)
    best_pair = point_pairs[distances.index(best_so_far)]
    print("Min distance on this step\n", best_so_far)
    print("Pair with min distance on this step\n", best_pair)
    return best_pair[0], best_pair[1], best_so_far
# generate a process number which will be index in dictionary
def generate_process_number(process_id):
    """Derive a small integer id from a process object's hash."""
    # NOTE: the modulus is the float 1e8, so the remainder is a float
    # before truncation -- behaviour kept from the original.
    remainder = hash(process_id) % 1e8
    return int(remainder)
def sort_by_y(points):
    """Return *points* reordered by ascending y (column 1)."""
    order = np.argsort(points[:, 1])
    return points[order]
def closest_pair(points, return_dict=None, verbose=False):
    # Divide-and-conquer closest-pair, parallelised with one process per
    # half. Small inputs are solved by brute force and reported through
    # the shared *return_dict*.
    # NOTE(review): the base case writes into return_dict unconditionally;
    # a direct top-level call with len(points) <= threshold and
    # return_dict=None would raise. The __main__ driver always takes the
    # recursive branch, so this never fires there.
    if len(points) <= MIN_SIZE_OF_ARRAY_PER_THREAD:
        print(multiprocessing.current_process())
        n = generate_process_number(multiprocessing.current_process())
        return_dict[n] = (sort_by_y(points), _brute_closest_pair_finder(points))
    else:
        # Split by the median x so both halves have similar sizes.
        x_median = medianSearch(points[:, 0])
        if verbose:
            print("Median on this step", x_median)
        left = points[np.where(points[:, 0] < x_median)]
        right = points[np.where(points[:, 0] >= x_median)]
        jobs = []
        manager = multiprocessing.Manager()
        return_dict_input = manager.dict()
        # One child process per half; results come back via the manager dict.
        for data in [left, right]:
            jobs.append(multiprocessing.Process(target=closest_pair, args=(data, return_dict_input)))
        for job in jobs:
            job.start()
        if verbose:
            print(multiprocessing.current_process())
        for job in jobs: job.join()
        for job in jobs: job.terminate()
        results = return_dict_input.values()
        res_len = len(results)
        # Re-merge the two y-sorted halves, then check the boundary strip.
        merged = np.array(merge_by_y(results[0][0], results[1][0]))
        distances = [results[i][1][2] for i in range(res_len)]
        point_pairs = [(results[i][1][0], results[i][1][1]) for i in range(res_len)]
        if verbose:
            print("\nResult of 2 parallel task execution")
            print("Current shape of merged points", merged.shape)
            print("Min distances found by each of tasks\n", distances)
            print("Point pairs after merge of tasks\n", point_pairs)
        res_boundary_merge = _boundary_merge(merged, distances, point_pairs, x_median)
        n = generate_process_number(multiprocessing.current_process())
        if return_dict is None: return_dict = manager.dict()
        return_dict[n] = (merged, res_boundary_merge)
        return res_boundary_merge
# calculates median value in a list
def medianSearch(list):
    # NOTE(review): the parameter shadows the builtin `list`; renaming it
    # would change the keyword-argument interface, so it is left as-is.
    return np.median(list)
# write integer data to csv file
def write_to_file(data, file_name):
    """Dump *data* as comma-separated integers into '<file_name>.csv'."""
    np.savetxt("{}.csv".format(file_name), data, fmt='%d', delimiter=",")
# creating scatterplot of a 2d dataset
def plot_points(data, plot_name, show=False):
    # Scatter the (x, y) columns, save as '<plot_name>.png', and optionally
    # open an interactive window.
    plt.scatter(data[:, 0], data[:, 1])
    plt.xlabel("x")
    plt.ylabel("y")
    plt.title(plot_name)
    plt.savefig(plot_name + ".png")
    if show:
        plt.show()
def min_size_of_array(full_input_len):
    """Input length below which a worker falls back to brute force."""
    return full_input_len / NUM_WORKERS


# Tuning knobs for the parallel closest-pair driver.
NUM_WORKERS = 4
ARRAY_SIZE = 2 ** 5
MIN_SIZE_OF_ARRAY_PER_THREAD = ARRAY_SIZE / NUM_WORKERS
if __name__ == "__main__":
    # Deterministic demo: generate integer points, persist and plot the
    # input, then run the parallel closest-pair search.
    np.random.seed(123)
    input_points = (np.random.randn(ARRAY_SIZE, 2) * 100).astype(int)
    write_to_file(input_points, "input")
    plot_points(input_points, "input", False)
    result = closest_pair(input_points, verbose=True)
    print("\n\nRESULT: \nThe closed pair:{0} and {1}\nDistance: {2:.5f}".format(result[0], result[1], result[2]))
|
import colorgram

# Pull the six most prominent colors out of the image and collect their
# RGB triples as (r, g, b) tuples.
colors = colorgram.extract('image.jpg', 6)
color_list = [(color.rgb.r, color.rgb.g, color.rgb.b) for color in colors]
print(color_list)
"""
Given two arrays, write a function to compute their intersection.
Example 1:
Input: nums1 = [1,2,2,1], nums2 = [2,2]
Output: [2,2]
Example 2:
Input: nums1 = [4,9,5], nums2 = [9,4,9,8,4]
Output: [4,9]
Note:
Each element in the result should appear as many times as it shows in both arrays.
The result can be in any order.
Follow up:
What if the given array is already sorted? How would you optimize your algorithm?
What if nums1's size is small compared to nums2's size? Which algorithm is better?
What if elements of nums2 are stored on disk, and the memory is limited such that you cannot load all elements into the memory at once?
"""
import collections
def intersect(nums1, nums2):
    """
    Given two arrays, compute their intersection with multiplicity.

    Each value appears min(count in nums1, count in nums2) times; result
    order is unspecified.

    :param nums1: List(int)
    :param nums2: List(int)
    :return: List(int)
    """
    # Counter '&' keeps each key with the minimum of the two counts, and
    # .elements() expands that multiset back into a flat list. The
    # original built three Counters and re-counted each key by hand.
    common = collections.Counter(nums1) & collections.Counter(nums2)
    return list(common.elements())


print(intersect([4, 9, 5], [9, 4, 9, 8, 4]))
|
import math

# Read the three side lengths from the user.
a = int(input('Enter A value:'))
b = int(input('Enter B value:'))
c = int(input('Enter C value:'))

# Pythagorean check: right triangle iff a^2 + b^2 == c^2.
is_right = a ** 2 + b ** 2 == c ** 2
if is_right:
    print('Given Triangle is rectangle')
else:
    print('Given Triangle is not rectangle')
|
import serial
import time

# Ask the user for the object's size and color; the lower-cased answers
# select which single-byte command goes to the Arduino.
x = (input('Enter size:'+'\n'+'a.Small'+'\t\t'+'b.Big'+'\t\t')).lower()
y = (input('Enter color:'+'\n'+'a.green'+'\t\t'+'b.Red'+'\t\t')).lower()

ser = serial.Serial('COM5', 9600, timeout=1)
time.sleep(3)  # give the board time to reset after the port opens

# Bug fix: the prompt offers "green"/"red", but the original compared
# against "blue", making the 'A' and 'C' commands unreachable.
if(x == "small" and y == "green"):
    ser.write(b'A')
    time.sleep(1)
elif(x == "small" and y == "red"):
    ser.write(b'B')
    time.sleep(1)
elif(x == "big" and y == "green"):
    ser.write(b'C')
    time.sleep(1)
elif(x == "big" and y == "red"):
    ser.write(b'D')
    time.sleep(1)
|
from django.db.models.signals import post_save, pre_save, pre_delete
from django.dispatch import receiver
from django.db.models import F
from sale.models import Stock, Transfer
@receiver(post_save, sender=Transfer)
def TransferInToStockSignal(sender, instance, created, **kwargs):
    """Adjust stock counts whenever a Transfer row is saved.

    NOTE(review): both receivers in this module share this function name;
    signal registration still works because @receiver runs at definition
    time, but this definition is shadowed at module level by the later one.
    """
    if not created:
        # NOTE(review): 'pass' makes this branch a no-op, so updates are
        # processed exactly like creations; 'return' may have been
        # intended -- confirm before changing.
        pass
    # Identify the stock row by the transfer's code/patch pair.
    detail = {
        'stock_code': instance.transfer_code,
        'stock_patch': instance.transfer_patch
    }
    status = instance.transfer_status
    # get_or_create returns a (obj, created_flag) tuple.
    commdity = Stock.objects.get_or_create(**detail)
    # Status codes select direction; F() defers arithmetic to the DB.
    if status in ['100001', '011002']:
        commdity[0].stock_number = F('stock_number') - instance.transfer_number
    if status in ['000101', '011001']:
        commdity[0].stock_number = F('stock_number') + instance.transfer_number
    commdity[0].save()
@receiver(pre_delete, sender=Transfer)
def TransferInToStockSignal(sender, instance, **kwargs):
    """Reverse a transfer's stock effect before the Transfer row is deleted.

    Mirror image of the post_save handler above: adds back what the save
    subtracted and vice versa, using the same status-code convention.
    """
    detail = {
        'stock_code': instance.transfer_code,
        'stock_patch': instance.transfer_patch
    }
    status = instance.transfer_status
    commdity = Stock.objects.get_or_create(**detail)
    if status in ['100001', '011002']:
        commdity[0].stock_number = F('stock_number') + instance.transfer_number
    if status in ['000101', '011001']:
        commdity[0].stock_number = F('stock_number') - instance.transfer_number
    commdity[0].save()
|
# coding: utf-8
import tensorflow as tf
HIDDEN_SIZE = 1024                          # LSTM hidden-layer size.
NUM_LAYERS = 2                              # Number of stacked LSTM layers.
SRC_VOCAB_SIZE = 10000                      # Source-language vocabulary size.
TRG_VOCAB_SIZE = 4000                       # Target-language vocabulary size.
BATCH_SIZE = 100                            # Training batch size.
NUM_EPOCH = 5                               # Passes over the training data.
KEEP_PROB = 0.8                             # Probability a node survives dropout.
MAX_GRAD_NORM = 5                           # Gradient-norm cap for clipping.
SHARE_EMB_AND_SOFTMAX = True                # Tie softmax weights to the target embedding.
MAX_LEN = 50                                # Maximum words per sentence.
SOS_ID = 1                                  # ID of <sos> in the target vocabulary.
# 定义NMTModel类来描述模型。
class NMTModel(object):
# Define, in the constructor, every variable the model needs.
def __init__(self):
    # LSTM stacks used by the encoder and the decoder (NUM_LAYERS deep each).
    self.enc_cell = tf.nn.rnn_cell.MultiRNNCell(
        [tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE)
         for _ in range(NUM_LAYERS)])
    self.dec_cell = tf.nn.rnn_cell.MultiRNNCell(
        [tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE)
         for _ in range(NUM_LAYERS)])
    # Separate word embeddings for the source and target languages.
    self.src_embedding = tf.get_variable(
        "src_emb", [SRC_VOCAB_SIZE, HIDDEN_SIZE])
    self.trg_embedding = tf.get_variable(
        "trg_emb", [TRG_VOCAB_SIZE, HIDDEN_SIZE])
    # Softmax projection: either tied to the target embedding (transposed)
    # or an independent weight matrix.
    if SHARE_EMB_AND_SOFTMAX:
        self.softmax_weight = tf.transpose(self.trg_embedding)
    else:
        self.softmax_weight = tf.get_variable(
            "weight", [HIDDEN_SIZE, TRG_VOCAB_SIZE])
    self.softmax_bias = tf.get_variable(
        "softmax_bias", [TRG_VOCAB_SIZE])
def variable_summaries(var):
    # Attach TensorBoard summaries (mean/stddev/min/max/histogram) to *var*.
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)
# 在forward函数中定义模型的前向计算图。
# src_input, src_size, trg_input, trg_label, trg_size分别是上面
# MakeSrcTrgDataset函数产生的五种张量。
def forward(self, src_input, src_size, trg_input, trg_label, trg_size):
batch_size = tf.shape(src_input)[0]
# 将输入和输出单词编号转为词向量。
src_emb = tf.nn.embedding_lookup(self.src_embedding, src_input)
trg_emb = tf.nn.embedding_lookup(self.trg_embedding, trg_input)
# 在词向量上进行dropout。
src_emb = tf.nn.dropout(src_emb, KEEP_PROB)
trg_emb = tf.nn.dropout(trg_emb, KEEP_PROB)
# 使用dynamic_rnn构造编码器。
# 编码器读取源句子每个位置的词向量,输出最后一步的隐藏状态enc_state。
# 因为编码器是一个双层LSTM,因此enc_state是一个包含两个LSTMStateTuple类
# 张量的tuple,每个LSTMStateTuple对应编码器中的一层。
# enc_outputs是顶层LSTM在每一步的输出,它的维度是[batch_size,
# max_time, HIDDEN_SIZE]。Seq2Seq模型中不需要用到enc_outputs,而
# 后面介绍的attention模型会用到它。
with tf.variable_scope("encoder"):
enc_outputs, enc_state = tf.nn.dynamic_rnn(
self.enc_cell, src_emb, src_size, dtype=tf.float32)
# 使用dyanmic_rnn构造解码器。
# 解码器读取目标句子每个位置的词向量,输出的dec_outputs为每一步
# 顶层LSTM的输出。dec_outputs的维度是 [batch_size, max_time,
# HIDDEN_SIZE]。
# initial_state=enc_state表示用编码器的输出来初始化第一步的隐藏状态。
with tf.variable_scope("decoder"):
dec_outputs, _ = tf.nn.dynamic_rnn(
self.dec_cell, trg_emb, trg_size, initial_state=enc_state)
# 计算解码器每一步的log perplexity。这一步与语言模型代码相同。
output = tf.reshape(dec_outputs, [-1, HIDDEN_SIZE])
logits = tf.matmul(output, self.softmax_weight) + self.softmax_bias
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.reshape(trg_label, [-1]), logits=logits)
# 在计算平均损失时,需要将填充位置的权重设置为0,以避免无效位置的预测干扰
# 模型的训练。
label_weights = tf.sequence_mask(trg_size, maxlen=tf.shape(trg_label)[1], dtype=tf.float32)
label_weights = tf.reshape(label_weights, [-1])
cost = tf.reduce_sum(loss * label_weights)
cost_per_token = cost / tf.reduce_sum(label_weights)
tf.summary.scalar('cost', cost_per_token) # 命名和赋值
# 定义反向传播操作。反向操作的实现与语言模型代码相同。
trainable_variables = tf.trainable_variables()
# 控制梯度大小,定义优化方法和训练步骤。
grads = tf.gradients(cost / tf.to_float(batch_size), trainable_variables)
grads, _ = tf.clip_by_global_norm(grads, MAX_GRAD_NORM)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train_op = optimizer.apply_gradients(zip(grads, trainable_variables))
self.merged = tf.summary.merge_all()
return cost_per_token, train_op
|
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options
import sys
from time import sleep
def get_safety(place):
    """Print the travel-safety notice for *place* scraped from gov.pl.

    Runs a headless Chrome session; *place* is slugified (lowercase,
    spaces -> dashes) into the gov.pl diplomacy URL.
    """
    options = Options()
    options.headless = True
    slug = place.lower().replace(' ', '-')
    # insert your chromedriver path
    driver = Chrome(executable_path='###########', options=options)
    target = 'https://www.gov.pl/web/dyplomacja/' + slug
    driver.get(target)
    warning = driver.find_element_by_xpath(
        '/html/body/main/div[2]/article/div[1]/div')
    print(warning.get_attribute('innerHTML'))
get_safety(sys.argv[1])
import numpy as np
import pandas as pd
import log
import matplotlib.pyplot as pl
import matplotlib.dates as mdates
import datetime
DATAFRAME = pd.DataFrame.from_dict(log.DATA, orient='index')
#print(DATAFRAME)
#print(DATAFRAME["push_ups"])
def str_to_datenums(str_dates):
    """Parse an iterable of "dd/mm/YYYY" strings into datetime objects."""
    return [datetime.datetime.strptime(text, "%d/%m/%Y") for text in str_dates]
def get_exercise_data(exercise_name):
    """Return (dates, values) for one exercise column of the log DATAFRAME.

    Bug fix: the column was hard-coded to "push_ups"; the *exercise_name*
    argument was silently ignored.
    """
    exercise_data = DATAFRAME[exercise_name].dropna()
    dates = exercise_data.keys()
    values = np.array(exercise_data.values.tolist())
    return dates, values
def plot_exercise_progress(exercise_name):
    """Plot the first recorded value of *exercise_name* against its dates."""
    dates, values = get_exercise_data(exercise_name)
    figure, axis = pl.subplots(1)
    axis.xaxis.set_major_formatter(
        mdates.ConciseDateFormatter(mdates.AutoDateLocator()))
    axis.plot(str_to_datenums(dates), values[:, 0])
    pl.show()
plot_exercise_progress("push_ups")
|
import pydot
from Cp7_test.cmp.utils import ContainerSet
class NFA:
    """Non-deterministic finite automaton over integer states 0..states-1.

    *transitions* maps (origin_state, symbol) -> iterable of destination
    states; the empty string '' as a symbol denotes an epsilon transition.
    """

    def __init__(self, states: int, finals: iter, transitions: dict, start=0):
        self.states = states
        self.start = start
        self.finals = set(finals)
        # Raw (origin, symbol) -> destinations mapping, kept for graph().
        self.map = transitions
        self.vocabulary = set()
        # Per-state adjacency: self.transitions[state][symbol] -> destinations.
        self.transitions = {state: {} for state in range(states)}
        # NOTE(review): the next three bare annotations are no-op statements;
        # they only declare the loop-variable types for readers/tools.
        destinations: list
        origin: int
        symbol: str
        for (origin, symbol), destinations in transitions.items():
            assert hasattr(destinations,
                           '__iter__'), 'Invalid collection of states'
            self.transitions[origin][symbol] = destinations
            self.vocabulary.add(symbol)
        # Epsilon is not part of the input vocabulary.
        self.vocabulary.discard('')

    def epsilon_transitions(self, state):
        """Destinations reachable from *state* through one epsilon edge."""
        assert state in self.transitions, 'Invalid state'
        try:
            return self.transitions[state]['']
        except KeyError:
            return ()

    def graph(self):
        """Render the automaton as a pydot graph (final states drawn bold)."""
        G = pydot.Dot(rankdir='LR', margin=0.1)
        G.add_node(
            pydot.Node('start', shape='plaintext', label='', width=0, height=0))
        for (start, tran), destinations in self.map.items():
            tran = 'ε' if tran == '' else tran
            G.add_node(pydot.Node(start, shape='circle',
                                  style='bold' if start in self.finals else ''))
            for end in destinations:
                G.add_node(pydot.Node(end, shape='circle',
                                      style='bold' if end in self.finals else ''))
                G.add_edge(pydot.Edge(start, end, label=tran, labeldistance=2))
        G.add_edge(pydot.Edge('start', self.start, label='', style='dashed'))
        return G

    def _repr_svg_(self):
        """Jupyter inline SVG rendering; returns None if graphviz fails."""
        try:
            return self.graph().create().decode('utf8')
        except:
            pass
class DFA(NFA):
    """Deterministic automaton: exactly one destination per (state, symbol)."""

    def __init__(self, states: int, finals: list, transitions: dict, start=0):
        assert all(isinstance(value, int) for value in transitions.values())
        assert all(len(symbol) > 0 for origin, symbol in transitions)
        # Wrap each single destination in a list so NFA machinery is reused.
        transitions = {key: [value] for key, value in transitions.items()}
        NFA.__init__(self, states, finals, transitions, start)
        # Mutable cursor used while recognizing a string.
        self.current = start

    def epsilon_transitions(self):
        """A DFA has no epsilon transitions."""
        raise TypeError()

    def _move(self, symbol):
        """Advance the cursor on *symbol*; False when no edge exists."""
        options = self.transitions[self.current].get(symbol)
        if not options:
            return False
        self.current = options[0]
        return True

    def _reset(self):
        """Rewind the cursor to the start state."""
        self.current = self.start

    def recognize(self, string):
        """True iff consuming *string* from the start lands on a final state."""
        self._reset()
        # all() short-circuits exactly like the original early return.
        return all(self._move(char) for char in string) \
            and self.current in self.finals
# region string recognize by automaton test
# automaton = DFA(states=3, finals=[2], transitions={
# (0, 'a'): 0,
# (0, 'b'): 1,
# (1, 'a'): 2,
# (1, 'b'): 1,
# (2, 'a'): 0,
# (2, 'b'): 1,
# })
#
# assert automaton.recognize('ba')
# assert automaton.recognize('aababbaba')
#
# assert not automaton.recognize('')
# assert not automaton.recognize('aabaa')
# assert not automaton.recognize('aababb')
# endregion
def move(automaton: NFA, states: iter, symbol: str):
    """Return the set of states reachable from any state in *states* by
    consuming exactly one *symbol*.

    No epsilon closure is applied here; callers combine this with
    epsilon_closure to get the full successor set (states reachable via
    epsilon edges before/after consuming the symbol).
    """
    moves = set()
    for state in states:
        try:
            moves.update(automaton.transitions[state][symbol])
        except KeyError:
            # Fix: was a bare `except` that swallowed every exception;
            # only "no transition on this symbol" is expected here.
            pass
    return moves
def epsilon_closure(automaton, states: iter):
    """Compute the epsilon-closure of *states*: every state reachable from
    them through epsilon transitions alone (depth-first flood fill)."""
    closure = set(states)
    stack = list(states)
    while stack:
        current = stack.pop()
        for neighbour in automaton.epsilon_transitions(current):
            if neighbour not in closure:
                closure.add(neighbour)
                stack.append(neighbour)
    return ContainerSet(*closure)
def nfa_to_dfa(automaton):
    """Subset construction: build a DFA equivalent to the given NFA.

    Each DFA state is a ContainerSet of NFA states; an .id attribute
    numbers it and .is_final marks sets containing an NFA final state.
    """
    transitions = {}
    # The DFA start state is the epsilon-closure of the NFA start state.
    start = epsilon_closure(automaton, [automaton.start])
    start.id = 0
    start.is_final = any(s in automaton.finals for s in start)
    states = [start]
    next_id = 1
    pending = [start]
    while pending:
        state = pending.pop()
        for symbol in automaton.vocabulary:
            # Successor set: move on the symbol, then close under epsilon.
            next_state = ContainerSet(*move(automaton, state, symbol))
            next_state.update(epsilon_closure(automaton, next_state))
            # Empty successor means no DFA edge on this symbol.
            if len(next_state) > 0:
                if next_state not in states:
                    # Unseen subset: number it and queue it for expansion.
                    next_state.id = next_id
                    next_id += 1
                    next_state.is_final = any(s in automaton.finals for s in
                                              next_state)
                    states.append(next_state)
                    pending.append(next_state)
                else:
                    # Reuse the canonical (already numbered) copy, since the
                    # freshly built set has no .id attribute.
                    try:
                        next_state = states[states.index(next_state)]
                    except: raise
                # Record the DFA edge; an existing key would mean two edges
                # for one (state, symbol), i.e. a construction bug.
                try:
                    transitions[state.id, symbol]
                    assert False, 'Invalid DFA!!!'
                except KeyError:
                    transitions[state.id, symbol] = next_state.id
    finals = [state.id for state in states if state.is_final]
    dfa = DFA(len(states), finals, transitions)
    return dfa
# Example NFA with epsilon edges; per the assertions below it accepts
# c* (through final state 3) and (a|b)d* (through final state 5).
automaton = NFA(states=6, finals=[3, 5], transitions={
    (0, ''): [ 1, 2 ],
    (1, ''): [ 3 ],
    (1,'b'): [ 4 ],
    (2,'a'): [ 4 ],
    (3,'c'): [ 3 ],
    (4, ''): [ 5 ],
    (5,'d'): [ 5 ]
})
# region move test
# assert move(automaton, [1], 'a') == set()
# assert move(automaton, [2], 'a') == {4}
# assert move(automaton, [1, 5], 'd') == {5}
# endregion
# region e-cloure test
#
# assert epsilon_closure(automaton, [0]) == {0,1,2,3}
# assert epsilon_closure(automaton, [0, 4]) == {0,1,2,3,4,5}
# assert epsilon_closure(automaton, [1, 2, 4]) == {1,2,3,4,5}
# endregion
# Determinize and sanity-check the resulting DFA.
dfa = nfa_to_dfa(automaton)
# display(dfa)
assert dfa.states == 4
assert len(dfa.finals) == 4
assert dfa.recognize('')
assert dfa.recognize('a')
assert dfa.recognize('b')
assert dfa.recognize('cccccc')
assert dfa.recognize('adddd')
assert dfa.recognize('bdddd')
assert not dfa.recognize('dddddd')
assert not dfa.recognize('cdddd')
assert not dfa.recognize('aa')
assert not dfa.recognize('ab')
assert not dfa.recognize('ddddc')
# Second example: NFA without epsilon edges.
automaton = NFA(states=3, finals=[2], transitions={
    (0,'a'): [ 0 ],
    (0,'b'): [ 0, 1 ],
    (1,'a'): [ 2 ],
    (1,'b'): [ 2 ],
})
#display(automaton)
print("No sé que lenguaje reconoce :'(")
assert move(automaton, [0, 1], 'a') == {0, 2}
assert move(automaton, [0, 1], 'b') == {0, 1, 2}
dfa = nfa_to_dfa(automaton)
#display(dfa)
assert dfa.states == 4
assert len(dfa.finals) == 2
assert dfa.recognize('aba')
assert dfa.recognize('bb')
assert dfa.recognize('aaaaaaaaaaaba')
assert not dfa.recognize('aaa')
assert not dfa.recognize('ab')
assert not dfa.recognize('b')
assert not dfa.recognize('')
# Third example: larger NFA whose determinization blows up to 15 states.
automaton = NFA(states=5, finals=[4], transitions={
    (0,'a'): [ 0, 1 ],
    (0,'b'): [ 0, 2 ],
    (0,'c'): [ 0, 3 ],
    (1,'a'): [ 1, 4 ],
    (1,'b'): [ 1 ],
    (1,'c'): [ 1 ],
    (2,'a'): [ 2 ],
    (2,'b'): [ 2, 4 ],
    (2,'c'): [ 2 ],
    (3,'a'): [ 3 ],
    (3,'b'): [ 3 ],
    (3,'c'): [ 3, 4 ],
})
#display(automaton)
print("No sé que lenguaje reconoce :'(")
dfa = nfa_to_dfa(automaton)
#display(dfa)
assert dfa.states == 15
assert len(dfa.finals) == 7
assert dfa.recognize('abccac')
assert dfa.recognize('bbbbbbbbaa')
assert dfa.recognize('cac')
assert not dfa.recognize('abbbbc')
assert not dfa.recognize('a')
assert not dfa.recognize('')
assert not dfa.recognize('acacacaccab')
'''
Created on Oct 13, 2015
@author: Jonathan Yu
'''
def getFortunate(a, b, c):
    """Count the distinct sums i + j + k (i in a, j in b, k in c) whose
    decimal digits are all 5s or 8s ("fortunate" numbers).

    Fixes: the original shadowed the builtin `sum`, used bitwise `|` on
    booleans instead of `or`, and built a full list before deduplicating.
    """
    fortunate_digits = {"5", "8"}
    sums = {i + j + k for i in a for j in b for k in c}
    return sum(1 for total in sums
               if all(digit in fortunate_digits for digit in str(total)))
if __name__ == '__main__':
pass |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Long description shown on PyPI = README followed by the changelog.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read()
requirements = [
    'cliff',
    'datakit-core',
    'requests'
]
test_requirements = [
    'pytest'
]
setup(
    name='datakit-dworld',
    version='0.2.0',
    description="Commands to manage project integration with data.world.",
    long_description=readme + '\n\n' + history,
    author="Justin Myers",
    author_email='jmyers@ap.org',
    url='https://github.com/associatedpress/datakit-dworld',
    packages=find_packages(),
    # Ship the summary template alongside the installed package assets.
    data_files=(
        (
            'datakit_dworld/assets',
            ('datakit_dworld/assets/summary_template.md',)),
    ),
    include_package_data=True,
    # Subcommands registered with the datakit plugin loader.
    entry_points={
        'datakit.plugins': [
            'dworld create= datakit_dworld.create:Create',
            'dworld push= datakit_dworld.push:Push',
            'dworld summary= datakit_dworld.summary:Summary',
        ]
    },
    install_requires=requirements,
    license="ISC license",
    zip_safe=False,
    keywords='datakit-dworld',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: ISC License (ISCL)',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    test_suite='tests',
    tests_require=test_requirements
)
|
import os
import re
################## CHANGE HERE ONLY ##################
# The name of module you want
name_module = "adc_list"
# Brief description of module
description_module = "Analog-to-Digital Converter (ADC)"
# name_driver = "stm_driver.h"
# The path of repository
ksdk_path = "e:/C55SDK/sdk_codebase/"
######################################################
# Please dont change any line below
# Unix standard
unix_standard = '\n'
# collect data from file pattern
data = []
def write_body_file_item(dataLine):
    """Substitute the template placeholders in each input line and collect
    the results into the module-level `data` list.

    NOTE(review): the template file is opened in binary mode, so these str
    arguments to replace() would raise TypeError under Python 3 -- this
    script appears to target Python 2; confirm.
    """
    for line in dataLine:
        # str.replace is a no-op when the placeholder is absent, so the
        # original count()-guards were redundant.
        line = line.replace("NameModule", name_module)
        line = line.replace("ModuleNameUpper", name_module.upper())
        line = line.replace("DescriptionModule", description_module)
        data.append(line)
def update_and_write_data_to_file(line_ending):
    """Rewrite each collected line's CRLF ending to *line_ending* and write
    it to the module-level export file handle."""
    for entry in data:
        file_export.write(entry.replace("\r\n", line_ending))
# Destination: the generated bean file inside the SDK repository tree.
file_path_export = os.path.join(ksdk_path, "tools/pex/Repositories/SDK_RELEASE_VERSION_ID_Repository/Beans/" + name_module + "/" + name_module + ".bean")
# file_path_export = os.path.join(os.getcwd(), name_module + "Methods.html")
directory_export = os.path.dirname(file_path_export)
if not os.path.exists(directory_export):
    os.makedirs(directory_export)
# NOTE(review): binary mode yields bytes lines, but the substitution code
# above uses str arguments -- works on Python 2, raises on Python 3; confirm.
file_pattern = open("module.bean", "rb").readlines()
file_export = open(file_path_export, "wb")
write_body_file_item(file_pattern)
update_and_write_data_to_file(unix_standard)
file_export.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Any, Callable, Iterator, Tuple, Type
import pytest
from flask import Flask
from marshmallow import Schema
from smorest_sfs.modules.email_templates.models import EmailTemplate
@pytest.fixture
def email_template_items(
    flask_app: Flask, temp_db_instance_helper: Callable[..., Iterator[Any]],
) -> Iterator[Tuple[EmailTemplate, EmailTemplate, EmailTemplate]]:
    """Yield persisted EmailTemplate instances named "0", "1", "2".

    flask_app is requested only for its side effects (unused directly).
    """
    # pylint: disable=W0613
    # Fix: the original reused `_` both as the generator index and as the
    # yielded instance, shadowing a throwaway name with a real value.
    for instance in temp_db_instance_helper(
        *(EmailTemplate(name=str(idx), template="qq") for idx in range(3))
    ):
        yield instance
@pytest.fixture
def EmailTemplateSchema(flask_app: Flask) -> Type[Schema]:
    """Provide the EmailTemplate marshmallow schema class (imported lazily
    inside the fixture body)."""
    # pylint: disable=W0621, W0613
    from smorest_sfs.modules.email_templates.schemas import (
        EmailTemplateSchema as schema_cls,
    )

    return schema_cls
|
# -*- coding:utf-8 -*-
"""
请设计一个函数,用来判断在一个矩阵中是否存在一条包含某字符串所有
字符的路径。路径可以从矩阵中的任意一个格子开始,每一步可以在矩阵
中向左,向右,向上,向下移动一个格子。如果一条路径经过了矩阵中的
某一个格子,则之后不能再次进入这个格子。 例如 a b c e s f c s
a d e e 这样的3 X 4 矩阵中包含一条字符串"bcced"的路径,但是矩
阵中不包含"abcb"路径,因为字符串的第一个字符b占据了矩阵中的第一
行第二个格子之后,路径不能再次进入该格子。
"""
class Solution:
    """Matrix word-path search: decide whether *path* can be traced through
    adjacent grid cells without revisiting any cell."""

    def __init__(self):
        # Flag raised by the recursive search once a full match is found.
        self.b = False

    def hasPath(self, matrix1, rows, cols, path):
        """Return True if *path* exists in the rows x cols grid *matrix1*
        (a flat string), moving up/down/left/right without cell reuse.
        """
        # Bug fix: reset the flag so a second call on the same instance
        # does not inherit a stale True from an earlier successful search.
        self.b = False
        matrix = [[matrix1[i * cols + j] for j in range(cols)]
                  for i in range(rows)]
        if not path:
            return True
        if not matrix:
            return False
        for i in range(rows):
            for j in range(cols):
                if matrix[i][j] == path[0]:
                    self.matchpath(matrix, i, j, [(i, j)], path[1:])
                    if self.b:
                        return True
        return False

    def matchpath(self, matrix, row, col, visited, path):
        """Depth-first extension of the partial path ending at (row, col).

        visited: cells already consumed (renamed from `dict`, which
        shadowed the builtin). Explores up, down, left, right in order.
        """
        if len(path) == 0:
            self.b = True
            return
        for r, c in ((row - 1, col), (row + 1, col),
                     (row, col - 1), (row, col + 1)):
            if (0 <= r < len(matrix) and 0 <= c < len(matrix[0])
                    and matrix[r][c] == path[0] and (r, c) not in visited):
                self.matchpath(matrix, r, c, visited + [(r, c)], path[1:])
# Smoke test: search a 5x8 grid for a 17-character path.
s = Solution()
print(s.hasPath("ABCEHJIGSFCSLOPQADEEMNOEADIDEJFMVCEIFGGS",5,8,"SLHECCEIDEJFGGFIE"))
# Generated by Django 2.0.7 on 2019-01-14 17:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: widen basedata price fields to DecimalField with
    2 decimal places.

    NOTE(review): max_length is accepted by Field.__init__ but has no
    effect on DecimalField -- harmless; left untouched since generated
    migrations should not be hand-edited.
    """

    dependencies = [
        ('basedata', '0059_auto_20190114_1712'),
    ]

    operations = [
        migrations.AlterField(
            model_name='device',
            name='Inquiry_price',
            field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=10, max_length=8, null=True, verbose_name='询价单价'),
        ),
        migrations.AlterField(
            model_name='device',
            name='buy_price',
            field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=10, max_length=8, null=True, verbose_name='采购单价'),
        ),
        migrations.AlterField(
            model_name='device',
            name='sale_price',
            field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=10, max_length=8, null=True, verbose_name='单价'),
        ),
        migrations.AlterField(
            model_name='device',
            name='total_buy_price',
            field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=10, max_length=8, null=True, verbose_name='采购金额'),
        ),
        migrations.AlterField(
            model_name='device',
            name='total_price',
            field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=10, max_length=8, null=True, verbose_name='金额'),
        ),
        migrations.AlterField(
            model_name='device_form',
            name='total_buyprice',
            field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=10, max_length=8, null=True, verbose_name='合计采购金额'),
        ),
        migrations.AlterField(
            model_name='device_form',
            name='total_price',
            field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=10, max_length=8, null=True, verbose_name='合计金额'),
        ),
        migrations.AlterField(
            model_name='feedback_form',
            name='bonus',
            field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=9, max_length=8, null=True, verbose_name='项目经理奖励金额'),
        ),
        migrations.AlterField(
            model_name='project',
            name='total_price',
            field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=10, max_length=10, null=True, verbose_name='合同金额'),
        ),
    ]
|
# class A(object):
# def foo(self, x):
# print("executing foo(%s,%s)" % (self, x))
#
# @classmethod
# def class_foo(cls, x):
# print("executing class_foo(%s,%s)" % (cls, x))
#
# @staticmethod
# def static_foo(x):
# print("executing static_foo(%s)" % x)
#
#
# a = A()
# a.foo(2)
# A.class_foo(2)
# A.static_foo(2)
#
# def print_everything(*args):
# for count, thing in enumerate(args):
# print('{0}.{1}'.format(count, thing))
# print_everything('apple', 'banana', 'cabbage')
#
# import copy
# a = [1,2,3,4, ['a','b']]
# b = a # 直接赋值,传递对象的引用
# c = copy.copy(a) # 浅拷贝,没有拷贝子对象,所以原始数据改变,子对象会改变
# d = copy.deepcopy(a) # 深拷贝,包含对象里面的子对象的拷贝,所以原始对像改变不会造成深拷贝里任何子元素改变
#
# a.append(5)
# a[4].append('c')
#
# print('a:',a)
# print('b:',b)
# print('c:',c)
# print('d:',d)
#
#
# def print_directory_content(sPath):
# import os
# for sChild in os.listdir(sPath):
# sChildPath = os.path.join(sPath, sChild)
# if os.path.isdir(sChildPath):
# print_directory_content(sChildPath)
# else:
# print(sChildPath)
# print_directory_content("F:\重要文件")
# class Node(object):
# def __init__(self,sName):
# self._lChildren = []
# self.sName = sName
# def __repr__(self):
# return "<Node '{}'>".format(self.sName)
# def append(self,*args,**kwargs):
# self._lChildren.append(*args,**kwargs)
# def print_all_1(self):
# print(self)
# for oChild in self._lChildren:
# oChild.print_all_1()
# def print_all_2(self):
# def gen(o):
# lAll = [o,]
# while lAll:
# oNext = lAll.pop(0)
# lAll.extend(oNext._lChildren)
# yield oNext
# for oNode in gen(self):
# print(oNode)
#
# oRoot = Node("root")
# oChild1 = Node("child1")
# oChild2 = Node("child2")
# oChild3 = Node("child3")
# oChild4 = Node("child4")
# oChild5 = Node("child5")
# oChild6 = Node("child6")
# oChild7 = Node("child7")
# oChild8 = Node("child8")
# oChild9 = Node("child9")
# oChild10 = Node("child10")
#
# oRoot.append(oChild1)
# oRoot.append(oChild2)
# oRoot.append(oChild3)
# oChild1.append(oChild4)
# oChild1.append(oChild5)
# oChild2.append(oChild6)
# oChild4.append(oChild7)
# oChild3.append(oChild8)
# oChild3.append(oChild9)
# oChild6.append(oChild10)
#
# oRoot.print_all_1()
# oRoot.print_all_2()
# def f1(lIn):
# l1 = sorted(lIn)
# l2 = [i for i in l1 if i<0.5]
# return [i*i for i in l2]
#
# def f2(lIn):
# l1 = [i for i in lIn if i<0.5]
# l2 = sorted(l1)
# return [i*i for i in l2]
#
# def f3(lIn):
# l1 = [i*i for i in lIn]
# l2 = sorted(l1)
# return [i for i in l2 if i<(0.5*0.5)]
#
# import cProfile # 分析代码性能
# import random
# lIn = [random.random() for i in range(100000)]
# cProfile.run('f1(lIn)')
# cProfile.run('f2(lIn)')
# cProfile.run('f3(lIn)')
# Deduplicate l1 while preserving first-occurrence order.
l1 = ['b','c','d','b','c','a','a']
l2 = list(set(l1))
l2.sort(key=l1.index)
# NOTE(review): this prints the bound method object, not a value --
# probably leftover debugging or meant print(l1); confirm.
print(l1.index)
print(l2)
|
from math import ceil
from function import *
from equally_spaced_integration import newton_cotes, A, B, EXACT_VALUE, FIFTEEN_POINTS, FIFTEEN_POINTS_COEF
from gauss_integration import composite_gauss, SEVEN_NODES, SEVEN_WEIGHTS
COTES_15_P = 16
GAUSS_7_P = 14
def runge_rule_newton_cotes(p, accuracy):
    """Refine a composite 15-point Newton-Cotes quadrature of f over [A, B]
    until the Runge (Richardson) error estimate drops below *accuracy*.

    p: theoretical convergence order of the rule.
    Returns (integral_estimate, number_of_refinement_steps).
    """
    prev_n, cur_n = 1, 2
    prev_h = (B - A) / prev_n
    prev_q = newton_cotes(f, A, B, prev_h, FIFTEEN_POINTS, FIFTEEN_POINTS_COEF)
    steps = 0
    while True:
        steps += 1
        cur_h = (B - A) / cur_n
        cur_q = newton_cotes(f, A, B, cur_h, FIFTEEN_POINTS, FIFTEEN_POINTS_COEF)
        # Runge error estimate from two step sizes of the same rule.
        error = ((cur_q - prev_q) * cur_h ** p) / (prev_h ** p - cur_h ** p)
        if abs(error) <= accuracy:
            return cur_q, steps
        # Predict the subdivision count needed to reach the target accuracy.
        next_n = int(ceil((abs(error) / accuracy) ** (1 / p) * cur_n))
        prev_n, prev_h, prev_q = cur_n, cur_h, cur_q
        cur_n = next_n
def runge_rule_gauss(p, accuracy):
    """Refine a composite 7-node Gauss quadrature of f over [A, B] until the
    Runge (Richardson) error estimate drops below *accuracy*.

    p: theoretical convergence order of the rule.
    Returns (integral_estimate, number_of_refinement_steps).
    """
    prev_n, cur_n = 1, 2
    prev_h = (B - A) / prev_n
    prev_q = composite_gauss(f, A, B, SEVEN_NODES, SEVEN_WEIGHTS, prev_h)
    steps = 0
    while True:
        steps += 1
        cur_h = (B - A) / cur_n
        cur_q = composite_gauss(f, A, B, SEVEN_NODES, SEVEN_WEIGHTS, cur_h)
        # Runge error estimate from two step sizes of the same rule.
        error = ((cur_q - prev_q) * cur_h ** p) / (prev_h ** p - cur_h ** p)
        if abs(error) <= accuracy:
            return cur_q, steps
        # Predict the subdivision count needed to reach the target accuracy.
        next_n = int(ceil((abs(error) / accuracy) ** (1 / p) * cur_n))
        prev_n, prev_h, prev_q = cur_n, cur_h, cur_q
        cur_n = next_n
if __name__ == "__main__":
    # Append step counts for both rules to the shared report.
    with open('report.txt', 'a') as report_file:
        report_file.write("\n")
        report_file.write("Runge rule (TASK 15):\n")
        # NOTE(review): 10e-15 equals 1e-14 -- confirm 1e-15 wasn't intended.
        _, count = runge_rule_newton_cotes(COTES_15_P, 10e-15)
        report_file.write("Newton-Cotes15: {0} steps\n".format(count))
        _, count = runge_rule_gauss(GAUSS_7_P, 10e-15)
        report_file.write("Gauss7: {0} steps\n".format(count))
|
import cv2
import numpy as np
import time
#Select any model you want to use for inferencing
#net = cv2.dnn.readNet('YoloV4-Tiny/yoloV4_best.weights', 'YoloV4-Tiny/y4_tiny_cfg.cfg')
#net = cv2.dnn.readNet('YoloV4_Iterations_1000/yolov4-obj_last.weights', 'YoloV4_Iterations_1000/yolov4-obj.cfg')
net = cv2.dnn.readNet('YoloV3-Tiny/yolov3-tiny_best.weights', 'YoloV3-Tiny/yolov3-tiny.cfg')
classes = ["Plastic"]
#Bounding box Style
color = (204,204,12)
font = cv2.FONT_HERSHEY_PLAIN
# Run the loaded YOLO net on one frame and draw labelled boxes.
# img: input frame; record_status: 1 draws the "Rec" overlay; conf: score
# threshold for NMS. Returns (annotated 1003x503 frame, kept detections).
def detected_image(img,record_status,conf):
    height, width, _ = img.shape
    # 1/255 rescale, 416x416 input, BGR->RGB for the darknet-trained models.
    blob = cv2.dnn.blobFromImage(img, 1/255, (416, 416), (0,0,0), swapRB=True, crop=False)
    net.setInput(blob)
    output_layers_names = net.getUnconnectedOutLayersNames()
    layerOutputs = net.forward(output_layers_names)
    boxes = []
    confidences = []
    class_ids = []
    for output in layerOutputs:
        for detection in output:
            # detection = [cx, cy, w, h, objectness, class scores...]
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.2:
                # Convert normalised centre/size to a top-left pixel box.
                center_x = int(detection[0]*width)
                center_y = int(detection[1]*height)
                w = int(detection[2]*width)
                h = int(detection[3]*height)
                x = int(center_x - w/2)
                y = int(center_y - h/2)
                boxes.append([x, y, w, h])
                confidences.append((float(confidence)))
                class_ids.append(class_id)
    # Non-max suppression: score threshold `conf`, IoU threshold 0.4.
    indexes = cv2.dnn.NMSBoxes(boxes, confidences,conf, 0.4)
    if len(indexes)>0:
        for i in indexes.flatten():
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            confidence = str(round(confidences[i],2))
            # Box outline, filled label background, then label text.
            cv2.rectangle(img, (x,y), (x+w, y+h), color, 2)
            cv2.rectangle(img, (x-1,y-30), (x+200, y),color, -1)
            cv2.putText(img, label + " " + confidence, (x+1, y-5), font,2, (255,255,255),2)
    img=cv2.resize(img,(1003,503))
    if record_status==1:
        # "Rec" text plus red corner brackets on the resized frame.
        cv2.putText(img,"Rec", (15, 38), font,2, (0,0,255),2)
        cv2.line(img,(8,8),(8,38),(0,0,255),3)
        cv2.line(img,(8,8),(38,8),(0,0,255),3)
        cv2.line(img,(995,8),(995,38),(0,0,255),3)
        cv2.line(img,(995,8),(965,8),(0,0,255),3)
        cv2.line(img,(8,495),(8,465),(0,0,255),3)
        cv2.line(img,(8,495),(38,495),(0,0,255),3)
        cv2.line(img,(995,495),(995,465),(0,0,255),3)
        cv2.line(img,(995,495),(965,495),(0,0,255),3)
    return img,len(indexes)
|
#!/usr/bin/env python3
import pyaudio
import struct
from math import pi, sin
import sys
SAMPLE_RATE = int(48 * 1000) # hertz
WAVE_DURATION = 0.25 # seconds
class Tone:
    """A sampled sine wave at a fixed frequency.

    freq        -- frequency (in hertz)
    duration    -- duration of the wave (in seconds)
    sample_rate -- sampling rate (in hertz)
    """

    def __init__(self, freq, duration, sample_rate):
        self.freq = freq
        self.duration = duration
        self.sample_rate = sample_rate
        self.generate_wave()
        self.generate_buffer()

    def generate_wave(self):
        """Sample one sine wave into self.wave, amplitude +/-127.

        NOTE(review): the samples are later packed as float32 while scaled
        to +/-127; paFloat32 playback expects [-1.0, 1.0] -- confirm the
        resulting clipping is intentional.
        """
        total_samples = int(self.duration * self.sample_rate)
        samples_per_cycle = self.sample_rate / self.freq
        self.wave = [127 * sin(2 * pi * n / samples_per_cycle)
                     for n in range(total_samples)]

    def generate_buffer(self):
        """Pack the samples into a float32 byte string for playback."""
        self.buffer = struct.pack("f" * len(self.wave), *self.wave)
def make_tone(freq):
    """Build a Tone at *freq* Hz with the module-wide duration/sample rate."""
    return Tone(freq, duration=WAVE_DURATION, sample_rate=SAMPLE_RATE)
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("usage: ./talk.py start_freq")
        sys.exit(1)
    try:
        start_freq = int(sys.argv[1])
    except ValueError:
        print("bad starting frequency, please provide a number in Hz (e.g. 2000)")
        sys.exit(1)
    # Two tones 2 kHz apart encode bit values 0 and 1.
    freqs = [start_freq, start_freq + 2000]
    tones = [make_tone(freq) for freq in freqs]
    p = pyaudio.PyAudio()
    stream = p.open(format=pyaudio.paFloat32, channels=1, rate=SAMPLE_RATE, \
        frames_per_buffer=int(WAVE_DURATION * SAMPLE_RATE), output=True)
    # Play data from stdin: one tone per bit, most-significant bit first.
    while True:
        bytes = sys.stdin.buffer.read(1)  # NOTE(review): shadows builtin `bytes`
        if len(bytes) <= 0:
            break
        byte = bytes[0]
        bits = [1 if (byte & (1 << i)) != 0 else 0 for i in range(7, -1, -1)]
        # Printable ASCII is echoed as-is; everything else as "(unsafe)".
        str_b = chr(byte) if 32 <= byte <= 126 else "(unsafe)"
        print("{} (n={})".format(str_b, byte))
        for b in bits:
            print("\t{}".format(b), end="")
            sys.stdout.flush()
            stream.write(tones[b].buffer)
        print("") # newline
    stream.stop_stream()
    stream.close()
    p.terminate()
|
class Solution(object):
    def swapPairs(self, head):
        """Swap every two adjacent nodes of a linked list; return new head."""
        sentinel = ListNode(0)
        sentinel.next = head
        prev = sentinel
        while prev.next is not None and prev.next.next is not None:
            first = prev.next
            second = first.next
            # Relink: prev -> second -> first -> (rest)
            first.next = second.next
            second.next = first
            prev.next = second
            prev = first
        return sentinel.next
|
from ._base import IndexStrategy
__all__ = ('IndexStrategy',)
|
import h5py
from scipy import sparse
import query_chembl
# Load the CDK2 dataset and look up the SMILES of the first molecule.
hf = h5py.File("data/cdk2.h5", "r")
# NOTE(review): Dataset.value was removed in h5py 3.0 (use hf["chembl_id"][()]
# on modern h5py) -- confirm the pinned h5py version.
ids = list(hf["chembl_id"].value)  # the ChEMBL id of each molecule
smiles = query_chembl.get_smiles(ids[:1])
print(smiles)
"""
The following are helper functions to handle the volumes of a Notebook.
The new API Volume will work with objects of the following format:
volume:
mount: "mount path"
newPvc?:
metadata: ...
spec: ...
existingSource?:
nfs?: ...
persistentVolumeClaim?: ...
...
These functions will parse such objects and map them to K8s constructs.
"""
from kubernetes import client
from werkzeug.exceptions import BadRequest
from kubeflow.kubeflow.crud_backend import api, logging
from . import utils
log = logging.getLogger(__name__)
PVC_SOURCE = "persistentVolumeClaim"
EXISTING_SOURCE = "existingSource"
NEW_PVC = "newPvc"
MOUNT = "mount"
NAME = "name"
def check_volume_format(api_volume):
    """Validate the structure of a JSON API Volume object.

    Raises BadRequest when the mount path is missing, or when the object
    does not carry exactly one of: an existing source, a new PVC.
    """
    has_existing = EXISTING_SOURCE in api_volume
    has_new_pvc = NEW_PVC in api_volume
    if MOUNT not in api_volume:
        raise BadRequest("Volume should have a mount: %s" % api_volume)
    if not (has_existing or has_new_pvc):
        raise BadRequest("Volume has neither %s nor %s: %s"
                         % (EXISTING_SOURCE, NEW_PVC, api_volume))
    if has_existing and has_new_pvc:
        raise BadRequest("Volume has both %s and %s: %s"
                         % (EXISTING_SOURCE, NEW_PVC, api_volume))
def get_volume_name(api_volume):
    """Pick the K8s V1Volume name for a volume backed by an existing source.

    An existing PVC lends its claim name to the V1Volume; any other source
    gets a randomly generated name.
    """
    if EXISTING_SOURCE not in api_volume:
        raise BadRequest("Failed to retrieve a volume name from '%s'"
                         % api_volume)
    existing = api_volume[EXISTING_SOURCE]
    if PVC_SOURCE in existing:
        pvc_source = existing[PVC_SOURCE]
        if "claimName" not in pvc_source:
            raise BadRequest("Failed to retrieve the PVC name from '%s'"
                             % api_volume)
        return pvc_source["claimName"]
    return "existing-source-volume-%s" % utils.random_string(8)
def get_pod_volume(api_volume, pvc):
    """Build a V1Volume dict from the client's API Volume.

    pvc: the PVC created from api_volume.newPvc, or None when the client
    asked for an existing volume source.
    """
    check_volume_format(api_volume)
    if pvc is None:
        # Existing source: copy it verbatim under a derived volume name.
        volume = {"name": get_volume_name(api_volume)}
        volume.update(api_volume[EXISTING_SOURCE])
        return volume
    # New PVC: refer to the created object -- its final name may come from
    # metadata.generateName and is only known after creation.
    return {"name": pvc.metadata.name,
            "persistentVolumeClaim": {"claimName": pvc.metadata.name}}
def get_container_mount(api_volume, volume_name):
    """Build a V1VolumeMount dict binding *volume_name* to the volume's
    requested mount path."""
    check_volume_format(api_volume)
    mount_path = api_volume["mount"]
    return {"name": volume_name, "mountPath": mount_path}
def get_new_pvc(api_volume) -> client.V1PersistentVolumeClaim:
    """Deserialize api_volume.newPvc into a V1PersistentVolumeClaim.

    Returns None when the volume does not define a new PVC; raises
    BadRequest when the PVC tries to pick its own namespace.
    """
    check_volume_format(api_volume)
    try:
        pvc_spec = api_volume[NEW_PVC]
    except KeyError:
        return None
    pvc = api.deserialize(pvc_spec, "V1PersistentVolumeClaim")
    # The namespace is decided server-side; clients may not choose it.
    if pvc.metadata.namespace is not None:
        raise BadRequest("PVC should not specify the namespace.")
    return pvc
def add_notebook_volume(notebook, volume):
    """Append a pod volume (dict V1Volume) to the Notebook CR's PodSpec
    and return the (mutated) Notebook."""
    podspec = notebook["spec"]["template"]["spec"]
    podspec.setdefault("volumes", []).append(volume)
    return notebook
def add_notebook_container_mount(notebook, container_mount):
    """Append a volume mount (dict V1VolumeMount) to the first container of
    the Notebook CR's PodSpec and return the (mutated) Notebook."""
    container = notebook["spec"]["template"]["spec"]["containers"][0]
    container.setdefault("volumeMounts", []).append(container_mount)
    return notebook
|
__version__ = '4.24.10' |
import csv
import pickle
from keras import Sequential
from sklearn import metrics
import numpy as np
from keras.models import Sequential
from keras.layers import BatchNormalization
from keras.layers import Dropout
from keras.layers import Dense, Activation, Flatten
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
from keras.callbacks import ModelCheckpoint
from keras.callbacks import EarlyStopping
class Serialization:
    """Pickle helpers for persisting arbitrary objects."""

    @staticmethod
    def save_obj(obj, name):
        """Pickle *obj* into pickle/<name>.pkl (directory must exist)."""
        with open('pickle/' + name + '.pkl', 'wb') as handle:
            pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)

    @staticmethod
    def load_obj(name):
        """Unpickle and return the object stored at <name>.pkl.

        NOTE(review): unlike save_obj, the 'pickle/' prefix is NOT added
        here -- callers pass the directory themselves; confirm intended.
        """
        with open(name + '.pkl', 'rb') as handle:
            return pickle.load(handle)
def infer_definitions_vad(filename):
    """Predict VAD scores for idiom definitions, eventually appending them to *filename*.

    Currently only the valence model runs; the arousal/dominance calls and the
    CSV-writing stage are commented out below.
    """
    w2e = Serialization.load_obj('../pickle/lexicon2embeddings')     # word -> embedding
    d2e = Serialization.load_obj('../pickle/definition2embeddings')  # definition -> embedding
    d2v = extract_definitions_metrics(w2e, d2e, Serialization.load_obj('pickle/v.dict'), 'v')
    #d2a = extract_definitions_metrics(w2e, d2e, Serialization.load_obj('pickle/a.dict'), 'a')
    #d2d = extract_definitions_metrics(w2e, d2e, Serialization.load_obj('pickle/d.dict'), 'd')
    '''
    with open(filename, 'r') as fin, open(filename.replace('csv', 'vad-embeddings.nn.csv'), 'w') as fout:
        csv_reader = csv.reader(fin, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        csv_writer = csv.writer(fout, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        csv_writer.writerow(next(csv_reader) + ['V', 'A', 'D'])
        for line in csv_reader:
            definition = line[1].strip()
            csv_writer.writerow(line + [d2v[definition], d2a[definition], d2d[definition]])
        # end for
    # end with
    '''
# end def
def extract_train_test_data(w2e, d2e, vad_dict):
    """Split the data: lexicon words become the train set, definitions the test set.

    Returns (x_train, y_train, x_test, defs) where y_train holds the VAD value
    for each lexicon word and defs lists the definitions aligned with x_test.
    """
    x_train = [w2e[word] for word in w2e]
    y_train = [vad_dict[word] for word in w2e]

    defs = list(d2e)
    x_test = [d2e[definition] for definition in defs]

    return x_train, y_train, x_test, defs
def extract_definitions_metrics(w2e, d2e, vad_dict, type):
    """Evaluate the regression NN for one VAD dimension (*type* in {'v','a','d'}).

    Builds the inference model, loads pre-trained weights from `dirname`, and
    reports the train-set r^2. NOTE(review): the final return is commented out,
    so the function currently returns None.
    """
    # https://towardsdatascience.com/deep-neural-networks-for-regression-problems-81321897ca33
    x_train, y_train, x_test, defs = extract_train_test_data(w2e, d2e, vad_dict)
    print('extracted train-test data:', len(x_train), len(y_train), len(x_test))
    '''
    print(np.array(x_train).shape)
    print('building and training regression nn model...')
    model = build_nn_model(np.array(x_train).shape, training=True)
    train_model(model, np.array(x_train), np.array(y_train))
    '''
    # Inference-only topology (training=False drops the Dropout layers).
    model = build_nn_model(np.array(x_train).shape, training=False)
    model.load_weights(dirname + 'model-weights-017--0.0934388.hdf4') # load best model
    model.compile(loss='mean_absolute_error', optimizer='adam', metrics=['mean_absolute_error'])
    y_predict = model.predict(np.array(x_train)) # how good is the fit of train data
    # Clamp predictions to [0, 1] before scoring.
    y_predict = [1.0 if y > 1.0 else 0 if y < 0.0 else y for y in y_predict]
    print('r^2 on train data:', metrics.r2_score(y_train, y_predict))
    #return extract_def_predictions(model, x_test, defs)
# end def
def extract_def_predictions(model, x_test, defs):
    """Predict a value for each definition, returning {definition: predicted value}.

    Each prediction is asserted to lie in [0, 1] (they model proportions).
    """
    predictions = model.predict(np.array(x_test))
    result = dict()
    for definition, predicted in zip(defs, predictions):
        assert(0.0 <= predicted <= 1.0), 'predicted value is not proportion'
        result[definition] = predicted
    return result
def build_nn_model(dimensions, training=True):
    """Build the 512-256-128-1 dense regression network.

    dimensions: shape of the training matrix; dimensions[1] is the feature width.
    training: include Dropout layers only while training; inference models omit
    them. NOTE(review): confirm saved checkpoints load into both topologies.
    """
    model = Sequential()
    print('building sequential model with input dim:', dimensions[1])
    model.add(Dense(512, kernel_initializer='normal', input_dim=dimensions[1], activation='relu'))
    if training: model.add(Dropout(0.25))
    model.add(BatchNormalization())
    model.add(Dense(256, kernel_initializer='normal', activation='relu'))
    if training: model.add(Dropout(0.25))
    model.add(BatchNormalization())
    model.add(Dense(128, kernel_initializer='normal', activation='relu'))
    if training: model.add(Dropout(0.25))
    model.add(BatchNormalization())
    # Single linear output: the regression target is a scalar in [0, 1].
    model.add(Dense(1, kernel_initializer='normal', activation='linear'))
    model.compile(loss='mean_absolute_error', optimizer='adam', metrics=['mean_absolute_error'])
    model.summary()
    return model
# end def
def train_model(model, x, y):
    """Fit *model* on (x, y) with best-val_loss checkpointing and early stopping.

    Checkpoints are written to `dirname` with epoch and val_loss in the name;
    training stops after 50 epochs without val_loss improvement.
    """
    checkpoint_name = dirname + 'model-weights-{epoch:03d}--{val_loss:.7f}.hdf4'
    checkpoint = ModelCheckpoint(checkpoint_name, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=50)
    callbacks_list = [checkpoint, es]
    model.fit(x, y, epochs=300, batch_size=32, validation_split=0.2, callbacks=callbacks_list)
# end def
# Which VAD dimension to work with; presumably 0=V, 1=A, 2=D -- TODO confirm.
VAD_INDEX = 2 # 0-2
# Checkpoint directory used by train_model/extract_definitions_metrics.
dirname = '<working dir>/gender-idioms/model-checkpoints/'
# Idioms excluded from processing (usage not visible in this chunk).
excluded = ['red herring', 'red carpet']

if __name__ == '__main__':
    infer_definitions_vad('../idioms-definitions-final-counts.csv')
# end if
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 11 19:20:15 2019
@author: ghosty
"""
import os
from datetime import datetime
import pytz
import h5py
import csv
import scipy.signal
import numpy as np
import matplotlib.pyplot as plt
# Parse the capture timestamp out of the filename: the first 19 characters
# are a nanosecond epoch timestamp.
stamp = '1541962108935000000_167_838.h5'
filename = stamp
stamp = stamp[0:19]
stamp = int(stamp)/10**9  # nanoseconds -> seconds (float)

# Convert the epoch seconds into UTC and CERN-local (CET) wall-clock times.
# NOTE(review): fromtimestamp() yields a naive local-time datetime, so the
# astimezone() results depend on the machine's local timezone -- confirm.
dt = datetime.fromtimestamp(stamp)
tz=pytz.timezone('CET')
utc_dt = dt.astimezone(pytz.utc)
cern_dt = dt.astimezone(tz)
print('UTC: '+str(utc_dt))
print('CERN: '+str(cern_dt))
# Read the HDF5 file. The capture is shipped with a .h5 extension, but the
# code here expects .hdf5, so it temporarily renames the file, opens it, and
# renames it back. If the rename fails (e.g. the file is already .hdf5 from a
# previous interrupted run), open the .hdf5 path directly.
# BUG FIX: the original used a bare `except:` which would also swallow
# KeyboardInterrupt/SystemExit; only filesystem errors should fall through.
try:
    os.rename("/home/ghosty/gsoc/python_awake/1541962108935000000_167_838.h5", "/home/ghosty/gsoc/python_awake/1541962108935000000_167_838.hdf5")
    f = h5py.File("/home/ghosty/gsoc/python_awake/1541962108935000000_167_838.hdf5", 'r')
    os.rename("/home/ghosty/gsoc/python_awake/1541962108935000000_167_838.hdf5", "/home/ghosty/gsoc/python_awake/1541962108935000000_167_838.h5")
except OSError:
    f = h5py.File("/home/ghosty/gsoc/python_awake/1541962108935000000_167_838.hdf5", 'r')
def foo(name, obj):
    """Debug visitor for h5py visititems: print each visited item's path and object."""
    print(name, obj)
    return None
# Accumulates one row per dataset found in the HDF5 file; first row is the header.
dd=[]
# CSV column headers for the dataset summary.
row = ['Groups', ' Dataset Name', ' Size', 'Shape', 'Type']
dd.append(row)
def extract(name, node):
if isinstance(node, h5py.Dataset):
temp = []
temp.append(name)
xx = name.split('/')
temp.append(xx[-1])
size = node.size
temp.append(str(size))
shape = node.shape
temp.append(str(shape))
try:
dtype = str(node.dtype)
temp.append(str(dtype))
except:
1==1
dd.append(temp)
return None
# Walk the file, collecting dataset metadata into dd.
f.visititems(extract)

# Write the collected dataset summary to a CSV file.
with open('data.csv', 'w') as writeFile:
    writer = csv.writer(writeFile)
    writer.writerows(dd)

# Read the streak-camera image data plus its height/width scalars.
dp = f['/AwakeEventData/XMPP-STREAK/StreakImage/streakImageData']
dh = f['/AwakeEventData/XMPP-STREAK/StreakImage/streakImageHeight']
dw = f['/AwakeEventData/XMPP-STREAK/StreakImage/streakImageWidth']
dp = dp[...]
dh = dh[...]
dw = dw[...]
# Reshape the flat pixel array into (height, width), denoise, display and save.
dp = np.reshape(dp,(int(dh),int(dw)))
dp = scipy.signal.medfilt(dp)
plt.imshow(dp)
plt.imsave('img.png',dp)
|
import json

# Load streaming preferences from the local pref.json configuration file.
with open('pref.json') as f:
    data = json.load(f)

informations = data["informations"]
streamUrl = informations["STREAMURL"]
streamerName = informations["STREAMERNAME"]
import os
import cv2
import json
import argparse
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
def init_coco_dict():
    """Return a fresh, empty COCO-format annotation dict (single 'person' category)."""
    dataset_info = {
        'description': 'Kyoto pedestrian dataset',
        'url': 'https://www.vision.rwth-aachen.de/page/mots',
        'version': '1.0',
        'year': 2020,
        'contributor': 'chenyu',
        'date_created': '2020/5/2',
    }
    license_list = [{
        'url': 'http://creativecommons.org/licenses/by-nc-sa/2.0/',
        'id': 1,
        'name': 'Attribution-NonCommercial-ShareAlike License',
    }]
    category_list = [{
        'supercategory': 'person',
        'id': 1,
        'name': 'person',
    }]
    return {
        'info': dataset_info,
        'licenses': license_list,
        'images': [],
        'annotations': [],
        'categories': category_list,
    }
def to_coco(root_path):
    """Convert raw frame labels under *root_path* into COCO train/val JSON files.

    Reads one label file per video from annotations/raw_labels; each line is
    '<frame>,<obj_count>,...,<6 fields per object>,...'. Writes
    annotations/instances_train.json and annotations/instances_val.json.
    """
    train_dict = init_coco_dict()
    val_dict = init_coco_dict()
    image_count = 0
    instance_count = 0
    # Pixels cropped from the top of each frame; box y-coordinates shift by this.
    cut_y=80
    images_path = os.path.join(root_path, 'raw_frames')
    label_path = os.path.join(root_path, 'annotations', 'raw_labels')
    labels = os.listdir(label_path)
    labels.sort()
    for label in labels:
        id_pool = []
        print('Processing label {}'.format(label))
        o = open(os.path.join(label_path, label), 'r')
        label_base_name = os.path.splitext(label)[0]
        lines = o.readlines()
        imgs_num = len(lines)
        for line in lines:
            line_list = line.split(',')
            frame_num = line_list[0]
            image_count += 1
            frame_img = label_base_name + '_{:06d}.jpg'.format(int(frame_num))
            image_dict = {
                'license': 1,
                'file_name': frame_img,
                'coco_url': '',
                'height': 640,
                'width': 640,
                'date_captured': '',
                'flickr_url': '',
                'id': image_count
            }
            # train_dict['images'].append(image_dict)
            # NOTE(review): image_count keeps growing across *all* label files
            # while imgs_num is per-file, so this 80/20 split is only accurate
            # for the first file -- confirm intended behaviour.
            if image_count >= 0.8 * imgs_num:
                val_dict['images'].append(image_dict)
            else:
                train_dict['images'].append(image_dict)
            obj_num = line_list[1]
            for obj in range(int(obj_num)):
                print(obj)
                instance_count += 1
                # 6 CSV fields per object; fields 3..6 hold the bbox.
                bbox = line_list[3 + 6 * obj:7 + 6 * obj]
                instance_dict = {
                    'iscrowd': 0,
                    'image_id': image_count,
                    'category_id': 1,
                    'id': instance_count
                }
                instance_dict['segmentation'] = []
                x1 = int(bbox[0])
                y1 = int(bbox[1])-cut_y
                w = int(bbox[2])
                h = int(bbox[3])
                instance_dict['bbox'] = [x1, y1, w, h]
                if image_count >= 0.8 * imgs_num:
                    val_dict['annotations'].append(instance_dict)
                else:
                    train_dict['annotations'].append(instance_dict)
            # image = image / 255.
            # plt.imshow(image)
            # plt.show()
        # break
    json.dump(train_dict, open(root_path+'annotations/'+'instances_train.json', 'w+'))
    json.dump(val_dict, open(root_path+'annotations/'+'instances_val.json', 'w+'))
if __name__ == '__main__':
    # Hard-coded dataset root; adjust for your machine.
    to_coco('/home/rvlab/Documents/DRDvideo_processed/')
|
# Load CSV using Pandas and print the per-column skew of the distribution.
import pandas as pd

filename = 'pima-indians-diabetes.data.csv'
# BUG FIX: the Pima Indians diabetes data file has no header row, so the
# default read_csv silently consumed the first data row as column names.
# Supply the standard column names explicitly instead.
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = pd.read_csv(filename, names=names)
skew = data.skew()
print(skew)
|
class BQueue:
    """A simple FIFO queue over a list with advancing front/rear indices.

    Dequeued slots are never reclaimed; `front` only moves forward.
    """

    def __init__(self):
        self.q = []       # backing storage; grows with every enqueue
        self.front = 0    # index of the next item to dequeue
        self.rear = 0     # index one past the last enqueued item

    def enqueue(self, i):
        """Add *i* at the rear of the queue."""
        self.q.append(i)
        self.rear += 1

    def dequeue(self):
        """Remove and return the front item; print a message (returning None) when empty."""
        if self.front == self.rear:
            print("Queue is empty")
            return None
        self.front += 1
        return self.q[self.front - 1]

    def printq(self):
        """Print the live slice of the queue, or a message when empty."""
        if self.front == self.rear:
            print("Queue is empty")
        else:
            print(self.q[self.front:self.rear])
# Demo: exercise the queue. The first and final printq/dequeue calls hit the
# empty-queue branch and print "Queue is empty".
q1 = BQueue()
q1.printq()
q1.enqueue(10)
q1.enqueue(100)
q1.printq()
q1.enqueue(1000)
q1.enqueue(10000)
q1.printq()
q1.enqueue(100000)
q1.printq()
q1.dequeue()
q1.printq()
q1.dequeue()
q1.printq()
q1.dequeue()
q1.printq()
q1.dequeue()
q1.printq()
q1.dequeue()
q1.printq()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from conans.model import Generator
from conans.client.generators import VisualStudioGenerator
from xml.dom import minidom
from conans.util.files import load
class VisualStudioMultiGenerator(Generator):
    """Conan generator producing a multi-configuration Visual Studio property sheet.

    Maintains conanbuildinfo_multi.props, which conditionally imports one
    per-(configuration, platform, toolset) props file emitted by the stock
    VisualStudioGenerator.
    """

    # Skeleton for a fresh multi.props file; imports are appended into the
    # PropertySheets ImportGroup.
    template = """<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
    <ImportGroup Label="PropertySheets" >
    </ImportGroup>
    <PropertyGroup Label="UserMacros" />
    <PropertyGroup />
    <ItemDefinitionGroup />
    <ItemGroup />
</Project>
"""

    @property
    def filename(self):
        # Multi-file generator: content() returns a {filename: text} dict,
        # so no single filename applies (None by convention).
        pass

    @property
    def content(self):
        """Return both the accumulated multi.props and the current-config props."""
        configuration = str(self.conanfile.settings.build_type)
        platform = {'x86': 'Win32', 'x86_64': 'x64'}.get(str(self.conanfile.settings.arch))
        # BUG FIX: was `self.settings`, which Generator does not define --
        # settings live on the conanfile (as used two lines above).
        vsversion = str(self.conanfile.settings.compiler.version)

        # there is also ClCompile.RuntimeLibrary, but it's handling is a bit complicated, so skipping for now
        condition = " '$(Configuration)' == '%s' And '$(Platform)' == '%s' And '$(VisualStudioVersion)' == '%s' "\
                    % (configuration, platform, vsversion + '.0')

        name_multi = 'conanbuildinfo_multi.props'
        name_current = ('conanbuildinfo_%s_%s_%s.props' % (configuration, platform, vsversion)).lower()

        # Start from the existing multi file when present so earlier configs survive.
        multi_path = os.path.join(self.output_path, name_multi)
        if os.path.isfile(multi_path):
            content_multi = load(multi_path)
        else:
            content_multi = self.template

        # Add the conditional Import for this configuration unless it is already there.
        dom = minidom.parseString(content_multi)
        import_node = dom.createElement('Import')
        import_node.setAttribute('Condition', condition)
        import_node.setAttribute('Project', name_current)
        import_group = dom.getElementsByTagName('ImportGroup')[0]
        children = import_group.getElementsByTagName("Import")
        for node in children:
            if name_current == node.getAttribute("Project") and condition == node.getAttribute("Condition"):
                break
        else:
            import_group.appendChild(import_node)
        content_multi = dom.toprettyxml()
        content_multi = "\n".join(line for line in content_multi.splitlines() if line.strip())

        # Delegate the per-configuration file to the stock generator.
        vs_generator = VisualStudioGenerator(self.conanfile)
        content_current = vs_generator.content

        return {name_multi: content_multi, name_current: content_current}
|
import cv2
import numpy
import sqlite3
import os
# Insert or update a person row in the SQLite database.
def insertOrUpdate(id, name, age, gender,
                   db_path='/Users/vubao/OneDrive/Máy tính/SQLiteStudio/Data.db'):
    """Insert a person row, or update it when the ID already exists.

    db_path: SQLite database file (new, backward-compatible parameter; the
    default keeps the original hard-coded path).

    SECURITY FIX: the original built SQL by string concatenation from
    user-supplied input() values -- a classic SQL-injection hole. All values
    are now bound with `?` placeholders.
    """
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.execute("SELECT 1 FROM people WHERE ID = ?", (id,))
        record_exists = cursor.fetchone() is not None
        if record_exists:
            conn.execute(
                "UPDATE people SET Name = ?, Age = ?, Gender = ? WHERE ID = ?",
                (str(name), str(age), str(gender), id))
        else:
            conn.execute(
                "INSERT INTO people(id, Name, Age, Gender) VALUES(?, ?, ?, ?)",
                (id, str(name), str(age), str(gender)))
        conn.commit()
    finally:
        # Always release the connection, even when a query fails.
        conn.close()
# Prompt for the person's details and store them in the DB.
id = input("Enter your ID: ")  # NOTE(review): shadows the builtin `id`
name = input("Enter your Name: ")
age = input("Enter your Age: ")
gender = input("Enter your Gender: ")
insertOrUpdate(id, name, age, gender)

# Load the Haar-cascade face detector (the XML must sit next to the script).
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
sampleNum = 0
while(True):
    # Grab a frame from the camera and detect faces in grayscale.
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    for(x, y, w, h) in faces:
        cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 2)
        if not os.path.exists('dataSet'):
            os.makedirs('dataSet')
        # Running count of captured face samples.
        sampleNum +=1
        # Save the cropped grayscale face into the dataset folder.
        cv2.imwrite('dataSet/User.'+str(id)+'.'+str(sampleNum)+ '.jpg', gray[y:y+h,x:x+w])
    cv2.imshow('frame', frame)
    cv2.waitKey(1)
    # Stop once more than 200 samples have been captured.
    if sampleNum > 200:
        break
cap.release()
cv2.destroyAllWindows()
|
import errno
import json
import socket
from contextlib import closing
from http.server import BaseHTTPRequestHandler
from urllib import parse
# Ugggg
# For use when a service provider doesn't account for ephemeral port numbers
# in redirect uris and so you have to explicitly register uris with ports.
_rando_reg_ports = [51283, 58641, 60089]
def try_get_free_port():
    """Return the first port in _rando_reg_ports that is currently free.

    Raises SystemError when every registered port is in use.
    """
    for port in _rando_reg_ports:
        # Idiom fix: was `if port_in_use(p) == False`.
        if not port_in_use(port):
            return port
    raise SystemError('Couldn\'t obtain a free registered port.')
def find_free_port():
    """Bind to an OS-assigned ephemeral port and return its number.

    BUG FIX: SO_REUSEADDR is now set *before* bind(); the original set it
    after binding, where it had no effect on this socket's own bind and did
    not help the caller re-bind the returned port immediately.
    """
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(('', 0))
        return s.getsockname()[1]
def port_in_use(port_number):
    """Return True when *port_number* is already bound on this host.

    Attempts a bind on all interfaces; EADDRINUSE means the port is taken,
    any other OSError is re-raised.
    """
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        try:
            s.bind(('', port_number))
        except OSError as e:
            # PORTABILITY FIX: was `e.errno == 98`, which is Linux-specific;
            # errno.EADDRINUSE is correct on every platform. Also re-raise
            # with bare `raise` to keep the original traceback.
            if e.errno == errno.EADDRINUSE:
                return True
            raise
        return False
class MyHttpServerBaseHandler(BaseHTTPRequestHandler):
    """BaseHTTPRequestHandler with a convenience helper for success responses."""

    def send_success_response(self, response_content_string='',
                              extra_headers=None, code=200):
        """Send *code* with Content-Length, optional extra headers, and the body.

        extra_headers: optional mapping of header name -> value.
        BUG FIX: the default was a mutable `{}` shared across all calls
        (classic mutable-default-argument pitfall); None is used instead.
        """
        response_len = len(response_content_string)
        self.send_response(code)
        self.send_header("Content-Length", str(response_len))
        for k, v in (extra_headers or {}).items():
            self.send_header(k, v)
        self.end_headers()
        if response_len > 0:
            self.wfile.write(response_content_string.encode())
def query_string_to_dict_without_lists(query_string):
    """Parse a query string, unwrapping single-element value lists.

    Keys with exactly one value map to that value; keys repeated in the
    query string keep their list of values (parse_qs semantics otherwise).
    """
    parsed = parse.parse_qs(query_string)
    return {key: values[0] if len(values) == 1 else values
            for key, values in parsed.items()}
def join_url_components(components):
    """
    Join URL path components with single '/' separators (a url `join`).

    :param components: string or list of strings
    :return: string

    Improvements over the original: `not isinstance(...)` instead of
    `isinstance(...) is False`; an empty list now returns '' instead of
    raising IndexError; string building uses `'/'.join` instead of
    repeated `+=` concatenation.
    """
    if not isinstance(components, list):
        return components.strip('/')
    return '/'.join(component.strip('/') for component in components)
|
# Generated by Django 3.0.2 on 2020-03-13 17:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the LayoutPrice model with three
    integer price fields (one per layout), all defaulting to 0.

    Do not edit generated migrations by hand beyond comments.
    """

    dependencies = [
        ('obsapp', '0008_product_product_date'),
    ]

    operations = [
        migrations.CreateModel(
            name='LayoutPrice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('layout1price', models.IntegerField(default=0)),
                ('layout2price', models.IntegerField(default=0)),
                ('layout3price', models.IntegerField(default=0)),
            ],
        ),
    ]
|
# Django settings for the VK iframe app "lets".
# SECURITY NOTE(review): DEBUG must be False in production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

# VKontakte iframe-app credentials.
# SECURITY NOTE(review): VK_APP_SECRET is committed in plain text -- move it
# out of version control (environment variable / local settings) and rotate it.
VK_APP_ID = '2427007'
VK_APP_KEY = ''
VK_APP_SECRET = 'ohRM9foTz8GBZQQ7cFwV'

# Google Maps API key.
GMAPS_API = 'ABQIAAAA92S7ccOh-SP6wUGsrpdL-BQizReYv0RYXMumyHYbF-1ckP5PhxTKio0m1x22jCulrae0aBIzIDpX3w'

# URL regexes reachable without VK authentication
# (presumably consumed by vk_iframe.middleware.LoginRequiredMiddleware -- confirm).
PUBLIC_URLS = [
    '^admin/',
    '^callback/',
]

ADMINS = (
    ('Nergal', 'nergal.dev@gmail.com'),
)
MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'lets.db',      # Or path to database file if using sqlite3.
        'USER': '',             # Not used with sqlite3.
        'PASSWORD': '',         # Not used with sqlite3.
        'HOST': '',             # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',             # Set to empty string for default. Not used with sqlite3.
    }
}

TIME_ZONE = 'Europe/Kiev'
LANGUAGE_CODE = 'ru-ru'
SITE_ID = 1
USE_I18N = True
USE_L10N = True

MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = '/static/'
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'

STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# SECURITY NOTE(review): committed SECRET_KEY -- rotate and load from the
# environment before deploying.
SECRET_KEY = 'igv$=g_xo&bz8*szv8hi9qv2(sqfew$j&xk^kk2t07h!7dichg'

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'vk_iframe.middleware.AuthenticationMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'vk_iframe.middleware.LoginRequiredMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'lets.urls'

TEMPLATE_DIRS = (
    'templates/',
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'vk_iframe',
)

AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'vk_iframe.backends.VkontakteUserBackend',
)

# Email unhandled-request errors (HTTP 500s) to ADMINS.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
|
import os
import urllib.request
# OpenCV releases whose opencv.js builds should be mirrored locally.
VERSIONS = [
    '4.2.0',
    '4.1.2',
    '4.1.1',
    '4.1.0',
    '3.4.9',
    '3.4.8',
    '3.4.7',
    '3.4.6',
]
def downlown_opencvjs_file (version, filename):
    """Download opencv.js for *version* from docs.opencv.org into *filename*."""
    url = "https://docs.opencv.org/%s/opencv.js" % version
    # Stream the response straight into the destination file.
    with urllib.request.urlopen(url) as response, open(filename, 'wb') as out_file:
        out_file.write(response.read())
def main():
    """Download opencv.js for every version in VERSIONS that is missing locally."""
    for v in VERSIONS:
        print("Checking version %s" % v)
        dst_dir_path = os.path.join(os.path.dirname(__file__), "v%s" % (v))
        dst_file_path = os.path.join(dst_dir_path, "opencv.js")
        if os.path.isfile(dst_file_path):
            # Typo fix: was "The file already exist."
            print("The file already exists. Skip this version.")
        else:
            # Typo fix: was "Dowloadinag".
            print('Downloading v%s' % v)
            # exist_ok: the directory may survive an earlier interrupted run
            # (dir created but download failed); plain makedirs would crash.
            os.makedirs(dst_dir_path, exist_ok=True)
            downlown_opencvjs_file(v, dst_file_path)
if __name__ == '__main__':
    # Run the downloader only when executed as a script (not on import).
    main()
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main, TestCase
from json import loads
from functools import partial
from os.path import join
from tornado.web import HTTPError
from qiita_db.handlers.tests.oauthbase import OauthTestingBase
import qiita_db as qdb
from qiita_db.handlers.reference import _get_reference
class UtilTests(TestCase):
    """Tests for the module-level helpers in qiita_db.handlers.reference."""

    def test_get_reference(self):
        # Unknown reference id -> tornado HTTPError.
        with self.assertRaises(HTTPError):
            _get_reference(100)

        # Known id -> the matching Reference object.
        obs = _get_reference(1)
        self.assertEqual(obs, qdb.reference.Reference(1))
class ReferenceHandler(OauthTestingBase):
    """Integration tests for the /qiita_db/references/<id>/ endpoint."""

    def test_get_reference_no_header(self):
        # Missing OAuth header -> 400.
        obs = self.get('/qiita_db/references/1/')
        self.assertEqual(obs.code, 400)

    def test_get_reference_does_not_exist(self):
        # Unknown reference id -> 404.
        obs = self.get('/qiita_db/references/100/',
                       headers=self.header)
        self.assertEqual(obs.code, 404)

    def test_get(self):
        # Valid request -> 200 with the reference's name, version and file paths.
        obs = self.get('/qiita_db/references/1/',
                       headers=self.header)
        self.assertEqual(obs.code, 200)

        # Expected file paths live under the 'reference' mountpoint.
        db_test_raw_dir = qdb.util.get_mountpoint('reference')[0][1]
        path_builder = partial(join, db_test_raw_dir)
        fps = {
            'reference_seqs': path_builder("GreenGenes_13_8_97_otus.fasta"),
            'reference_tax': path_builder(
                "GreenGenes_13_8_97_otu_taxonomy.txt"),
            'reference_tree': path_builder("GreenGenes_13_8_97_otus.tree")}
        exp = {'name': 'Greengenes', 'version': '13_8', 'files': fps}
        self.assertEqual(loads(obs.body), exp)
if __name__ == '__main__':
    # Run the unittest test runner when invoked directly.
    main()
|
import cv2
import base64
# Read the image and measure its base64-encoded JPEG size before resizing.
img = cv2.imread("D:/21.jpg")
retval, buffer = cv2.imencode('.jpg', img)
jpg_as_text = base64.b64encode(buffer)
print('len before : ',len(jpg_as_text))
print('Original Dimensions : ',img.shape)

scale_percent = 40 # percent of original size
width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100)
dim = (width, height)

# Resize (INTER_AREA is the recommended interpolation for shrinking),
# then re-measure the encoded size.
resized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
retval, buffer = cv2.imencode('.jpg', resized)
jpg_as_text = base64.b64encode(buffer)
print('len after : ',len(jpg_as_text))
print('Resized Dimensions : ',resized.shape)

# Compare with the raw file size on disk (bytes vs bytearray views).
with open("D:/21.jpg", "rb") as image:
    f = image.read()
    print( type(f) )
    print( len(f) )
    b = bytearray(f)
    print(type(b))
    print(len(b))

# cv2.imshow("Resized image", resized)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
import numpy as np
def newton_step(y, f, fy, fyy, ltol=1e-3):
    """Compute the Newton step for a function on the Grassmann manifold Gr(n, p).

    Parameters
    ----------
    y : (n, p) ndarray
        Current point on Gr(n, p).
    f : double
        Function value at `y` (carried for interface compatibility; not used
        in the step computation itself).
    fy : (n, p) ndarray
        Gradient of the function at `y` with respect to each entry of `y`.
    fyy : (np, np) ndarray
        Hessian at `y`, laid out column-major: the entry at
        ((j-1)n + i, (l-1)n + k) is d^2 f / dy_ij dy_kl.
    ltol : double
        Lower bound enforced on the smallest eigenvalue of the projected
        Hessian; a multiple of the identity is added when it falls below this.

    Returns
    -------
    v : (n, p) ndarray
        A tangent vector at `y`, i.e. y.T v = 0. The step is
        v = Pi(B^-1 grad f) with Pi the tangent-space projection.

    References
    ----------
    Edelman, A., Arias, T. A., and Smith, S. T. (1998). The geometry of
    algorithms with orthogonality constraints.
    SIAM J. Matrix Anal. Appl., 20(2), 303-353.
    """
    n, p = y.shape
    proj = np.eye(n) - np.dot(y, y.T)            # projector onto the tangent space at y
    proj_gram = np.dot(proj.T, proj)

    # Projected (negated) gradient, flattened column-major.
    rhs = -np.dot(proj, fy).reshape(n * p, 1, order='F')

    # Assemble the projected Hessian block by block.
    hess = np.zeros((n * p, n * p))
    yg = np.dot(y.T, fy)
    yg = (yg + yg.T) * 1. / 2                    # symmetrize
    for col in range(p):
        for row in range(p):
            block = fyy[col * n:(col + 1) * n, row * n:(row + 1) * n]
            projected = np.dot(proj.T, np.dot(block, proj))
            hess[col * n:(col + 1) * n, row * n:(row + 1) * n] = (
                projected - yg[row, col] * proj_gram
            )

    # Regularize: shift the spectrum so the smallest eigenvalue is at least ltol.
    smallest = np.min(np.linalg.eigvals(hess))
    if (smallest <= ltol):
        hess = hess + np.diag(np.repeat(ltol - smallest, n * p))

    step = np.linalg.solve(hess, rhs)
    step = step.reshape(n, p, order='F')
    # Project back onto the tangent space to kill numerical drift.
    return np.dot(proj, step)
|
#!/usr/bin/env python
''' WxPython App '''
import wx
class MenuBar(wx.MenuBar):
    """A wx.MenuBar pre-populated with File and Edit menus.

    BUG FIX: the original __init__ never called wx.MenuBar.__init__, built a
    *local* wx.MenuBar instead of populating self, and then called
    self.SetMenuBar -- a wx.Frame method that does not exist on wx.MenuBar.
    The menus are now appended to the instance itself.
    """

    def __init__(self):
        super().__init__()
        # File menu
        fileMenu = wx.Menu()
        fileMenu.Append(wx.NewId(), "&Open", "")
        fileMenu.Append(wx.NewId(), "&Save", "")
        fileMenu.AppendSeparator()
        fileMenu.Append(wx.NewId(), "&Exit", "")
        self.Append(fileMenu, "&File")
        # Edit menu
        editMenu = wx.Menu()
        editMenu.Append(wx.NewId(), "&Copy", "")
        editMenu.Append(wx.NewId(), "&Cut", "")
        editMenu.Append(wx.NewId(), "&Paste", "")
        editMenu.AppendSeparator()
        editMenu.Append(wx.NewId(), "&Options", "")
        self.Append(editMenu, "&Edit")
class Frame(wx.Frame):
    """Main window: fixed-size frame with panel, menu bar, status bar and demo buttons."""

    def __init__(self):
        # Resizing and maximizing are masked out of the default frame style.
        wx.Frame.__init__(self, parent=None, id=-1, title='WxPython App', pos=(150, 150),
                          size=(500,500),
                          style=wx.DEFAULT_FRAME_STYLE ^ (wx.RESIZE_BORDER | wx.MAXIMIZE_BOX))
        # create Panel
        self._create_Panel()
        # create MenuBar
        self._create_MenuBar()
        # create StatusBar
        self._create_StatusBar()
        # create Buttons
        self._create_Button()
        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)

    def _create_Panel(self):
        """White background panel that hosts the buttons."""
        self._panel = wx.Panel(self)
        self._panel.SetBackgroundColour('White')

    def _create_MenuBar(self):
        """Build the File and Edit menus and attach them to the frame."""
        self._menuBar = wx.MenuBar()
        # Append Menu for File Menu
        self._fileMenu = wx.Menu()
        self._fileMenu.Append(wx.NewId(), "&Open", "")
        self._fileMenu.Append(wx.NewId(), "&Save", "")
        self._fileMenu.AppendSeparator()
        self._fileMenu.Append(wx.NewId(), "&Exit", "")
        self._menuBar.Append(self._fileMenu, "&File")
        # Append Menu for Edit Menu
        self._editMenu = wx.Menu()
        self._editMenu.Append(wx.NewId(), "&Copy", "")
        self._editMenu.Append(wx.NewId(), "&Cut", "")
        self._editMenu.Append(wx.NewId(), "&Paste", "")
        self._editMenu.AppendSeparator()
        self._editMenu.Append(wx.NewId(), "&Options", "")
        self._menuBar.Append(self._editMenu, "&Edit")
        self.SetMenuBar(self._menuBar)

    def _create_StatusBar(self):
        """Standard status bar at the bottom of the frame."""
        self._statusBar = self.CreateStatusBar()

    def _create_Button(self):
        """Three demo buttons wired to the dialog handlers below."""
        self._buttonText = wx.Button(self._panel, label="Enter Text", pos=(200,380), size=(80,30))
        self._buttonChoice = wx.Button(self._panel, label="Select", pos=(300,380), size=(80,30))
        self._buttonQuit = wx.Button(self._panel, label="Quit", pos=(400,380), size=(80,30))
        self.Bind(wx.EVT_BUTTON, self.OnEnterText, self._buttonText)
        self.Bind(wx.EVT_BUTTON, self.OnChoice, self._buttonChoice)
        self.Bind(wx.EVT_BUTTON, self.OnQuit, self._buttonQuit)

    # Event dealing
    def OnEnterText(self, event):
        """Prompt for a name and greet the user in a message box."""
        text = wx.TextEntryDialog(self, "Please Enter Your Name:", 'Your Name')
        if text.ShowModal() == wx.ID_OK:
            name = text.GetValue()
            msgBox = wx.MessageDialog(self, 'Hello, '+name, 'Welcome', wx.OK)
            msgBox.ShowModal()

    def OnChoice(self, event):
        """Ask the user to pick an age bracket and echo the selection."""
        dlg = wx.SingleChoiceDialog(self, 'Please select your age:',
                                    'Your Age',
                                    ['<20', '20-40', '40-60', '60-80', '>80'])
        if dlg.ShowModal() == wx.ID_OK:
            response = dlg.GetStringSelection()
            msgBox = wx.MessageDialog(self, 'You select: '+response, 'Information', wx.OK)
            msgBox.ShowModal()

    def OnQuit(self, event):
        """Confirm before closing the frame; Close() triggers OnCloseWindow."""
        msgBox = wx.MessageDialog(self, 'Are you sure want to exit?',
                                  'Question', wx.YES_NO | wx.ICON_QUESTION)
        if msgBox.ShowModal() == wx.ID_YES:
            self.Close(True)

    def OnCloseWindow(self, event):
        """Destroy the frame on close (ends the app when it is the last window)."""
        self.Destroy()
class App(wx.App):
    """Application object: creates and shows the main Frame at startup."""

    def OnInit(self):
        self.frame = Frame()
        self.frame.Show()
        self.SetTopWindow(self.frame)
        return True
def main():
    """Create the wx App and enter its event loop (blocks until the app exits)."""
    app = App()
    app.MainLoop()

if __name__ == '__main__':
    main()
|
# 674. Longest Continuous Increasing Subsequence
#
# Input: [1,3,5,4,7]
# Output: 3
# Explanation: The longest continuous increasing subsequence is [1,3,5], its length is 3.
# Even though [1,3,5,7] is also an increasing subsequence,
# it's not a continuous one where 5 and 7 are separated by 4.
# Example 2:
# Input: [2,2,2,2,2]
# Output: 1
# Explanation: The longest continuous increasing subsequence is [2], its length is 1.
class Solution(object):
    def findLengthOfLCIS(self, nums):
        """
        Length of the longest strictly increasing contiguous subsequence.

        :type nums: List[int]
        :rtype: int

        Improvement: the original kept an O(n) dp array only to take its max;
        a running-length scan gives the same result in O(n) time, O(1) space.
        """
        if not nums:
            return 0
        best = current = 1
        for i in range(1, len(nums)):
            # Extend the current run on a strict increase, otherwise restart it.
            current = current + 1 if nums[i] > nums[i - 1] else 1
            best = max(best, current)
        return best
if __name__ == '__main__':
    sol = Solution()
    # Self-check against the first example.
    assert sol.findLengthOfLCIS([1,3,5,4,7]) == 3
from django.db import models
from django.urls import reverse
# Create your models here.
class Year(models.Model):
    """A calendar year in which semesters occur."""
    year_id = models.AutoField(primary_key=True)
    year = models.IntegerField(unique=True)

    class Meta:
        ordering = ['year']

    def __str__(self):
        return f'{self.year}'
class Period(models.Model):
    """A named term within a year (e.g. Spring), ordered by its sequence number."""
    period_id = models.AutoField(primary_key=True)
    period_sequence = models.IntegerField(unique=True)
    period_name=models.CharField(max_length=45,unique=True)

    class Meta:
        ordering = ['period_sequence']

    def __str__(self):
        return f'{self.period_name}'
class Semester(models.Model):
    """A (year, period) pairing such as "2024 - Spring"."""
    semester_id = models.AutoField(primary_key=True)
    year = models.ForeignKey(Year, related_name='semesters', on_delete=models.PROTECT)
    period = models.ForeignKey(Period, related_name='semesters', on_delete=models.PROTECT)

    class Meta:
        ordering = ['year__year', 'period__period_sequence']
        unique_together = ('year', 'period')

    def __str__(self):
        return f'{self.year.year} - {self.period.period_name}'

    def get_absolute_url(self):
        return reverse('courseinfo_semester_detail_urlpattern', kwargs={'pk': self.pk})

    def get_update_url(self):
        return reverse('courseinfo_semester_update_urlpattern', kwargs={'pk': self.pk})

    def get_delete_url(self):
        return reverse('courseinfo_semester_delete_urlpattern', kwargs={'pk': self.pk})
class Course(models.Model):
    """A course identified by its catalogue number and name."""
    course_id = models.AutoField(primary_key=True)
    course_number = models.CharField(max_length=20)
    course_name = models.CharField(max_length=225)

    class Meta:
        ordering = ['course_number', 'course_name']
        unique_together = (('course_number', 'course_name'),)

    def __str__(self):
        return f'{self.course_number} - {self.course_name}'

    def get_absolute_url(self):
        return reverse('courseinfo_course_detail_urlpattern', kwargs={'pk': self.pk})

    def get_update_url(self):
        return reverse('courseinfo_course_update_urlpattern', kwargs={'pk': self.pk})

    def get_delete_url(self):
        return reverse('courseinfo_course_delete_urlpattern', kwargs={'pk': self.pk})
class Instructor(models.Model):
    """An instructor, displayed as "Last, First"."""
    instructor_id = models.AutoField(primary_key=True)
    first_name = models.CharField(max_length=45)
    last_name = models.CharField(max_length=45)

    class Meta:
        ordering = ['last_name', 'first_name']
        unique_together = (('last_name', 'first_name'),)

    def __str__(self):
        return f'{self.last_name}, {self.first_name}'

    def get_absolute_url(self):
        return reverse('courseinfo_instructor_detail_urlpattern', kwargs={'pk': self.pk})

    def get_update_url(self):
        return reverse('courseinfo_instructor_update_urlpattern', kwargs={'pk': self.pk})

    def get_delete_url(self):
        return reverse('courseinfo_instructor_delete_urlpattern', kwargs={'pk': self.pk})
class Student(models.Model):
    """A student, displayed as "Last, First" with the nickname in parentheses when set."""
    student_id = models.AutoField(primary_key=True)
    first_name = models.CharField(max_length=45)
    last_name = models.CharField(max_length=45)
    nick_name = models.CharField(max_length=45,blank=True,default='')

    class Meta:
        ordering = ['last_name','first_name', 'nick_name']
        unique_together = (('last_name','first_name','nick_name'),)

    def __str__(self):
        if self.nick_name == '':
            return f'{self.last_name}, {self.first_name}'
        return f'{self.last_name}, {self.first_name} ({self.nick_name})'

    def get_absolute_url(self):
        return reverse('courseinfo_student_detail_urlpattern', kwargs={'pk': self.pk})

    def get_update_url(self):
        return reverse('courseinfo_student_update_urlpattern', kwargs={'pk': self.pk})

    def get_delete_url(self):
        return reverse('courseinfo_student_delete_urlpattern', kwargs={'pk': self.pk})
class Section(models.Model):
    """A concrete offering of a Course in a Semester, taught by an Instructor."""
    section_id = models.AutoField(primary_key=True)
    section_name = models.CharField(max_length=10)
    semester = models.ForeignKey(Semester,related_name='sections',on_delete=models.PROTECT)
    course = models.ForeignKey(Course,related_name='sections',on_delete=models.PROTECT)
    instructor = models.ForeignKey(Instructor,related_name='sections',on_delete=models.PROTECT)

    def __str__(self):
        return '%s -%s (%s)' % (self.course.course_name, self.section_name, self.semester.__str__())

    def get_absolute_url(self):
        return reverse('courseinfo_section_detail_urlpattern',kwargs={'pk':self.pk})

    def get_update_url(self):
        return reverse('courseinfo_section_update_urlpattern', kwargs={'pk': self.pk})

    def get_delete_url(self):
        return reverse('courseinfo_section_delete_urlpattern', kwargs={'pk': self.pk})

    class Meta:
        # BUG FIX: ordering referenced 'semester__semester_name', but Semester
        # defines no 'semester_name' field, so any ordered query raised
        # FieldError. Order by the semester's real related fields instead,
        # matching Semester.Meta.ordering.
        ordering = ['course__course_number', 'section_name',
                    'semester__year__year', 'semester__period__period_sequence']
        unique_together = (('semester', 'course', 'instructor'),)
class Registration(models.Model):
    """An enrollment record linking one Student to one Section."""
    registration_id = models.AutoField(primary_key=True)
    student = models.ForeignKey(Student, related_name='registrations', on_delete=models.PROTECT)
    section = models.ForeignKey(Section, related_name='registrations', on_delete=models.PROTECT)

    def __str__(self):
        # Delegates to Section.__str__ and Student.__str__.
        # (An unreachable print(self.section) after the return was removed.)
        return '%s / %s' % (self.section, self.student)

    class Meta:
        ordering = ['section', 'student']
        unique_together = (('section', 'student'),)

    def get_absolute_url(self):
        """Return the URL of this registration's detail page."""
        return reverse('courseinfo_registration_detail_urlpattern', kwargs={'pk': self.pk})

    def get_update_url(self):
        """Return the URL of this registration's update form."""
        return reverse('courseinfo_registration_update_urlpattern', kwargs={'pk': self.pk})

    def get_delete_url(self):
        """Return the URL of this registration's delete confirmation page."""
        return reverse('courseinfo_registration_delete_urlpattern', kwargs={'pk': self.pk})
|
#-- GAUDI jobOptions generated on Fri Jul 17 16:32:45 2015
#-- Contains event types :
#-- 11104020 - 116 files - 2010993 events - 432.26 GBytes
#-- Extra information about the data processing phases:
#-- Processing Pass Step-124834
#-- StepId : 124834
#-- StepName : Reco14a for MC
#-- ApplicationName : Brunel
#-- ApplicationVersion : v43r2p7
#-- OptionFiles : $APPCONFIGOPTS/Brunel/DataType-2012.py;$APPCONFIGOPTS/Brunel/MC-WithTruth.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124620
#-- StepId : 124620
#-- StepName : Digi13 with G4 dE/dx
#-- ApplicationName : Boole
#-- ApplicationVersion : v26r3
#-- OptionFiles : $APPCONFIGOPTS/Boole/Default.py;$APPCONFIGOPTS/Boole/DataType-2012.py;$APPCONFIGOPTS/Boole/Boole-SiG4EnergyDeposit.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124632
#-- StepId : 124632
#-- StepName : TCK-0x409f0045 Flagged for Sim08 2012
#-- ApplicationName : Moore
#-- ApplicationVersion : v14r8p1
#-- OptionFiles : $APPCONFIGOPTS/Moore/MooreSimProductionWithL0Emulation.py;$APPCONFIGOPTS/Conditions/TCK-0x409f0045.py;$APPCONFIGOPTS/Moore/DataType-2012.py;$APPCONFIGOPTS/L0/L0TCK-0x0045.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124630
#-- StepId : 124630
#-- StepName : Stripping20-NoPrescalingFlagged for Sim08
#-- ApplicationName : DaVinci
#-- ApplicationVersion : v32r2p1
#-- OptionFiles : $APPCONFIGOPTS/DaVinci/DV-Stripping20-Stripping-MC-NoPrescaling.py;$APPCONFIGOPTS/DaVinci/DataType-2012.py;$APPCONFIGOPTS/DaVinci/InputType-DST.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-125577
#-- StepId : 125577
#-- StepName : Sim08a - 2012 - MD - Pythia8
#-- ApplicationName : Gauss
#-- ApplicationVersion : v45r3
#-- OptionFiles : $APPCONFIGOPTS/Gauss/Sim08-Beam4000GeV-md100-2012-nu2.5.py;$DECFILESROOT/options/@{eventType}.py;$LBPYTHIA8ROOT/options/Pythia8.py;$APPCONFIGOPTS/Gauss/G4PL_FTFP_BERT_EmNoCuts.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : Sim08-20130503-1
#-- CONDDB : Sim08-20130503-1-vc-md100
#-- ExtraPackages : AppConfig.v3r171;DecFiles.v27r11
#-- Visible : Y
from Gaudi.Configuration import *
from GaudiConf import IOHelper

# The 116 input files all share one LFN pattern with a contiguous
# 1..116 file index, so generate the list instead of hard-coding it.
_LFN_TEMPLATE = (
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030280/0000/'
    '00030280_%08d_1.allstreams.dst'
)
IOHelper('ROOT').inputFiles(
    [_LFN_TEMPLATE % index for index in range(1, 117)],
    clear=True,
)
|
# Teaching script: prints a short lesson on comparison operators and on
# operator precedence (arithmetic evaluates before comparison), then examples.
# Fixed typos in the printed text: PRESCENDENCIA -> PRECEDENCIA,
# "lo operadores" -> "los operadores", "lo de comparación" -> "los de comparación".
print('''
OPERADORES DE COMPARACION
Devuelve valores booleanos.
-Igual que ==
-Distinto que !=
-menor que <
-menor igual que <=
-Mayor que >
-Mayor igual que >=
-Asignación =
Ejemplo:
15 > 3 -> True
5 < 1 -> False
3 == 3 -> True
4 == 2+2 -> True
PRECEDENCIA DE OPERADORES
Siempre se van a ejecutar los operadores aritméticos y
luego se ejecutan los de comparación
Ejemplo:
8 + 0 > 8 -> False
10 != 5 * 2 -> False
''')
print("El resultado de: 4+3 > 6-1 es:", 4+3 > 6-1)
print("El resultado de: 6+2*3 == 9/3*4 es: ", 6+2*3 == 9/3*4)
print("En la operación anterior primero multiplica 2*3==6 +6 son 12, luego divide 9/3==3 * 4, son 12 y luego compara los resultados. True")
|
from gym.envs.registration import register

# Register the custom environment so gym.make("overcookedEnv-v0") resolves
# to the OvercookedEnvironment class in the overcooked.envs module.
register(
    id="overcookedEnv-v0",
    entry_point="overcooked.envs:OvercookedEnvironment",
)
|
from django.urls import path

from . import views

# Route table for the movie app; the ``name`` values are used for reverse().
urlpatterns = [
    # Movie detail, looked up by integer primary key.
    path('movie/<int:movieId>', views.get_movie, name='get_movie'),
    path('login', views.login, name='login'),
    path('recommendations', views.recommendations, name='recommendations'),
    # path('popularity_movies',views.get_popularity_movies,name='popularity_movies'),
    # Class-based search view.
    path('search_movies', views.SearchMovies.as_view(), name='search_movies')
]
# from flask import request
#
# from usermanager.app import app
# from usermanager.dao.user import UserMongoDBDao
# from usermanager.mongodb import user_collection
# from usermanager.utils.handle_api import handle_response
#
#
# META_SUCCESS = {'status': 200, 'msg': '成功!'}
# META_ERROR = {'status': 404, 'msg': '失败!该用户不存在!'}
#
#
# @app.route('/user/profile', methods=['GET'])
# @handle_response
# def user_profile():
# username = request.args.get("username")
# user_mongodb_dao = UserMongoDBDao(user_collection)
#
# user = user_mongodb_dao.get_user(username)
# if not user:
# return {'meta': META_ERROR}
#
# # Remove password attribute
# data = user.jsonify()
# data.pop('password')
# return {'data': data, 'meta': META_SUCCESS}
|
import random
def deal(deck):
    """Remove and return one uniformly random card from *deck* (mutates it)."""
    picked = random.choice(deck)
    deck.remove(picked)
    return picked
def playBlackjack():
    """Play one round of blackjack and print everyone's result.

    Deals two cards to each player (one scripted AI, one human on stdin),
    lets each hit until they stay or bust, then plays out the dealer and
    reports win/tie/loss per player.  Relies on ``deal``, ``Hand``, ``AI``
    and ``Manual`` defined in this module.
    """
    players = [AI('Stardust'), Manual('Manual')]
    deck_count = 1  # number of 52-card decks shuffled together
    scores = [0 for _ in players]
    # Face cards are strings ('A','K','Q','J'); number cards are ints.
    deck = ['A','K','Q','J',10,9,8,7,6,5,4,3,2] * 4 * deck_count
    faceUp = deal(deck)  # dealer's face-up card, visible to the players
    dealt = [faceUp]     # every card dealt so far (passed to player.choice)
    for id, player in enumerate(players):
        hand = Hand([deal(deck), deal(deck)])
        dealt.extend(hand)
        # Keep offering cards until the player stays or the hand busts.
        while hand.value() <= 21:
            if player.choice(hand, faceUp, dealt):
                card = deal(deck)
                dealt.append(card)
                hand.addCard(card)
            else:
                break # If player chooses to stay, stop dealing
        finalVal = hand.value()
        scores[id] = finalVal
    faceDown = deal(deck)
    dealer = Hand([faceUp,faceDown])
    # Dealer hits below 17 and also on soft 17 (value(1) == (17, 1)).
    while dealer.value() < 17 or dealer.value(1) == (17,1):
        dealer.addCard(deal(deck))
    finalDealer = dealer.value()
    if finalDealer > 21:
        print('Dealer busted with {}!'.format(finalDealer))
    elif finalDealer == 21:
        print("Dealer got a blackjack!")
    else:
        print("Dealer scored {}.".format(finalDealer))
    # Compare each player's final score against the dealer's.
    for id, player in enumerate(players):
        score = scores[id]
        if score > 21:
            print('{} busted with {}.'.format(player.name,score))
        elif score == 21:
            # NOTE(review): format() receives an unused second argument here.
            print('{} got a blackjack!'.format(player.name,score))
        else:
            if score > finalDealer or finalDealer > 21:
                status = 'won'
            elif score == finalDealer:
                status = 'tied'
            else:
                status = 'lost'
            print("{} scored {} and {}.".format(player.name,score,status))
    print('\n\n')
class Hand():
    """A blackjack hand: holds cards and computes the best point value."""

    def __init__(self, cards):
        self.cards = []
        for card in cards:
            self.addCard(card)

    def __iter__(self):
        return iter(self.cards)

    def addCard(self, card):
        """Append *card* to the hand."""
        self.cards.append(card)

    def value(self, dealer = 0):
        """Return the hand's best value (aces promoted to 11 when safe).

        Aces first count as 1; one ace is then promoted to 11 if that keeps
        the total at 21 or below ("soft" hand).  When *dealer* is truthy,
        return a ``(value, soft)`` tuple where ``soft`` is 1 if an ace is
        currently counted as 11.
        """
        val = 0
        aceCount = 0
        soft = 0
        for card in self.cards:
            if card == 'A':
                aceCount += 1
                val += 1
            elif isinstance(card, str):  # K, Q, J count as 10
                val += 10
            elif isinstance(card, int):  # number cards count face value
                val += card
        # Promote aces from 1 to 11 while that does not bust the hand
        # (at most one promotion can ever fit under 21).
        for ace in range(aceCount):
            if val <= 11:
                soft = 1
                val += 10
            else:
                break
        return (val, soft) if dealer else val
class Player():
    """Base class for participants; stores only the display name."""

    def __init__(self, name):
        self.name = name
class AI(Player):
    """Scripted player: hits on anything below 19, otherwise stays."""

    def choice(self, hand, faceUp, dealt):
        """Return 1 to hit or 0 to stay for the given hand."""
        return 1 if hand.value() < 19 else 0
class Manual(Player):
    """Human player: asks on stdin whether to hit (1) or stay (0)."""

    def choice(self, hand, faceUp, dealt):
        prompt = 'Hit (1) or stay (0) with hand of {}?\n'.format(hand.cards)
        return int(input(prompt))
while 1: playBlackjack()
|
#标注训练集的anchor
"""
在训练集中,将每个anchor作为一个训练样本,为了训练目标检测模型,需要为每个anchor标注两类标签:
1.anchor所含目标的类别,简称类别
2.真实边界框ground truth相对anchor的偏移量,偏移量offset,
在目标检测时,
1.首先生成多个anchor,
2.然后为每个anchor预测类别以及偏移量,
3.接着根据预测的偏移量调整anchor位置从而得到预测边界框,
4.最终筛选需要输出的预测边界框
在目标检测的训练集中,每个图像已经标注了真实边界框ground truth的位置以及所含目标的类别;
在生成anchor之后,主要依据与anchor相似的真实边界框ground truth的位置和类别信息为anchor标注,
问题:该如何为anchor分配与其相似的真实边界框ground truth呢
假设图像中的anchor分别为A1,A2,...,Ana;真实边界框ground truth分别为B1,B2,...,Bnb,且na>=nb(即anchor的数量大于ground truth的数量)
定义矩阵X属于R-na*nb,其中第i行第j列的元素xij为anchor Ai与ground-truth Bj 的IOU
1.首先,找出矩阵X中最大元素,并将该元素的行索引和列索引分别记为i1,j1
为anchor Ai1分配ground-truth Bj1; 显然,anchor Ai1 和ground-truth Bj1 在所有'anchor and ground-truth'中相似度最高
2.接下来,将矩阵X中第i1行和第j1列上所有的元素丢弃,(我觉得这里是因为Bj1和Ai1互为最佳匹配,因此不需要再进行选择)
找出矩阵X中剩余的最大元素,并将该元素的行索引和列索引分别记为i2,j2,
为anchor Ai2分配ground-turth Bj2 再讲矩阵中第i2行和第j2列所有元素丢弃
3.此时矩阵X中已有两行、两列元素被丢弃,
以此类推,知道矩阵X中所有nb列元素全部被丢弃,(因为一般anchor的数量大于ground-truth)
这个时候,已经为nb个anchor各分配了一个 ground-truth,
4.接下来,只需要遍历剩余的na-nb个anchor:
给定其中的anchor Ai ,根据矩阵X的第i行找到与Ai IOU最大的ground-truth Bj(值得注意的是,这里X和Bj用的应该是未丢弃元素之前的矩阵和元素),
且只有当该IOU大于预先设定的阈值时,才为anchor Ai分配真实边界框Bj
现在可以标注anchor的类别class和偏移量offset
如果一个anchor A 被分配了ground-truth B,将anchor A的类别设为B的类别,并根据B和A的中心坐标的相对位置以及两个框的相对大小为anchor A标注偏移量
有数据集中各个框的位置和大小各异,因此这些相对位置和相对大小通常需要一些特殊变换,才能使偏移量的分布更加均匀,从而更容易拟合
设anchor A及其分配的ground-truth B的中心坐标分别为(xa,ya)和(xb,yb); A 和 B的宽分别为wa和wb; 高分别为ha和hb; 一个常用的技巧是将A的偏移量标注为:
公式见文档,其中常数的默认值为 (mu)_x=(mu)_y=(mu)_w=(mu)_h=0, (sigma)_x=(sigma)_y=0.1, (sigma)_w=(sigma)_h=0.2
如果一个anchor没有被分配真实边界框ground-truth ,那么只需要将该anchor的类别设为背景,类别为背景的anchor通常被称为父类anchor,其余被称为正类anchor
"""
#sample
"""
读取图像中猫和狗的真实边界框,其中第一个二元素为类别(0为狗,1为猫),剩余四个元素分别为左上角的x和y,以及右下角的x和y,(值域在[0,1])
此时通过左上角和右下角的坐标够早了5个需要标注的anchor,分别记为A0,...,A4
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as Data
import torchvision.transforms as transforms
import numpy as np
import math
from PIL import Image
import matplotlib.pyplot as plt
#加载和显示图像
img=Image.open('./catdog.jpg')
plt.imshow(img)
plt.show()
plt.close()
print(img.size) #值得注意的是,这里返回的是w*h
w,h=img.size
#画出这些anchor与ground truth在图像中的位置
bbox_scale=torch.tensor((w,h,w,h),dtype=torch.float32)
#ground_truth
#每一行表示的是图像中一个目标的ground-truth:[class,(bbox_coordinate)],注意需要尺度的恢复
ground_truth=torch.tensor([[0,0.1,0.08,0.52,0.92],
[1,0.55,0.2,0.9,0.88]])
#定义anchor
#anchor只是坐标
anchors=torch.tensor(
[[0,0.1,0.2,0.3],
[0.15,0.2,0.4,0.4],
[0.63,0.05,0.88,0.98],
[0.66,0.45,0.8,0.8],
[0.57,0.3,0.92,0.9]]
)
def show_bboxes(axes, bboxes, labels=None, colors=None):
    """Draw bounding boxes on *axes* with cycled colors and optional labels.

    *bboxes* iterates rows of corner coordinates [x_min, y_min, x_max, y_max]
    (already in pixels).  *labels* and *colors* may be None, a scalar, or a
    list; scalars are broadcast to every box.
    """
    def _make_list(obj, default_values=None):
        # Normalize None/scalar arguments into a list.
        if obj is None:
            obj = default_values
        elif not isinstance(obj, (list, tuple)):
            obj = [obj]
        return obj

    labels = _make_list(labels)
    colors = _make_list(colors, ['b', 'g', 'r', 'm', 'c'])
    for i, bbox in enumerate(bboxes):  # one row per box
        color = colors[i % len(colors)]
        rect = plt.Rectangle(
            xy=(bbox[0], bbox[1]),
            width=bbox[2] - bbox[0],
            height=bbox[3] - bbox[1],
            fill=False,
            edgecolor=color,  # FIX: color was computed but never applied to the outline
            linewidth=2
        )
        axes.add_patch(rect)
        if labels and len(labels) > i:
            # White text on colored boxes, black text on white boxes.
            text_color = 'k' if color == 'w' else 'w'
            axes.text(rect.xy[0], rect.xy[1], labels[i],
                      va='center', ha='center', fontsize=6,
                      color=text_color, bbox=dict(facecolor=color, lw=0))
fig=plt.imshow(img)
# Draw the two ground-truth boxes (in black, labeled dog/cat) and the five
# anchors (labeled 0..4), scaling normalized coordinates back to pixels.
show_bboxes(fig.axes,ground_truth[:,1:]*bbox_scale,['dog','cat'],'k')
show_bboxes(fig.axes,anchors*bbox_scale,['0','1','2','3','4'])
plt.show()
|
import tensorflow as tf
from ._tf_oplib import _op_lib
from kungfu.python import current_rank
class Queue:
    """A one-directional message queue between two peers."""

    def __init__(self, src, dst, id):
        # Coerce the endpoints and the queue id to plain ints.
        self._src = int(src)
        self._dst = int(dst)
        self._id = int(id)

    def get(self, dtype, shape):
        """Receive a tensor of the given *dtype* and *shape* from the queue."""
        return _op_lib.kungfu_queue_get(T=dtype, shape=shape,
                                        src=self._src, dst=self._dst,
                                        qid=self._id)

    def put(self, x):
        """Send tensor *x* into the queue."""
        return _op_lib.kungfu_queue_put(x, src=self._src,
                                        dst=self._dst, qid=self._id)
def new_queue(src, dst):
    """Create a queue from peer *src* to peer *dst*.

    Returns None if the current peer is neither endpoint; otherwise returns
    a :class:`Queue` wrapping the newly allocated queue id.  (The previous
    docstring incorrectly said the queue ID itself was returned.)
    """
    rank = current_rank()
    if src != rank and dst != rank:
        return None
    q = _op_lib.kungfu_new_queue(src=src, dst=dst)
    return Queue(src, dst, q)
|
import math
import pytest
import ipywidgets
from pandas_visual_analysis import DataSource
from pandas_visual_analysis.utils.config import Config
from pandas_visual_analysis.widgets import BrushSummaryWidget
from tests import sample_dataframes
@pytest.fixture(scope="module")
def small_df():
    """Small sample DataFrame, built once and shared module-wide."""
    return sample_dataframes.small_df()
@pytest.fixture(scope="module")
def rand_float_df():
    """1000x10 random-float DataFrame, built once and shared module-wide."""
    return sample_dataframes.random_float_df(1000, 10)
@pytest.fixture(scope="module")
def populated_config():
    """Populate the Config with fixed colors/alpha and return it.

    The fixture previously returned None, so requesting tests received
    nothing and relied purely on the side effect; returning the instance
    is backward compatible and lets tests inspect it directly.
    """
    config = Config()
    config.alpha = 0.75
    config.select_color = (0, 0, 0)
    config.deselect_color = (0, 0, 0)
    config.color_scale = [
        [0, "rgb(%d,%d,%d)" % config.deselect_color],
        [1, "rgb(%d,%d,%d)" % config.select_color],
    ]
    return config
class TestInit:
    """Constructor behaviour of BrushSummaryWidget."""

    def test_basic_creation(self, small_df):
        data_source = DataSource(small_df, None)
        BrushSummaryWidget(data_source, 0, 0, 1.0, 300)

    def test_normal_few_cols_error(self, small_df):
        # Dropping columns below the required minimum must be rejected.
        reduced = small_df.drop(columns=["a", "c"])
        data_source = DataSource(reduced, None)
        with pytest.raises(ValueError):
            BrushSummaryWidget(data_source, 0, 0, 1.0, 400)
class TestBuild:
    """build() must produce a two-child VBox widget tree."""

    def test_basic_build(self, small_df, populated_config):
        data_source = DataSource(small_df, None)
        widget = BrushSummaryWidget(data_source, 0, 0, 1.0, 400)
        root = widget.build()
        assert isinstance(root, ipywidgets.VBox)
        assert isinstance(root, ipywidgets.Widget)
        assert len(root.children) == 2
class TestChanges:
    """Widget reaction to brushing and to metric selection."""

    def test_indices_changed(self, small_df, populated_config):
        data_source = DataSource(small_df, None)
        widget = BrushSummaryWidget(data_source, 0, 0, 1.0, 400)
        data_source.brushed_indices = [0]
        brushed_row = data_source.brushed_data
        # With a single brushed row, its mean equals the row value and
        # the count metric is exactly 1.
        assert widget.brushed_metrics.loc["mean"]["a"] == brushed_row["a"][0]
        assert widget.brushed_metrics.loc["mean"]["c"] == brushed_row["c"][0]
        assert widget.brushed_metrics["a"]["count"] == 1.0
        assert widget.brushed_metrics["c"]["count"] == 1.0

    def test_metric_changed_basic(self, small_df):
        data_source = DataSource(small_df, None)
        widget = BrushSummaryWidget(data_source, 0, 0, 1.0, 400)
        widget.metric_select.value = "min"  # changing the metric must not raise
class TestMapValues:
    """_map_value rescales into the target range and clamps out-of-range input."""

    def test_basic_map(self):
        # Midpoint of [0, 10] maps to midpoint of [0, 1].
        assert BrushSummaryWidget._map_value(5, 0, 10, 0, 1) == 0.5

    def test_map_nan(self):
        # A NaN bound yields the target minimum.
        assert BrushSummaryWidget._map_value(1, math.nan, 2, 0, 1) == 0

    def test_map_bigger(self):
        # Values above the source range clamp to the target maximum.
        assert BrushSummaryWidget._map_value(3, 0, 1, 0, 10) == 10

    def test_map_smaller(self):
        # Values below the source range clamp to the target minimum.
        assert BrushSummaryWidget._map_value(-1, 0, 10, 0, 1) == 0
class TestSelection:
    """Selection callbacks must tolerate None payloads without raising."""

    def test_selection(self, small_df):
        data_source = DataSource(small_df, None)
        widget = BrushSummaryWidget(data_source, 0, 0, 1.0, 400)
        widget.on_selection(None, None, None)

    def test_deselection(self, small_df):
        data_source = DataSource(small_df, None)
        widget = BrushSummaryWidget(data_source, 0, 0, 1.0, 400)
        widget.on_deselection(None, None)
|
# Demonstrates the ``nonlocal`` keyword: rebinding the nearest enclosing
# function's variable rather than a global or a new local.
x = 6  # module-level (global) x; never touched by the functions below
def f():
    x = 5  # f's local x -- this is the binding that ``nonlocal`` targets
    def g():
        nonlocal x  # rebind f's x (not the global x, not a new local)
        x = 8
    g()
    print('x = ', x)  # prints 8: g() overwrote f's local binding
# x = 8 #local
f()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.