| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable (⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
ab388f793983f211a87b38848eb922961c385516
|
b6010878b98cc924bcafda43893e8ca1d375f4bb
|
/parser.py
|
a855ab1a133713966d57cc09d5575abd3b08e091
|
[] |
no_license
|
pepijndevos/bobcat
|
7fc06e17dbec63750845bcd6b75465485bd9d07f
|
d4dea0b62c9fe9d4396be0d17f94d7a5bf289022
|
refs/heads/master
| 2020-03-26T03:36:02.603236
| 2018-09-01T14:38:27
| 2018-09-01T14:38:27
| 144,462,635
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,296
|
py
|
from ply import lex
from ply.lex import TOKEN
tokens = [
# Identifiers
'ID',
# constants
'INT_CONST_DEC', 'INT_CONST_OCT', 'INT_CONST_HEX', 'INT_CONST_BIN',
'FLOAT_CONST', 'HEX_FLOAT_CONST',
'CHAR_CONST',
#'WCHAR_CONST',
# String literals
'STRING_LITERAL',
#'WSTRING_LITERAL',
]
literals = "[](),:"
# valid C identifiers (K&R2: A.2.3), plus '$' (supported by some compilers)
identifier = r'[a-zA-Z_$][0-9a-zA-Z_$]*'
hex_prefix = '0[xX]'
hex_digits = '[0-9a-fA-F]+'
bin_prefix = '0[bB]'
bin_digits = '[01]+'
# integer constants (K&R2: A.2.5.1)
integer_suffix_opt = r'(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?'
decimal_constant = '(0'+integer_suffix_opt+')|([1-9][0-9]*'+integer_suffix_opt+')'
octal_constant = '0[0-7]*'+integer_suffix_opt
hex_constant = hex_prefix+hex_digits+integer_suffix_opt
bin_constant = bin_prefix+bin_digits+integer_suffix_opt
bad_octal_constant = '0[0-7]*[89]'
# character constants (K&R2: A.2.5.2)
# Note: a-zA-Z and '.-~^_!=&;,' are allowed as escape chars to support #line
# directives with Windows paths as filenames (..\..\dir\file)
# For the same reason, decimal_escape allows all digit sequences. We want to
# parse all correct code, even if it means to sometimes parse incorrect
# code.
#
simple_escape = r"""([a-zA-Z._~!=&\^\-\\?'"])"""
decimal_escape = r"""(\d+)"""
hex_escape = r"""(x[0-9a-fA-F]+)"""
bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-7])"""
escape_sequence = r"""(\\("""+simple_escape+'|'+decimal_escape+'|'+hex_escape+'))'
cconst_char = r"""([^'\\\n]|"""+escape_sequence+')'
char_const = "'"+cconst_char+"'"
wchar_const = 'L'+char_const
unmatched_quote = "('"+cconst_char+"*\\n)|('"+cconst_char+"*$)"
unmatched_doublequote = "(\""+cconst_char+"*\\n)|(\""+cconst_char+"*$)"
bad_char_const = r"""('"""+cconst_char+"""[^'\n]+')|('')|('"""+bad_escape+r"""[^'\n]*')"""
# string literals (K&R2: A.2.6)
string_char = r"""([^"\\\n]|"""+escape_sequence+')'
string_literal = '"'+string_char+'*"'
wstring_literal = 'L'+string_literal
bad_string_literal = '"'+string_char+'*?'+bad_escape+string_char+'*"'
# floating constants (K&R2: A.2.5.3)
exponent_part = r"""([eE][-+]?[0-9]+)"""
fractional_constant = r"""([0-9]*\.[0-9]+)|([0-9]+\.)"""
floating_constant = '(((('+fractional_constant+')'+exponent_part+'?)|([0-9]+'+exponent_part+'))[FfLl]?)'
binary_exponent_part = r'''([pP][+-]?[0-9]+)'''
hex_fractional_constant = '((('+hex_digits+r""")?\."""+hex_digits+')|('+hex_digits+r"""\.))"""
hex_floating_constant = '('+hex_prefix+'('+hex_digits+'|'+hex_fractional_constant+')'+binary_exponent_part+'[FfLl]?)'
t_ignore = ' \t'
# Newlines
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
t_STRING_LITERAL = string_literal
# The following floating and integer constants are defined as
# functions to impose a strict order (otherwise, decimal
# is placed before the others because its regex is longer,
# and this is bad)
#
@TOKEN(floating_constant)
def t_FLOAT_CONST(t):
return t
@TOKEN(hex_floating_constant)
def t_HEX_FLOAT_CONST(t):
return t
@TOKEN(hex_constant)
def t_INT_CONST_HEX(t):
return t
@TOKEN(bin_constant)
def t_INT_CONST_BIN(t):
return t
@TOKEN(bad_octal_constant)
def t_BAD_CONST_OCT(t):
print("Invalid octal constant")
t.lexer.skip(1)
@TOKEN(octal_constant)
def t_INT_CONST_OCT(t):
return t
@TOKEN(decimal_constant)
def t_INT_CONST_DEC(t):
return t
# Must come before bad_char_const, to prevent it from
# catching valid char constants as invalid
#
@TOKEN(char_const)
def t_CHAR_CONST(t):
return t
@TOKEN(wchar_const)
def t_WCHAR_CONST(t):
return t
@TOKEN(unmatched_quote)
def t_UNMATCHED_QUOTE(t):
print("Unmatched '")
t.lexer.skip(1)
@TOKEN(bad_char_const)
def t_BAD_CHAR_CONST(t):
print("Invalid char constant %s" % t.value)
t.lexer.skip(1)
@TOKEN(wstring_literal)
def t_WSTRING_LITERAL(t):
return t
@TOKEN(unmatched_doublequote)
def t_UNMATCHED_DOUBLEQUOTE(t):
print("Unmatched \"")
t.lexer.skip(1)
@TOKEN(bad_string_literal)
def t_BAD_STRING_LITERAL(t):
print("Invalid string literal")
t.lexer.skip(1)
@TOKEN(identifier)
def t_ID(t):
return t
def t_COMMENT(t):
r'\#.*'
pass
# Error handling rule
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
import ply.yacc as yacc
from collections import namedtuple
class Juxt(list):
def __repr__(self):
return "Juxt"+list.__repr__(self)
Def = namedtuple("Def", ["name", "quotation"])
Lit = namedtuple("Lit", ["type", "value"])
Node = namedtuple("Node", ["type", "quotation"])
def p_expression(p):
"""expression : empty
| expression part"""
if len(p) > 2:
p[1].append(p[2])
p[0] = p[1]
else:
p[0] = []
def p_part(p):
"""part : definition
| word
| juxt
| quotation
| node"""
p[0] = p[1]
def p_word_id(p):
"""word : ID"""
p[0] = p[1]
def p_word_char(p):
"""word : CHAR_CONST"""
p[0] = Lit('char', p[1])
def p_word_float(p):
"""word : FLOAT_CONST
| HEX_FLOAT_CONST"""
p[0] = Lit('float', p[1])
def p_word_int(p):
"""word : INT_CONST_BIN
| INT_CONST_DEC
| INT_CONST_HEX
| INT_CONST_OCT"""
p[0] = Lit('int', p[1])
def p_word_string(p):
"""word : STRING_LITERAL"""
p[0] = Lit('char*', p[1])
def p_juxt(p):
"""juxt : word ',' word
| juxt ',' word"""
if isinstance(p[1], Juxt):
p[1].append(p[3])
p[0] = p[1]
else:
p[0] = Juxt([p[1], p[3]])
def p_quotation(p):
"""quotation : '[' expression ']'"""
p[0] = p[2]
def p_definition(p):
"""definition : ID ':' quotation"""
p[0] = Def(p[1], p[3])
def p_node(p):
"""node : ID '(' expression ')'"""
p[0] = Node(p[1], p[3])
def p_empty(p):
'empty :'
pass
# Error rule for syntax errors
def p_error(p):
print("Syntax error in input!")
# Build the parser
parser = yacc.yacc()
if __name__ == "__main__":
while True:
try:
s = input('> ')
except EOFError:
break
if not s: continue
result = parser.parse(s)
print(result)
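# --- Editor's note: illustrative only, not part of the original file. ---
# Assuming PLY resolves the ID/':' shift-reduce conflict in favor of shift
# (its default), parsing a small program yields plain Python values, e.g.:
#   parser.parse('square: [dup mul] square(3)')
#   -> [Def(name='square', quotation=['dup', 'mul']),
#       Node(type='square', quotation=[Lit(type='int', value='3')])]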
|
[
"pepijndevos@gmail.com"
] |
pepijndevos@gmail.com
|
27456c5262db06b8bef96281690b636ec91d8907
|
073c2fd73875ce4e7d061623b8403f8d77c45d92
|
/cohesity_management_sdk/models/restore_app_task_state_proto.py
|
10ff0ddfb0d0caef8f2dea7883811f228b6e1723
|
[
"Apache-2.0"
] |
permissive
|
naveena-maplelabs/management-sdk-python
|
b11441b2edccc5a1262785bd559ad4b3ea984c3b
|
06ce4119d955dc08cdbc5109c935afcfcd9d65ab
|
refs/heads/master
| 2021-05-20T10:52:12.776816
| 2020-03-10T03:28:08
| 2020-03-10T03:28:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,158
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Cohesity Inc.
import cohesity_management_sdk.models.restore_app_params
class RestoreAppTaskStateProto(object):
"""Implementation of the 'RestoreAppTaskStateProto' model.
TODO: type model description here.
Attributes:
app_restore_progress_monitor_subtask_path (string): The Pulse task
path to the application restore task sub tree. If the application
restore has to wait on other tasks (for example, a SQL db restore
may wait for a tail log backup or a VM restore), then this would
represent a sub-tree of 'progress_monitor_task_path' in
PerformRestoreTaskStateProto.
last_finished_log_backup_start_time_usecs (long|int): The start time
of the last finished log backup run. For SQL application, this is
set iff we need to take a tail log backup.
restore_app_params (RestoreAppParams): This message captures all the
necessary arguments specified by the user to restore an
application.
"""
# Create a mapping from Model property names to API property names
_names = {
"app_restore_progress_monitor_subtask_path":'appRestoreProgressMonitorSubtaskPath',
"last_finished_log_backup_start_time_usecs":'lastFinishedLogBackupStartTimeUsecs',
"restore_app_params":'restoreAppParams'
}
def __init__(self,
app_restore_progress_monitor_subtask_path=None,
last_finished_log_backup_start_time_usecs=None,
restore_app_params=None):
"""Constructor for the RestoreAppTaskStateProto class"""
# Initialize members of the class
self.app_restore_progress_monitor_subtask_path = app_restore_progress_monitor_subtask_path
self.last_finished_log_backup_start_time_usecs = last_finished_log_backup_start_time_usecs
self.restore_app_params = restore_app_params
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
app_restore_progress_monitor_subtask_path = dictionary.get('appRestoreProgressMonitorSubtaskPath')
last_finished_log_backup_start_time_usecs = dictionary.get('lastFinishedLogBackupStartTimeUsecs')
restore_app_params = cohesity_management_sdk.models.restore_app_params.RestoreAppParams.from_dictionary(dictionary.get('restoreAppParams')) if dictionary.get('restoreAppParams') else None
# Return an object of this model
return cls(app_restore_progress_monitor_subtask_path,
last_finished_log_backup_start_time_usecs,
restore_app_params)
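# --- Editor's sketch (not part of the original file): a typical round-trip
# of a camelCase API payload through the model; the field values below are
# made up for illustration. ---
#   payload = {'appRestoreProgressMonitorSubtaskPath': '/restore/1',
#              'lastFinishedLogBackupStartTimeUsecs': 1583810888000000}
#   task = RestoreAppTaskStateProto.from_dictionary(payload)
#   task.app_restore_progress_monitor_subtask_path  # -> '/restore/1'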
|
[
"ashish@cohesity.com"
] |
ashish@cohesity.com
|
3e5b0ba83f75b854bad02097f5a41b1eb1fa092c
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/429/usersdata/321/104630/submittedfiles/jogoDaVelha_BIB.py
|
ef680987742f43f7773f7fd8ccbe646aa5e714c4
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,288
|
py
|
# -*- coding: utf-8 -*-
# PUT YOUR LIBRARY CODE STARTING FROM HERE
import random
tabuleiro =[
[1,1,1],
[1,1,1],
[1,1,1]]
def nome():
    nome = str(input('Qual seu nome? \n'))
    return nome
def simbolo():
    s = str(input('Qual símbolo você deseja utilizar no jogo? (X ou O) \n'))
    while s != 'X' and s != 'O':
        print('Insira um símbolo válido.')
        s = str(input('Qual símbolo você deseja utilizar no jogo? (X ou O) '))
    if s == 'X':
        computador = 'O'
    else:
        computador = 'X'
    # bug fix: the original ended with a bare `return`, discarding both
    # symbols; returning them is presumably what was intended
    return s, computador
def sorteio(j1):
    # bug fix: the original printed '%' (missing the 's') and passed the
    # function object `nome`; the player's name is now taken as a parameter
    j2 = 'Computador'
    sort = random.randint(0,1)
    if sort == 1:
        return print('Vencedor do sorteio para início do jogo: %s' % j1)
    if sort == 0:
        return print('Vencedor do sorteio para início do jogo: %s' % j2)
def mostrar_tabuleiro(tabuleiro):
    # bug fix: the board holds integers, so each cell is converted with
    # str() before concatenation
    print(str(tabuleiro[0][0])+'|'+str(tabuleiro[0][1])+'|'+str(tabuleiro[0][2]))
    print('')
    print(str(tabuleiro[1][0])+'|'+str(tabuleiro[1][1])+'|'+str(tabuleiro[1][2]))
    print('')
    print(str(tabuleiro[2][0])+'|'+str(tabuleiro[2][1])+'|'+str(tabuleiro[2][2]))
    return
def jogada():
    v = list(range(1, 10))  # assumption: list of free positions; `v` was undefined in the original
    sorteado = 1
    print(v.index(sorteado))
    v.remove(sorteado)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
e05f017bf8b3f62820b45ff9151749491fee423e
|
10d08b2531672de54d924fec6654a978cb082055
|
/fos/actor/polygonlines.py
|
31b4955bed4b6a56f181ca6606143957e9376dcc
|
[
"BSD-3-Clause"
] |
permissive
|
arokem/Fos
|
05afa04f05ba3b0585e38dfa79ec7ae4332ec8f9
|
5066bbd74954ba7e60eeb06451f3a25ef1b291e2
|
refs/heads/master
| 2021-01-18T15:59:59.905221
| 2011-08-15T14:16:50
| 2011-08-15T14:16:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,855
|
py
|
import scipy.spatial as sp
import numpy as np
from fos.lib.pyglet.gl import *
from fos import Actor
from fos.core.intersection import ray_aabb_intersection
class PolygonLines(Actor):
def __init__(self,
vertices,
connectivity,
colors = None,
affine = None):
""" A TreeRegion, composed of many trees
vertices : Nx3
Local 3D coordinates x,y,z
connectivity : Mx2
Polygon line topology
colors : Nx4 or 1x4, float [0,1]
Color per vertex
affine : 4x4
Affine transformation of the actor
including translation
"""
super(PolygonLines, self).__init__()
if affine is None:
self.affine = np.eye(4, dtype = np.float32)
else:
self.affine = affine
self._update_glaffine()
self.vertices = vertices
self.connectivity = connectivity.ravel()
# this coloring section is for per/vertex color
if colors is None:
# default colors for each line
self.colors = np.array( [[1.0, 0.0, 0.0, 1.0]], dtype = np.float32).repeat(len(self.vertices), axis=0)
else:
# colors array is half the size of the connectivity array
assert( len(self.vertices) == len(colors) )
self.colors = colors
# create AABB using the vertices
self.make_aabb(margin=2.0)
# create kdtree
self.kdtree = sp.KDTree(self.vertices, 5)
# create pointers
self.vertices_ptr = self.vertices.ctypes.data
self.connectivity_ptr = self.connectivity.ctypes.data
self.connectivity_nr = self.connectivity.size
self.colors_ptr = self.colors.ctypes.data
# VBO related
self.vertex_vbo = GLuint(0)
glGenBuffers(1, self.vertex_vbo)
glBindBuffer(GL_ARRAY_BUFFER_ARB, self.vertex_vbo)
glBufferData(GL_ARRAY_BUFFER_ARB, 4 * self.vertices.size, self.vertices_ptr, GL_STATIC_DRAW)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0)
# for colors
self.colors_vbo = GLuint(0)
glGenBuffers(1, self.colors_vbo)
glBindBuffer(GL_ARRAY_BUFFER, self.colors_vbo)
glBufferData(GL_ARRAY_BUFFER, 4 * self.colors.size, self.colors_ptr, GL_STATIC_DRAW)
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, 0)
# for connectivity
self.connectivity_vbo = GLuint(0)
glGenBuffers(1, self.connectivity_vbo)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.connectivity_vbo)
glBufferData(GL_ELEMENT_ARRAY_BUFFER, 4 * self.connectivity_nr, self.connectivity_ptr, GL_STATIC_DRAW)
def process_pickray(self,near,far):
""" Called when picking hit this actor
"""
        # subdivide the pickray and query the kdtree to find the indices of
        # nearby points, then do pickray-line-segment intersection
print "-------------"
print "near", near
print "far", far
        # find intersection points with aabb
# assume that intersection exists
near = np.array(near)
far = np.array(far)
print 'boundingbox', self.aabb.coord[0], self.aabb.coord[1]
ab1, ab2 = self.get_aabb_coords()
re = ray_aabb_intersection(near, far, ab1, ab2)
print "returned intersection points", re
# needs to have at least 2
if len(re) < 2:
return False
ne = np.array(re[0])
fa = np.array(re[1])
print "used near", ne
print "used far", fa
d = (fa-ne) / np.linalg.norm(fa-ne)
# how many subdivisions of the unit vector
nr_subdiv = 20
kdtree_sphere_query_radius = 2.0
dt = np.linalg.norm((fa-ne)) / 10
print "kdtree"
print self.kdtree.mins, self.kdtree.maxes
# create points
for i in range(nr_subdiv+1):
point = ne + (dt*i) * d
# apply inverse of affine transformation to get back
# to the original vertices space
point_inv = np.dot( np.linalg.inv(self.affine), np.array( [point[0], point[1], point[2], 1.0] ) )
point_inv2 = point_inv[:3]
ind = self.kdtree.query_ball_point(point_inv2, kdtree_sphere_query_radius)
if len(ind) > 0:
print 'change colors'
self.colors[ ind, 1 ] = 1.0
self.colors[ ind, 0 ] = 0.0
def draw(self):
self.draw2()
def draw1(self):
glPushMatrix()
glMultMatrixf(self.glaffine)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glBindBuffer(GL_ARRAY_BUFFER_ARB, self.vertex_vbo)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0)
glBindBuffer(GL_ARRAY_BUFFER_ARB, self.colors_vbo)
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, 0)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.connectivity_vbo)
glDrawElements(GL_LINES, self.connectivity_nr, GL_UNSIGNED_INT, 0)
if self.show_aabb:
self.draw_aabb()
glPopMatrix()
def draw2(self):
glPushMatrix()
glMultMatrixf(self.glaffine)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_COLOR_ARRAY)
glVertexPointer(3, GL_FLOAT, 0, self.vertices_ptr)
glColorPointer(4, GL_FLOAT, 0, self.colors_ptr)
glDrawElements(GL_LINES, self.connectivity_nr, GL_UNSIGNED_INT, self.connectivity_ptr)
glDisableClientState(GL_COLOR_ARRAY)
glDisableClientState(GL_VERTEX_ARRAY)
if self.show_aabb:
self.draw_aabb()
glPopMatrix()
|
[
"git@unidesign.ch"
] |
git@unidesign.ch
|
a4a434e1abbb243fe9c141b2cd61b361631bc774
|
55ceefc747e19cdf853e329dba06723a44a42623
|
/_CodeTopics/LeetCode/1-200/000012/000012.py
|
34382f7be371ec1d8887db2f2cda6c7f0cd89365
|
[] |
no_license
|
BIAOXYZ/variousCodes
|
6c04f3e257dbf87cbe73c98c72aaa384fc033690
|
ee59b82125f100970c842d5e1245287c484d6649
|
refs/heads/master
| 2023-09-04T10:01:31.998311
| 2023-08-26T19:44:39
| 2023-08-26T19:44:39
| 152,967,312
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
class Solution(object):
def intToRoman(self, num):
"""
:type num: int
:rtype: str
"""
res = ""
nums = [1000, 500, 100, 50, 10, 5, 1]
letters = ["M", "D", "C", "L", "X", "V", "I"]
i = 0
flag = 0
while num > 0:
if num >= nums[i]:
res += letters[i]
num -= nums[i]
flag += 1
if flag == 4:
res = res[:-4] + letters[i] + letters[i-1]
if len(res) > 2 and res[-3] == res[-1]:
res = res[:-3] + letters[i] + letters[i-2]
else:
i += 1
flag = 0
return res
"""
https://leetcode-cn.com/submissions/detail/177308458/
3999 / 3999 test cases passed
Status: Accepted
Runtime: 56 ms
Memory: 13.1 MB
Runtime: 56 ms, faster than 70.77% of Python submissions
Memory: 13.1 MB, less than 19.03% of Python submissions
"""
|
[
"noreply@github.com"
] |
BIAOXYZ.noreply@github.com
|
d0ca2278af93cf0521f1a71d22dd494b20804760
|
ea48ef0588c104e49a7ebec5bd8dc359fdeb6674
|
/api/snippets/serializers.py
|
802503d73cf66c4cf043190921ed0e2be369d0ef
|
[] |
no_license
|
Jizishuo/django--text
|
c0d58d739ef643c7f3793fbead19302778670368
|
152a5c99e7a16a75fda2f1f85edcfdce9274c9c2
|
refs/heads/master
| 2020-04-01T10:39:18.131551
| 2018-11-18T13:31:59
| 2018-11-18T13:31:59
| 153,125,799
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
from rest_framework import serializers
from snippets.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES
class SnippetSerializer(serializers.Serializer): # serializes much like Django forms
id = serializers.IntegerField(read_only=True)
title = serializers.CharField(required=False, allow_blank=True, max_length=100)
    code = serializers.CharField(style={'base_template': 'textarea.html'}) # the style setting is the counterpart of Django's widget=widgets.Textarea
    linenos = serializers.BooleanField(required=False) # used for display in the browser
language = serializers.ChoiceField(choices=LANGUAGE_CHOICES, default='python')
style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly')
def create(self, validated_data):
"""
Create and return a new `Snippet` instance, given the validated data.
"""
return Snippet.objects.create(**validated_data)
def update(self, instance, validated_data):
"""
Update and return an existing `Snippet` instance, given the validated data.
"""
instance.title = validated_data.get('title', instance.title)
instance.code = validated_data.get('code', instance.code)
instance.linenos = validated_data.get('linenos', instance.linenos)
instance.language = validated_data.get('language', instance.language)
instance.style = validated_data.get('style', instance.style)
instance.save()
return instance
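# --- Editor's sketch (not part of the original file): standard DRF usage of
# the serializer above; `data` is a made-up payload. ---
#   serializer = SnippetSerializer(data={'code': 'print("hello, world")'})
#   if serializer.is_valid():
#       snippet = serializer.save()  # dispatches to create() above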
|
[
"948369894@qq.com"
] |
948369894@qq.com
|
1fecc0c4d13d877ece282bd2a8ebf7d6f3d6fde6
|
5158d2aa9839dcf80340ef369db7eada19f3ff8b
|
/test.py
|
1fe2fcc807e3f3e645ccd632e01c386ea08a6bad
|
[] |
no_license
|
andrewsmedina/flh-pvcloud
|
31de2e5734a6f91db4c618fa20759300e2930596
|
1799b17039dde004461982466f58cc464e6488b8
|
refs/heads/master
| 2021-01-11T03:39:57.926994
| 2016-10-13T03:56:46
| 2016-10-13T03:56:46
| 71,403,725
| 0
| 0
| null | 2016-10-19T22:14:25
| 2016-10-19T22:14:24
| null |
UTF-8
|
Python
| false
| false
| 1,691
|
py
|
#!/usr/bin/env python
import os
import sys
import django
import json
def test_pvs_energy_daily():
from pvs.models import Energy
#pvs_serial = '0000000097894c9b'
pvs_serial = '00000000f6392e07'
pvs_en_daily = Energy.get_energy_daily_output(pvs_serial)
result_list = []
for entry in pvs_en_daily[pvs_serial].values():
        result_list.append(list(entry.values()))  # list() so rows can be indexed/sorted on Python 3
result_list.sort(key=lambda x: x[0])
print('== pvs energy today daily result ==')
for entry in result_list:
print(entry)
def test_pvs_energy_hourly():
from pvs.models import Energy
pvs_list = Energy.get_distinct_serial()
print('distinct pvs serial: %s' % pvs_list)
#pvs_serial = '0000000097894c9b'
pvs_serial = '00000000f6392e07'
pvs_en_by_hour = Energy.get_energy_daily_output_by_hour(pvs_serial)
#print(pvs_en_by_hour)
#return
result_list = []
for entry in pvs_en_by_hour[pvs_serial].values():
        result_list.append(list(entry.values()))  # list() so rows can be indexed/sorted on Python 3
result_list.sort(key=lambda x: x[0])
print('== pvs energy today hourly result ==')
for entry in result_list:
print(entry)
pvs_en_hourly = Energy.get_calculated_energy_hourly_output(pvs_serial)
result_list = []
for entry in pvs_en_hourly[pvs_serial].values():
        result_list.append(list(entry.values()))  # list() so rows can be indexed/sorted on Python 3
result_list.sort(key=lambda x: x[0])
print('== pvs calculated energy hourly result ==')
for entry in result_list:
print(entry)
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
django.setup()
test_pvs_energy_hourly()
#test_pvs_energy_daily()
|
[
"lee.shiueh@gmail.com"
] |
lee.shiueh@gmail.com
|
e95147eb86b47413be4c0af28598db219b730732
|
f5a53f0f2770e4d7b3fdace83486452ddcc996e1
|
/env3/lib/python3.6/site-packages/django_tables2/columns/linkcolumn.py
|
adf587625eda5d92ecd166bee5d301e54ef9251c
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
fireman0865/PingBox
|
35e8fc9966b51320d571b63967e352a134022128
|
0f00eaf88b88e9441fffd5173a1501e56c13db03
|
refs/heads/master
| 2023-01-20T07:55:59.433046
| 2020-03-15T13:36:31
| 2020-03-15T13:36:31
| 247,466,832
| 1
| 0
|
Apache-2.0
| 2022-12-26T21:30:32
| 2020-03-15T12:59:16
|
Python
|
UTF-8
|
Python
| false
| false
| 6,118
|
py
|
from .base import Column, library
class BaseLinkColumn(Column):
"""
The base for other columns that render links.
Arguments:
text (str or callable): If set, this value will be used to render the
text inside link instead of value. The callable gets the record
being rendered as argument.
attrs (dict): In addition to ``attrs`` keys supported by `~.Column`, the
following are available:
- `a` -- ``<a>`` in ``<td>`` elements.
"""
def __init__(self, text=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.text = text
def text_value(self, record, value):
if self.text is None:
return value
return self.text(record) if callable(self.text) else self.text
def value(self, record, value):
"""
Returns the content for a specific cell similarly to `.render` however
without any html content.
"""
return self.text_value(record, value)
def render(self, record, value):
return self.text_value(record, value)
@library.register
class LinkColumn(BaseLinkColumn):
"""
Renders a normal value as an internal hyperlink to another page.
.. note ::
This column should not be used anymore, the `linkify` keyword argument to
regular columns can be used to achieve the same results.
It's common to have the primary value in a row hyperlinked to the page
dedicated to that record.
    The first arguments are identical to those of
    `~django.urls.reverse` and allow an internal URL to be
    described. If this argument is `None`, then `get_absolute_url`
    (see Django references) will be used.
The last argument *attrs* allows custom HTML attributes to be added to the
rendered ``<a href="...">`` tag.
Arguments:
viewname (str or None): See `~django.urls.reverse`, or use `None`
to use the model's `get_absolute_url`
urlconf (str): See `~django.urls.reverse`.
args (list): See `~django.urls.reverse`. [2]_
kwargs (dict): See `~django.urls.reverse`. [2]_
current_app (str): See `~django.urls.reverse`.
attrs (dict): HTML attributes that are added to the rendered
``<a ...>...</a>`` tag.
text (str or callable): Either static text, or a callable. If set, this
will be used to render the text inside link instead of value (default).
The callable gets the record being rendered as argument.
.. [2] In order to create a link to a URL that relies on information in the
current row, `.Accessor` objects can be used in the *args* or *kwargs*
arguments. The accessor will be resolved using the row's record before
`~django.urls.reverse` is called.
Example:
.. code-block:: python
# models.py
class Person(models.Model):
name = models.CharField(max_length=200)
# urls.py
urlpatterns = patterns('',
url("people/([0-9]+)/", views.people_detail, name="people_detail")
)
# tables.py
from django_tables2.utils import A # alias for Accessor
class PeopleTable(tables.Table):
name = tables.LinkColumn("people_detail", args=[A("pk")])
In order to override the text value (i.e. ``<a ... >text</a>``) consider
the following example:
.. code-block:: python
# tables.py
from django_tables2.utils import A # alias for Accessor
class PeopleTable(tables.Table):
name = tables.LinkColumn("people_detail", text="static text", args=[A("pk")])
age = tables.LinkColumn("people_detail", text=lambda record: record.name, args=[A("pk")])
In the first example, a static text would be rendered (``"static text"``)
In the second example, you can specify a callable which accepts a record object (and thus
can return anything from it)
In addition to *attrs* keys supported by `.Column`, the following are
available:
- `a` -- ``<a>`` elements in ``<td>``.
Adding attributes to the ``<a>``-tag looks like this::
class PeopleTable(tables.Table):
first_name = tables.LinkColumn(attrs={
"a": {"style": "color: red;"}
})
"""
def __init__(
self,
viewname=None,
urlconf=None,
args=None,
kwargs=None,
current_app=None,
attrs=None,
**extra
):
super().__init__(
attrs=attrs,
linkify=dict(
viewname=viewname,
urlconf=urlconf,
args=args,
kwargs=kwargs,
current_app=current_app,
),
**extra
)
@library.register
class RelatedLinkColumn(LinkColumn):
"""
Render a link to a related object using related object's ``get_absolute_url``,
same parameters as ``~.LinkColumn``.
.. note ::
This column should not be used anymore, the `linkify` keyword argument to
    regular columns can be used to achieve the same results.
If the related object does not have a method called ``get_absolute_url``,
or if it is not callable, the link will be rendered as '#'.
Traversing relations is also supported, suppose a Person has a foreign key to
Country which in turn has a foreign key to Continent::
class PersonTable(tables.Table):
name = tables.Column()
country = tables.RelatedLinkColumn()
continent = tables.RelatedLinkColumn(accessor="country.continent")
will render:
- in column 'country', link to ``person.country.get_absolute_url()`` with the output of
``str(person.country)`` as ``<a>`` contents.
- in column 'continent', a link to ``person.country.continent.get_absolute_url()`` with
the output of ``str(person.country.continent)`` as ``<a>`` contents.
Alternative contents of ``<a>`` can be supplied using the ``text`` keyword argument as
documented for `~.columns.LinkColumn`.
"""
|
[
"fireman0865@gmail.com"
] |
fireman0865@gmail.com
|
29412168a35029446a3f8458bcf3b90a9ca5c7bb
|
12a5b72982291ac7c074210afc2c9dfe2c389709
|
/online_judges/Codeforces/228/E/code.py
|
df84b84afc6501c45b644b377d5dc32fca5c6d4c
|
[] |
no_license
|
krantirk/Algorithms-and-code-for-competitive-programming.
|
9b8c214758024daa246a1203e8f863fc76cfe847
|
dcf29bf976024a9d1873eadc192ed59d25db968d
|
refs/heads/master
| 2020-09-22T08:35:19.352751
| 2019-05-21T11:56:39
| 2019-05-21T11:56:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
n = int(raw_input())
ciel = 0
jiro = 0
maxValues = []
for x in xrange(n):
l = map(int,raw_input().split())
if (l[0] % 2 == 0):
for i in xrange(0,(l[0])/2):
ciel += l[1 + i]
jiro += l[l[0]/2 + i + 1]
else:
for i in xrange(0,((l[0])/2)):
ciel += l[1 + i]
jiro += l[(l[0])/2 + i + 2]
maxValues.append(l[l[0]/2 + 1])
maxValues.sort()
k = 0
if(len(maxValues) % 2 == 0):
k = 1
for i in range(len(maxValues)-1,-1,-1):
if i % 2 == k:
ciel += maxValues[i]
else:
jiro += maxValues[i]
print ciel, jiro
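# Editor's note (not in the original file): a hand-traced example. For input
#   2
#   1 100
#   2 1 10
# the first half of each pile goes to Ciel and the second half to Jiro, and
# the middle cards of odd piles (here 100) are taken alternately, so the
# program prints:
#   101 10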
|
[
"mariannelinharesm@gmail.com"
] |
mariannelinharesm@gmail.com
|
a9c8c88180249969cff26aeb304d575a5691efdc
|
bc441bb06b8948288f110af63feda4e798f30225
|
/app_store_sdk/model/resource_manage/filter_strategy_pb2.pyi
|
c7feb66ed7c223ea9ba231ff3e42ab314f2b82d4
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,421
|
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from app_store_sdk.model.console.cmdb_query_strategy_pb2 import (
CmdbQueryStrategy as app_store_sdk___model___console___cmdb_query_strategy_pb2___CmdbQueryStrategy,
)
from app_store_sdk.model.resource_manage.filter_condition_group_pb2 import (
FilterConditionGroup as app_store_sdk___model___resource_manage___filter_condition_group_pb2___FilterConditionGroup,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
RepeatedScalarFieldContainer as google___protobuf___internal___containers___RepeatedScalarFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class FilterStrategy(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
instanceId = ... # type: typing___Text
strategyName = ... # type: typing___Text
strategyObjectId = ... # type: typing___Text
crontab = ... # type: typing___Text
ctime = ... # type: typing___Text
mtime = ... # type: typing___Text
creator = ... # type: typing___Text
modifier = ... # type: typing___Text
nextExecTime = ... # type: typing___Text
enable = ... # type: builtin___bool
updateAuthorizers = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
readAuthorizers = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
deleteAuthorizers = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
notifyUsers = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
notifyMethods = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
org = ... # type: builtin___int
@property
def query(self) -> app_store_sdk___model___console___cmdb_query_strategy_pb2___CmdbQueryStrategy: ...
@property
def filter(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[app_store_sdk___model___resource_manage___filter_condition_group_pb2___FilterConditionGroup]: ...
def __init__(self,
*,
instanceId : typing___Optional[typing___Text] = None,
strategyName : typing___Optional[typing___Text] = None,
strategyObjectId : typing___Optional[typing___Text] = None,
query : typing___Optional[app_store_sdk___model___console___cmdb_query_strategy_pb2___CmdbQueryStrategy] = None,
filter : typing___Optional[typing___Iterable[app_store_sdk___model___resource_manage___filter_condition_group_pb2___FilterConditionGroup]] = None,
crontab : typing___Optional[typing___Text] = None,
ctime : typing___Optional[typing___Text] = None,
mtime : typing___Optional[typing___Text] = None,
creator : typing___Optional[typing___Text] = None,
modifier : typing___Optional[typing___Text] = None,
nextExecTime : typing___Optional[typing___Text] = None,
enable : typing___Optional[builtin___bool] = None,
updateAuthorizers : typing___Optional[typing___Iterable[typing___Text]] = None,
readAuthorizers : typing___Optional[typing___Iterable[typing___Text]] = None,
deleteAuthorizers : typing___Optional[typing___Iterable[typing___Text]] = None,
notifyUsers : typing___Optional[typing___Iterable[typing___Text]] = None,
notifyMethods : typing___Optional[typing___Iterable[typing___Text]] = None,
org : typing___Optional[builtin___int] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> FilterStrategy: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> FilterStrategy: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"query",b"query"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"creator",b"creator",u"crontab",b"crontab",u"ctime",b"ctime",u"deleteAuthorizers",b"deleteAuthorizers",u"enable",b"enable",u"filter",b"filter",u"instanceId",b"instanceId",u"modifier",b"modifier",u"mtime",b"mtime",u"nextExecTime",b"nextExecTime",u"notifyMethods",b"notifyMethods",u"notifyUsers",b"notifyUsers",u"org",b"org",u"query",b"query",u"readAuthorizers",b"readAuthorizers",u"strategyName",b"strategyName",u"strategyObjectId",b"strategyObjectId",u"updateAuthorizers",b"updateAuthorizers"]) -> None: ...
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
6e33bdb52df70a1d3f34b66cbe70b041167f2189
|
3fbfabfaaada7b9b77e8a1df8fed4de444070d49
|
/session_10/Employee.py
|
ab419235cf5bb43b1a9bf7a2b7ac1bb3742016d0
|
[
"MIT"
] |
permissive
|
dravate/spark_python_course
|
df36a561ab2cf8f763dd02655319cd6bf5b7876c
|
519389fdb21d78cd6d19e1ad2f7c782bc1449a83
|
refs/heads/main
| 2023-07-08T06:53:27.635106
| 2021-08-03T14:44:55
| 2021-08-03T14:44:55
| 385,127,461
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
class Employee:
'Common base class for all employees'
empCount = 0
def __init__(self, name, salary):
self.name = name
self.salary = salary
Employee.empCount += 1
def displayCount(self):
print ("Total Employee {}".format( Employee.empCount))
def displayEmployee(self):
print ("Name : ", self.name, ", Salary: ", self.salary)
def __str__(self):
return self.name + ' ' + str(self.salary)
"This would create first object of Employee class"
emp1 = Employee("Zara", 2000)
"This would create second object of Employee class"
emp2 = Employee("Manni", 5000)
emp1.displayEmployee()
#emp2.displayEmployee()
print ("Total Employee {}".format( Employee.empCount))
#print (emp2)
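# Editor's note (not in the original file): running this module prints
#   Name :  Zara , Salary:  2000
#   Total Employee 2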
|
[
"sopan.shewale@gmail.com"
] |
sopan.shewale@gmail.com
|
67df998e15aaf63b368447b166ddfc3cd29f7411
|
60d0252aabe5d929af8c94cdddd502605e7bafdd
|
/crawler_novels/www.ck101.org.py
|
4946797433701f255fa0933a31a2593df36791fe
|
[] |
no_license
|
SlovEnt/Web_Craler_Series
|
ead253b56f99bcd0bac33c2a66d226673c7f68fe
|
9f77858826254d6631486f4770760e9e78baea68
|
refs/heads/master
| 2020-06-09T23:30:38.984620
| 2019-09-13T13:55:02
| 2019-09-13T13:55:02
| 193,528,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,238
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'SlovEnt'
__date__ = '2019/6/24 22:07'
import time
import os
from bs4 import BeautifulSoup
from collections import OrderedDict
from chs_tools.get_html_page import get_html_all_content, chrome_get_html_all_content
from chs_tools.print_log import C_PrintLog
from chs_tools.param_info import rtn_parainfo
import traceback
plog = C_PrintLog()
PARAINFO = rtn_parainfo()
DOWN_FLODERS = PARAINFO["NOVEL_DOWN_FLODERS"]
ROOT_URL = "https://www.ck101.org" # site root URL
GENERAL_PATH = "" # common path segment
NOVEL_SUB_ID = "198/198015" # table-of-contents page ID
ENCODING = "GBK" # page text encoding
CHAPTER_POST = 1
# e.g. https://www.ck101.org/198/198015/
if GENERAL_PATH == "":
FULL_URL = "{0}/{1}/".format(ROOT_URL, NOVEL_SUB_ID)
else:
FULL_URL = "{0}/{1}/{2}/index.html".format(ROOT_URL, GENERAL_PATH, NOVEL_SUB_ID)
plog.debug("小说下载首页为:{0}".format(FULL_URL))
def rtn_chapter_list_info(html):
soup = BeautifulSoup(html, 'html.parser')
novelName = soup.find_all(name="div", attrs={"class": "infot"})[0].h1.text
# novelName = novelName.split("《")[1]
# novelName = novelName.split("》")[0]
# novelName = "妾本惊华"
plog.debug("开始下载《{0}》".format(novelName))
chapterListInfoSoup = soup.find_all(name="div" , attrs={"class": "dccss"})
# print(chapterListInfoSoup)
chapterListInfoArr = []
n = 0
for ddItem in chapterListInfoSoup:
# print(ddItem)
n += 1
# if n <= 12:
# continue
chapterListInfoDict = OrderedDict()
chapterListInfoDict2 = OrderedDict()
if "href" not in str(ddItem):
continue
if n < CHAPTER_POST:
continue
chapterListInfoDict["text"] = ddItem.a.text
chapterListInfoDict["href"] = ddItem.a["href"]
chapterListInfoArr.append(chapterListInfoDict)
# chapterListInfoDict2["text"] = "接"
# nextPageUrl = ddItem.a["href"].split(".")
# nextPageUrl = "{0}_2.{1}".format(nextPageUrl[0], nextPageUrl[1])
# chapterListInfoDict2["href"] = nextPageUrl
# chapterListInfoArr.append(chapterListInfoDict2)
plog.tmpinfo(chapterListInfoDict)
return chapterListInfoArr, novelName
def rtn_chapter_txt(chapterHtml):
# print("---------------chapterHtml-----------------\n",chapterHtml,"\n\n\n\n")
chapterHtml = chapterHtml.replace("<br />", "\n")
soup = BeautifulSoup(chapterHtml, 'html.parser')
try:
soupSub = soup.find_all(name="div", attrs={"id": "content"})[0]
# soupSubStr = str(soupSub)
# print("---------------soupSubStr-----------------\n",soupSubStr,"\n\n\n\n")
# soupSubStr = "{0}{1}".format(soupSubStr.split("<div")[0],"</article>")
# soupSub = BeautifulSoup(soupSubStr, 'html.parser')
txtContent = soupSub.text
txtContent = txtContent.replace(" ", "")
txtContent = txtContent.replace(" ", "")
txtContent = txtContent.replace("\n\n", "\n")
txtContent = txtContent.replace("\xa0", "")
txtContent = txtContent.replace("page_top();", "")
txtContent = txtContent.replace("\n电脑天使这边走→", "")
txtContent = txtContent.replace("\nWAP天使戳这边→", "")
txtContent = txtContent.replace('\n")>', "")
txtContent = txtContent.replace("\nAPP天使来这边→", "")
txtContent = txtContent + "\n"
# txtContent = txtContent.split("/c/o/m")[1] + "\n"
print(txtContent)
return txtContent
except:
time.sleep(2)
traceback.print_exc()
print("--------------- chapterHtml error -----------------\n", chapterHtml)
return False
def write_txt_content(txtFileName, chapterName, chapterTxt, encoding):
with open(txtFileName, 'a', encoding=encoding) as f:
chapterName = chapterName.replace("www.ggdown.com", "")
chapterName = chapterName.replace(" :", "")
if chapterName == "接":
pass
else:
f.write(chapterName + "\n")
# print(chapterTxt)
f.write(chapterTxt)
if __name__ == '__main__':
html = get_html_all_content(FULL_URL, "info_right", ENCODING)
    # get the chapter list info
chapterListInfo, novelName = rtn_chapter_list_info(html)
novelFilePath = r"{0}\{1}.txt".format(DOWN_FLODERS, novelName)
if CHAPTER_POST == 1:
if (os.path.exists(novelFilePath)):
os.remove(novelFilePath)
n = 0
for chapterInfo in chapterListInfo:
n += 1
chapterUrl = "{0}{1}".format(ROOT_URL, chapterInfo["href"])
plog.debug("{3}/{4} 网址:{0},页面章节标题:{2},文件路径:{1} !!!".format(chapterUrl, novelFilePath, chapterInfo["text"], n, len(chapterListInfo)))
chapterHtml = get_html_all_content(chapterUrl, "content", ENCODING)
chapterTxt = rtn_chapter_txt(chapterHtml)
# print(str(chapterHtml))
if chapterTxt is not False:
write_txt_content(novelFilePath, chapterInfo["text"], chapterTxt, ENCODING)
else:
plog.error("获取失败!!!!!!")
|
[
"slovent@163.com"
] |
slovent@163.com
|
fe9e7ae36613eddbc9b67fab34ddea53b95cc7bc
|
baf3b9e0f80a545ba1e087d54e4de7a9fe10f279
|
/滑动窗口/209_长度最小的子数组.py
|
6f00de24476ca4c1e8c21ef53268ba5989b0febb
|
[] |
no_license
|
llin0420/Leetcode_in_python3
|
5e0755c9b6bb5fe92fd1e0bd7d563e8a3a20f88a
|
41684ff99b2f881304b08c3c753b0df48e0a6b40
|
refs/heads/master
| 2022-12-15T16:23:39.991920
| 2020-09-09T08:05:55
| 2020-09-09T08:05:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,402
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 12 13:17:24 2020
@author: leiya
"""
'''
0712
This problem is a bit special among sliding-window problems.
As a rule, prefer to update the answer outside the inner loop, i.e. avoid
updating it while the window is shrinking, to guard against edge cases.
Here, however, the update must go inside the inner loop: only while the
window still satisfies sum_ >= s do we have the exact value worth recording;
once the inner loop exits, the window sum is < s and no longer qualifies.
Compare with problem 904 (Fruit Into Baskets): there, entering the inner
loop means the window has become invalid; here, entering it means the window
is valid, and we look for a better (shorter) answer while it stays valid,
since `end` advances by default. The nature of each problem dictates where
the answer is updated; we still put the update outside the while-loop
whenever possible. For this problem the placement is harmless, because an
answer can only exist after entering the inner loop, and even if that loop
is never entered, the `if min_len == float('inf')` check handles the case.
0718: one more point worth making explicit: the window never needs to shrink
back (that is, when `start` advances, `end` never moves back to re-examine
smaller windows). If [start:end] had sum < s before `end` moved on, then
[start+1:end] is a subset of it, so its sum is also < s; re-checking it is
pointless. This looks like a minor detail when writing the code, but it is
the essential reason the solution is written this way.
'''
from typing import List  # needed for the annotation below
class Solution:
    def minSubArrayLen(self, s: int, nums: List[int]) -> int:
        # the window can shrink while sum_ >= s; that is the while-condition
        # we are after. The window length is variable, so a while-loop is
        # used: a typical variable-length sliding window.
min_len = float('inf')
sum_ = 0
for end in range(len(nums)):
sum_ += nums[end]
while sum_ >= s:
min_len = min(min_len,end-start+1)
sum_ -= nums[start]
start += 1
if min_len == float('inf'):
return 0
else:
return min_len
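# Editor's note (not in the original file): the canonical example for this
# problem: Solution().minSubArrayLen(7, [2, 3, 1, 2, 4, 3]) returns 2,
# for the subarray [4, 3].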
|
[
"792412589@qq.com"
] |
792412589@qq.com
|
ae7809f3604c6604f22ba1fa70a1e9c3ed5f2c69
|
57d978b0ba29c745cf409e819f2f7e42e98516dc
|
/ibs/_input.py
|
ce3078f28411719e86def04fdd019b03afda1c2e
|
[
"MIT"
] |
permissive
|
lnls-fac/lnls
|
26564b9bdaf0602e2f50d67e294d0d8ba5cc0f82
|
91299d84ce7628d33c46861687d17a574428bc85
|
refs/heads/master
| 2023-02-09T00:26:47.298350
| 2022-11-25T19:25:50
| 2022-11-25T19:25:50
| 33,893,299
| 5
| 2
|
MIT
| 2022-11-25T19:25:51
| 2015-04-13T20:51:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,116
|
py
|
import numpy as _np
import copy as _copy
def read_energy_acceptance_file(fname, eRF):
# reads raw data from file
lines = [line.strip() for line in open(fname)]
# processes raw data
accp, accn = [], []
for line in lines:
if not line or line[0] == '#':
continue
values = [float(word) for word in line.split()]
pos, e_ac = values[4], values[7]
if e_ac > 0.0:
accp.append([pos,min(abs(e_ac),eRF)])
else:
accn.append([pos,min(abs(e_ac),eRF)])
accp = _np.array(accp)
accn = _np.array(accn)
return (accp,accn)
def read_twiss_file(fname, orig_parameters):
# reads raw data from file
lines = [line.strip() for line in open(fname)]
parameters = _copy.deepcopy(orig_parameters)
# processes raw data into twiss and element structures
twiss, elements = [], []
for line in lines:
words = line.split()
if not words or words[0][0] == '*':
continue
if words[0][0] == '#':
if words[0] == '#MCF':
parameters.mcf = float(words[1])
elif words[0] == '#I1':
parameters.latt_i1 = float(words[1])
elif words[0] == '#I2':
parameters.latt_i2 = float(words[1])
elif words[0] == '#I3':
parameters.latt_i3 = float(words[1])
elif words[0] == '#I4':
parameters.latt_i4 = float(words[1])
elif words[0] == '#I5':
parameters.latt_i5 = float(words[1])
elif words[0] == '#I6':
parameters.latt_i6 = float(words[1])
else:
pass
continue
else:
if float(words[3]) > 0:
values = [float(word) for word in words[2:]]
                values = values + [0,0] # for acceptances insertion later on
#print(values)
twiss.append(values)
elements.append(words[0])
twiss = _np.array(twiss)
elements = _np.array(elements)
return (elements, twiss, parameters)
|
[
"xresende@gmail.com"
] |
xresende@gmail.com
|
bf10c5772f31298525eb55957cf0421f17ad7983
|
87cac4166f07729f1c94066259996c8b752c1202
|
/examples/calc/calc_distance.py
|
dc0d473aceccd019cbfe3f28a154c888c08db7e6
|
[] |
no_license
|
danielsocials/bbox
|
068238a15880468d214109a23017a19e70fc13ec
|
292e350b1cefbbab987baf8c946d4021abd211ea
|
refs/heads/master
| 2020-03-16T06:25:47.907369
| 2018-05-08T04:42:45
| 2018-05-08T04:42:45
| 132,554,332
| 0
| 0
| null | 2018-05-08T04:36:35
| 2018-05-08T04:36:35
| null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
import asyncio
from aiobbox.cluster import get_cluster
from aiobbox.client import pool
from aiobbox.handler import BaseHandler
class Handler(BaseHandler):
def add_arguments(self, parser):
parser.add_argument('--a',
type=float)
parser.add_argument('--b',
type=float)
async def run(self, args):
#for _ in range(2):
#r = await pool.calc.echostr('888', retry=100)
# print(r)
# await asyncio.sleep(3)
r = await pool.calc.add2num(args.a, args.b)
print(r)
|
[
"superisaac.ke@gmail.com"
] |
superisaac.ke@gmail.com
|
c26c22a572fc6788014f0e253f267b21cc87a9dd
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/statsmodels/2016/12/test_tsaplots.py
|
91616d7673599e86c8dfeba8b0c28021f08f1d27
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005
| 2021-01-28T19:40:51
| 2021-01-28T19:40:51
| 306,497,459
| 1
| 1
| null | 2020-11-24T20:56:18
| 2020-10-23T01:18:07
| null |
UTF-8
|
Python
| false
| false
| 4,803
|
py
|
from statsmodels.compat.python import lmap, map
import numpy as np
import pandas as pd
from numpy.testing import dec, assert_equal
import statsmodels.api as sm
from statsmodels.graphics.tsaplots import (plot_acf, plot_pacf, month_plot,
quarter_plot, seasonal_plot)
import statsmodels.tsa.arima_process as tsp
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
@dec.skipif(not have_matplotlib)
def test_plot_acf():
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1., -0.9]
ma = np.r_[1., 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
acf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
plot_acf(acf, ax=ax, lags=10)
plot_acf(acf, ax=ax)
plot_acf(acf, ax=ax, alpha=None)
plt.close(fig)
@dec.skipif(not have_matplotlib)
def test_plot_acf_irregular():
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1., -0.9]
ma = np.r_[1., 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
acf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
plot_acf(acf, ax=ax, lags=np.arange(1, 11))
plot_acf(acf, ax=ax, lags=10, zero=False)
plot_acf(acf, ax=ax, alpha=None, zero=False)
plt.close(fig)
@dec.skipif(not have_matplotlib)
def test_plot_pacf():
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1., -0.9]
ma = np.r_[1., 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
pacf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
plot_pacf(pacf, ax=ax)
plot_pacf(pacf, ax=ax, alpha=None)
plt.close(fig)
@dec.skipif(not have_matplotlib)
def test_plot_pacf_irregular():
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1., -0.9]
ma = np.r_[1., 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
pacf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
plot_pacf(pacf, ax=ax, lags=np.arange(1, 11))
plot_pacf(pacf, ax=ax, lags=10, zero=False)
plot_pacf(pacf, ax=ax, alpha=None, zero=False)
plt.close(fig)
@dec.skipif(not have_matplotlib)
def test_plot_month():
dta = sm.datasets.elnino.load_pandas().data
dta['YEAR'] = dta.YEAR.astype(int).apply(str)
dta = dta.set_index('YEAR').T.unstack()
dates = lmap(lambda x: pd.tseries.tools.parse_time_string('1 '+' '.join(x))[0],
dta.index.values)
# test dates argument
fig = month_plot(dta.values, dates=dates, ylabel='el nino')
plt.close(fig)
# test with a TimeSeries DatetimeIndex with no freq
dta.index = pd.DatetimeIndex(dates)
fig = month_plot(dta)
plt.close(fig)
# w freq
dta.index = pd.DatetimeIndex(dates, freq='MS')
fig = month_plot(dta)
plt.close(fig)
# test with a TimeSeries PeriodIndex
dta.index = pd.PeriodIndex(dates, freq='M')
fig = month_plot(dta)
plt.close(fig)
@dec.skipif(not have_matplotlib)
def test_plot_quarter():
dta = sm.datasets.macrodata.load_pandas().data
dates = lmap('Q'.join, zip(dta.year.astype(int).apply(str),
dta.quarter.astype(int).apply(str)))
# test dates argument
quarter_plot(dta.unemp.values, dates)
plt.close('all')
# test with a DatetimeIndex with no freq
parser = pd.tseries.tools.parse_time_string
dta.set_index(pd.DatetimeIndex((x[0] for x in map(parser, dates))),
inplace=True)
quarter_plot(dta.unemp)
plt.close('all')
# w freq
# see pandas #6631
dta.index = pd.DatetimeIndex((x[0] for x in map(parser, dates)),
freq='QS-Oct')
quarter_plot(dta.unemp)
plt.close('all')
# w PeriodIndex
dta.index = pd.PeriodIndex((x[0] for x in map(parser, dates)),
freq='Q')
quarter_plot(dta.unemp)
plt.close('all')
@dec.skipif(not have_matplotlib)
def test_seasonal_plot():
rs = np.random.RandomState(1234)
data = rs.randn(20,12)
data += 6*np.sin(np.arange(12.0)/11*np.pi)[None,:]
data = data.ravel()
months = np.tile(np.arange(1,13),(20,1))
months = months.ravel()
df = pd.DataFrame([data,months],index=['data','months']).T
grouped = df.groupby('months')['data']
labels = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
fig = seasonal_plot(grouped, labels)
ax = fig.get_axes()[0]
output = [tl.get_text() for tl in ax.get_xticklabels()]
assert_equal(labels, output)
plt.close('all')
|
[
"rodrigosoaresilva@gmail.com"
] |
rodrigosoaresilva@gmail.com
|
71ae2a5380a5dac7a539c945d44e63ada72a53ba
|
bf4f4731f099dcdc964509b51ffc6ce8875f6041
|
/ll.py
|
c48d2277a0c507a6b81eae89b47f552294de014e
|
[
"MIT"
] |
permissive
|
YanglanWang/squad
|
8c9b98d3bd4ff0fe824bc08fc2a314fb45936b38
|
1019e4c7bf1a90c049d16ed1b48553964468c790
|
refs/heads/master
| 2020-06-19T19:02:30.891520
| 2019-08-02T15:53:15
| 2019-08-02T15:53:15
| 196,834,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,217
|
py
|
# import torch
# import torch.nn.functional as F
# import torch.nn as nn
# # input is of size N x C = 3 x 5
# m = nn.LogSoftmax(dim=1)
# input = torch.randn(3, 5, requires_grad=True)
# print(input)
# print(F.softmax(input))
# print(F.log_softmax(input))
# print(m(input))
# # each element in target has to have 0 <= value < C
# target = torch.tensor([1, 0, 4])
# output = F.nll_loss(m(input), target)
# print(output)
# print(output.backward())
# import string
#
# def remove_punc(text):
# exclude = set(string.punctuation)
# return ''.join(ch for ch in text if ch not in exclude)
# def white_space_fix(text):
# return ' '.join(text.split())
# b=remove_punc('I LOVE the Beijing!')
# a=white_space_fix(b)
# print(a)
# from collections import Counter
#
# a=Counter(['a','abc','bca','js','a','b','b'])
# b=Counter(['c','ccc','aa','a'])
# c=a&b
# print(c)
# prediction='abc'
# ground_truths=['abc','aa']
# b=(float(bool(prediction) == bool(ground_truths)))
# print(b)
import torch
a=torch.randn(3,5)
print(a)
max_in_row,_=torch.max(a,dim=1)
max_in_col,_=torch.max(a,dim=0)
print(max_in_row,max_in_col)
start_idxs=torch.argmax(max_in_row,-1)
end_idxs=torch.argmax(max_in_col,-1)
print(start_idxs,end_idxs)
|
[
"yanglan-17@mails.tsinghua.edu.cn"
] |
yanglan-17@mails.tsinghua.edu.cn
|
818a6404248346ef4bcc70ea941cc25a5cc2006c
|
1b7b13984a90b5d11331966fe1b1bb88f2b85dd7
|
/modeling/test.py
|
b583abada1374a6055aa896f3d6fcaa16e67abe6
|
[] |
no_license
|
yekeren/VCR
|
23cf6b17ce7adcc057a531182898900dcd75727b
|
6a573edf3183e9a8ef2056449c53865d8e27125d
|
refs/heads/master
| 2023-04-06T05:41:09.851825
| 2020-04-14T18:26:42
| 2020-04-14T18:26:42
| 232,395,848
| 0
| 0
| null | 2023-03-24T23:35:46
| 2020-01-07T19:01:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,259
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import os
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from protos import pipeline_pb2
from modeling import trainer
from readers import reader
from readers.vcr_text_only_reader import InputFields
from readers.vcr_text_only_reader import NUM_CHOICES
from vcr import builder
import json
flags.DEFINE_string('model_dir', None,
'Path to the directory which holds model checkpoints.')
flags.DEFINE_string('pipeline_proto', None, 'Path to the pipeline proto file.')
FLAGS = flags.FLAGS
FIELD_ANSWER_PREDICTION = 'answer_prediction'
def _load_pipeline_proto(filename):
"""Loads pipeline proto from file.
Args:
filename: Path to the pipeline config file.
Returns:
An instance of pipeline_pb2.Pipeline.
"""
with tf.io.gfile.GFile(filename, 'r') as fp:
return text_format.Merge(fp.read(), pipeline_pb2.Pipeline())
def main(_):
logging.set_verbosity(logging.DEBUG)
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
pipeline_proto = _load_pipeline_proto(FLAGS.pipeline_proto)
for example in trainer.predict(pipeline_proto, FLAGS.model_dir):
batch_size = len(example['question'])
for i in range(batch_size):
print('#' * 128)
print(example['question'][i])
print(example['answer_label'][i])
import pdb
pdb.set_trace()
for j in range(4):
sentence = []
for token, indicator in zip(example['answer_choices'][i, j],
example['shortcut_mask'][i, j]):
if not indicator:
sentence.append(token.decode('utf8') + '[REMOVE]')
else:
sentence.append(token.decode('utf8'))
print(' '.join(sentence))
print(example['answer_logits'][i][j].tolist())
print(example['a_soft_sample'][i][j].tolist())
print()
if __name__ == '__main__':
flags.mark_flag_as_required('model_dir')
flags.mark_flag_as_required('pipeline_proto')
app.run(main)
|
[
"yekeren.cn@gmail.com"
] |
yekeren.cn@gmail.com
|
96df20298ac9dc1fdaca45b61783c50aaaad575f
|
219d7694180482e0b9944deb6dee11dcf7bf0e23
|
/morecvutils/connectedComponents.py
|
58ed9279dc1b6fef5516c8a7b21f18623978e5ce
|
[
"MIT"
] |
permissive
|
Aresthu/morecvutils
|
5e3bfcba2b5c48ec022e641f19b40e2836d2d6bd
|
4856d98c45dbd6bcfb86c87f7ec9987c378e244c
|
refs/heads/master
| 2021-04-09T16:03:01.956218
| 2018-02-18T07:14:41
| 2018-02-18T07:14:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,580
|
py
|
import cv2
try: #OpenCV 2.4
from cv2 import SimpleBlobDetector as SimpleBlobDetector
except ImportError: #OpenCV 3
from cv2 import SimpleBlobDetector_create as SimpleBlobDetector
from numpy import asarray
def doblob(morphed,blobdet,img,anno=True):
"""
img: can be RGB (MxNx3) or gray (MxN)
http://docs.opencv.org/master/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html
http://docs.opencv.org/trunk/modules/features2d/doc/drawing_function_of_keypoints_and_matches.html
"""
keypoints = blobdet.detect(morphed)
nkey = len(keypoints)
kpsize = asarray([k.size for k in keypoints])
final = img.copy() # is the .copy necessary?
final = cv2.drawKeypoints(img, keypoints, outImage=final,
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
#%% plot count of blobs
if anno:
cv2.putText(final, text=str(nkey), org=(int(img.shape[1]*.9),25),
fontFace=cv2.FONT_HERSHEY_PLAIN, fontScale=2,
color=(0,255,0), thickness=2)
return final,nkey,kpsize
def setupblob(minarea, maxarea, mindist):
blobparam = cv2.SimpleBlobDetector_Params()
blobparam.filterByArea = True
blobparam.filterByColor = False
blobparam.filterByCircularity = False
blobparam.filterByInertia = False
blobparam.filterByConvexity = False
blobparam.minDistBetweenBlobs = mindist
blobparam.minArea = minarea
blobparam.maxArea = maxarea
#blobparam.minThreshold = 40 #we have already made a binary image
return SimpleBlobDetector(blobparam)
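# --- Editor's sketch (not part of the original file): wiring the two helpers
# together; `morphed` is a binary mask and `img` the frame to annotate. ---
#   blobdet = setupblob(minarea=10, maxarea=500, mindist=5)
#   final, nkey, kpsize = doblob(morphed, blobdet, img, anno=True)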
|
[
"scivision@users.noreply.github.com"
] |
scivision@users.noreply.github.com
|
df595c77d73ecc81233488dc652b38172a935850
|
d86c52f4098fd9c1a102c2d3f5630556e0610fa2
|
/fitle/myenv/Lib/site-packages/django/urls/base.py
|
0caa424c33ca350337f9994d131b8e12e6d6d6ca
|
[] |
no_license
|
makadama/bitbucket
|
24f05c4946168ed15d4f56bfdc45fd6c0774e0f2
|
cabfd551b92fe1af6d9d14ab9eb3d9974b64aa79
|
refs/heads/master
| 2023-06-19T19:04:03.894599
| 2021-07-15T12:10:39
| 2021-07-15T12:10:39
| 385,203,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:b1fa9d73dc9c6873504ab270b4646eb91e5cd202f9f0a9fffff2342e2183d8ab
size 5587
|
[
"adamamakhtarsow@gmail.com"
] |
adamamakhtarsow@gmail.com
|
3a5912957350ed986573050c9f331ab000478692
|
5d0e8ac83fc0e39adb1b031cc01187bcdeb3a452
|
/h2o-py/tests/testdir_javapredict/pyunit_javapredict_irisDRF.py
|
dde1d997ffd534e4322911996f0d558a268813e9
|
[
"Apache-2.0"
] |
permissive
|
xxushans/h2o-3
|
f466a3faebb7342c7e41266b5d8ba0a40a2d1cff
|
1567366c926b932acf8051a9ef579b966133f5f8
|
refs/heads/master
| 2021-01-14T08:56:24.477570
| 2015-10-20T20:25:23
| 2015-10-20T20:25:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
def javapredict_iris_drf():
    # optional parameters
    params = {'ntrees':100, 'max_depth':5, 'min_rows':10}
    print "Parameter list:"
    for k, v in params.items(): print "{0}, {1}".format(k, v)
    train = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris_train.csv"))
    test = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris_train.csv"))  # note: reuses the training file as the test set
x = ["sepal_len","sepal_wid","petal_len","petal_wid"]
y = "species"
pyunit_utils.javapredict("random_forest", "class", train, test, x, y, **params)
javapredict_iris_drf()
|
[
"eric.eckstrand@gmail.com"
] |
eric.eckstrand@gmail.com
|
334b108ede915f2978c4c60ecab922e2db736edb
|
8dc84558f0058d90dfc4955e905dab1b22d12c08
|
/third_party/catapult/catapult_build/js_checks.py
|
78cc745ebd8c0a9d391d4f7b6b80392c3d74fce2
|
[
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
meniossin/src
|
42a95cc6c4a9c71d43d62bc4311224ca1fd61e03
|
44f73f7e76119e5ab415d4593ac66485e65d700a
|
refs/heads/master
| 2022-12-16T20:17:03.747113
| 2020-09-03T10:43:12
| 2020-09-03T10:43:12
| 263,710,168
| 1
| 0
|
BSD-3-Clause
| 2020-05-13T18:20:09
| 2020-05-13T18:20:08
| null |
UTF-8
|
Python
| false
| false
| 3,390
|
py
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import eslint
from py_vulcanize import strip_js_comments
from catapult_build import parse_html
class JSChecker(object):
def __init__(self, input_api, output_api, file_filter=None):
self.input_api = input_api
self.output_api = output_api
if file_filter:
self.file_filter = file_filter
else:
self.file_filter = lambda x: True
def RunChecks(self):
"""Checks for violations of the Chromium JavaScript style guide.
See:
http://chromium.org/developers/web-development-style-guide#TOC-JavaScript
"""
results = []
affected_files = self.input_api.AffectedFiles(
file_filter=self.file_filter,
include_deletes=False)
def ShouldCheck(f):
if f.LocalPath().endswith('.js'):
return True
if f.LocalPath().endswith('.html'):
return True
return False
affected_js_files = filter(ShouldCheck, affected_files)
error_lines = []
for f in affected_js_files:
contents = list(f.NewContents())
error_lines += CheckStrictMode(
'\n'.join(contents),
is_html_file=f.LocalPath().endswith('.html'))
if affected_js_files:
success, eslint_output = eslint.RunEslint(
[f.AbsoluteLocalPath() for f in affected_js_files])
if not success:
error_lines.append('\neslint found lint errors:')
error_lines.append(eslint_output)
if error_lines:
error_lines.insert(0, 'Found JavaScript style violations:')
results.append(
_MakeErrorOrWarning(self.output_api, '\n'.join(error_lines)))
return results
def _ErrorHighlight(start, length):
"""Produces a row of '^'s to underline part of a string."""
return start * ' ' + length * '^'
def _MakeErrorOrWarning(output_api, error_text):
return output_api.PresubmitError(error_text)
def CheckStrictMode(contents, is_html_file=False):
statements_to_check = []
if is_html_file:
statements_to_check.extend(_FirstStatementsInScriptElements(contents))
else:
statements_to_check.append(_FirstStatement(contents))
error_lines = []
for s in statements_to_check:
if s != "'use strict'":
error_lines.append('Expected "\'use strict\'" as first statement, '
'but found "%s" instead.' % s)
return error_lines
def _FirstStatementsInScriptElements(contents):
"""Returns a list of first statements found in each <script> element."""
soup = parse_html.BeautifulSoup(contents)
script_elements = soup.find_all('script', src=None)
return [_FirstStatement(e.get_text()) for e in script_elements]
def _FirstStatement(contents):
"""Extracts the first statement in some JS source code."""
stripped_contents = strip_js_comments.StripJSComments(contents).strip()
matches = re.match('^(.*?);', stripped_contents, re.DOTALL)
if not matches:
return ''
return matches.group(1).strip()
def RunChecks(input_api, output_api, excluded_paths=None):
def ShouldCheck(affected_file):
if not excluded_paths:
return True
path = affected_file.LocalPath()
return not any(re.match(pattern, path) for pattern in excluded_paths)
return JSChecker(input_api, output_api, file_filter=ShouldCheck).RunChecks()
|
[
"arnaud@geometry.ee"
] |
arnaud@geometry.ee
|
dd4532867225260b493c78ccb86707a010c68f3e
|
412b0612cf13e9e28b9ea2e625975f3d9a2f52b6
|
/2022/16/pressure_release.py
|
450cad8ba653b096a272d2898024b05e9b964992
|
[] |
no_license
|
AlexClowes/advent_of_code
|
2cf6c54a5f58db8482d1692a7753b96cd84b6279
|
d2158e3a4edae89071e6a88c9e874a9a71d4d0ec
|
refs/heads/master
| 2022-12-24T19:02:07.815437
| 2022-12-23T17:35:53
| 2022-12-23T17:35:53
| 225,618,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,151
|
py
|
from collections import defaultdict
import heapq
from itertools import combinations
import re
def main():
pat = r"Valve (\w+) has flow rate=(\d+); tunnels? leads? to valves? ([,\s\w]+)"
flow_rate = {}
graph = defaultdict(dict)
with open("valves.txt") as f:
for line in f:
valve, rate, tunnels = re.match(pat, line.strip()).groups()
flow_rate[valve] = int(rate)
for other_valve in tunnels.split(", "):
graph[valve][other_valve] = 1
def get_min_dist(start, dest):
seen = set()
q = [(0, start)]
while q:
total_dist, pos = heapq.heappop(q)
if pos == dest:
return total_dist
if pos not in seen:
seen.add(pos)
for new_pos, dist in graph[pos].items():
heapq.heappush(q, (total_dist + dist, new_pos))
start_times = {
valve: 29 - get_min_dist("AA", valve)
for valve, flow in flow_rate.items()
if flow
}
# Make graph connected
for v1, v2 in combinations(list(graph), 2):
if (v1 == "AA" or flow_rate[v1]) and flow_rate[v2]:
graph[v1][v2] = graph[v2][v1] = get_min_dist(v1, v2)
# Prune valves with no flow rate
for v1 in list(graph):
if not flow_rate[v1]:
for v2 in graph[v1]:
del graph[v2][v1]
del graph[v1]
def max_flow(time, pos, seen, flow):
ret = flow
for new_pos in graph:
if new_pos in seen:
continue
new_time = time - graph[pos][new_pos] - 1
if new_time <= 0:
continue
new_seen = seen + (new_pos,)
new_flow = flow + new_time * flow_rate[new_pos]
ret = max(ret, max_flow(new_time, new_pos, new_seen, new_flow))
return ret
print(
max(
max_flow(time, valve, (valve,), time * flow_rate[valve])
for valve, time in start_times.items()
)
)
if __name__ == "__main__":
main()
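# --- Optional optimisation sketch (not in the original): max_flow() revisits
# identical (time, position, opened-valves) states. Memoising on a hashable
# key prunes those repeats. 'graph' and 'flow_rate' are the structures built
# in main() above; this returns the best *additional* flow from a state.
def make_max_flow_memo(graph, flow_rate):
    cache = {}
    def go(time, pos, seen):
        key = (time, pos, seen)
        if key in cache:
            return cache[key]
        best = 0
        for new_pos in graph[pos]:
            if new_pos in seen:
                continue
            new_time = time - graph[pos][new_pos] - 1
            if new_time > 0:
                gain = new_time * flow_rate[new_pos]
                best = max(best, gain + go(new_time, new_pos, seen | {new_pos}))
        cache[key] = best
        return best
    return lambda time, start: go(time, start, frozenset([start]))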
|
[
"alexclowes@gmail.com"
] |
alexclowes@gmail.com
|
b52273a1e19daac8b6173e5b3287d5a6ad803d06
|
4bfdb3ad5b44044113d3d6b586e10281d3987c9e
|
/infra/bots/recipe_modules/upload_dm_results/api.py
|
3005f3e77790c38666ee47a5e9b661f92619102c
|
[
"BSD-3-Clause"
] |
permissive
|
imxiangpeng/skia
|
dcdca3538564f2707fde10b43bdcaa6d9b5e0103
|
3e7cddaf32e280fe9f32eec5bfdd8168ca4941b6
|
refs/heads/master
| 2021-01-23T02:40:18.169644
| 2017-03-23T20:24:50
| 2017-03-24T01:24:57
| 86,016,340
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,932
|
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe for uploading DM results.
import calendar
from recipe_engine import recipe_api
DM_JSON = 'dm.json'
UPLOAD_ATTEMPTS = 5
VERBOSE_LOG = 'verbose.log'
class UploadDmResultsApi(recipe_api.RecipeApi):
def cp(self, name, src, dst, extra_args=None):
cmd = ['gsutil', 'cp']
if extra_args:
cmd.extend(extra_args)
cmd.extend([src, dst])
name = 'upload %s' % name
for i in xrange(UPLOAD_ATTEMPTS):
step_name = name
if i > 0:
step_name += ' (attempt %d)' % (i+1)
try:
self.m.step(step_name, cmd=cmd)
break
except self.m.step.StepFailure:
if i == UPLOAD_ATTEMPTS - 1:
raise
def run(self):
builder_name = self.m.properties['buildername']
revision = self.m.properties['revision']
results_dir = self.m.path['start_dir'].join('dm')
# Move dm.json and verbose.log to their own directory.
json_file = results_dir.join(DM_JSON)
log_file = results_dir.join(VERBOSE_LOG)
tmp_dir = self.m.path['start_dir'].join('tmp_upload')
self.m.shutil.makedirs('tmp dir', tmp_dir, infra_step=True)
self.m.shutil.copy('copy dm.json', json_file, tmp_dir)
self.m.shutil.copy('copy verbose.log', log_file, tmp_dir)
self.m.shutil.remove('rm old dm.json', json_file)
self.m.shutil.remove('rm old verbose.log', log_file)
# Upload the images.
image_dest_path = 'gs://%s/dm-images-v1' % self.m.properties['gs_bucket']
files_to_upload = self.m.file.glob(
'find images',
results_dir.join('*'),
test_data=[results_dir.join('someimage.png')],
infra_step=True)
if len(files_to_upload) > 0:
self.cp('images', results_dir.join('*'), image_dest_path)
# Upload the JSON summary and verbose.log.
now = self.m.time.utcnow()
summary_dest_path = '/'.join([
'dm-json-v1',
str(now.year ).zfill(4),
str(now.month).zfill(2),
str(now.day ).zfill(2),
str(now.hour ).zfill(2),
revision,
builder_name,
str(int(calendar.timegm(now.utctimetuple())))])
# Trybot results are further siloed by issue/patchset.
issue = str(self.m.properties.get('issue', ''))
patchset = str(self.m.properties.get('patchset', ''))
if self.m.properties.get('patch_storage', '') == 'gerrit':
issue = str(self.m.properties['patch_issue'])
patchset = str(self.m.properties['patch_set'])
if issue and patchset:
summary_dest_path = '/'.join((
'trybot', summary_dest_path, issue, patchset))
summary_dest_path = 'gs://%s/%s' % (self.m.properties['gs_bucket'],
summary_dest_path)
self.cp('JSON and logs', tmp_dir.join('*'), summary_dest_path,
['-z', 'json,log'])
|
[
"skia-commit-bot@chromium.org"
] |
skia-commit-bot@chromium.org
|
e8aa6f13db12e6079697b3754a63bb24bcc8c34c
|
279f415dd1e06c594c6c87deda57e201c73c4542
|
/test/espnet2/schedulers/test_noam_lr.py
|
1e34d4684444b4b4dde33466d11137fe8f499a5a
|
[
"Apache-2.0"
] |
permissive
|
espnet/espnet
|
f7ba47271c1a6b1ed606dbbfb04a7f14220bb585
|
bcd20948db7846ee523443ef9fd78c7a1248c95e
|
refs/heads/master
| 2023-08-28T23:43:34.238336
| 2023-08-23T02:51:39
| 2023-08-23T02:51:39
| 114,054,873
| 7,242
| 2,244
|
Apache-2.0
| 2023-09-14T08:01:11
| 2017-12-13T00:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 313
|
py
|
import torch
from espnet2.schedulers.noam_lr import NoamLR
def test_NoamLR():
linear = torch.nn.Linear(2, 2)
opt = torch.optim.SGD(linear.parameters(), 0.1)
sch = NoamLR(opt)
lr = opt.param_groups[0]["lr"]
opt.step()
sch.step()
lr2 = opt.param_groups[0]["lr"]
assert lr != lr2
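# --- Reference sketch (not part of the original test): the classic Noam
# schedule from "Attention Is All You Need", which NoamLR follows in spirit:
# linear warm-up, then inverse-square-root decay. The constants here are the
# paper's defaults; espnet2's exact parameterisation may differ.
def noam_rate(step, model_size=512, warmup=25000, base=1.0):
    return base * model_size ** -0.5 * min(step ** -0.5, step * warmup ** -1.5)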
|
[
"naoyuki.kamo829@gmail.com"
] |
naoyuki.kamo829@gmail.com
|
270aaddedb72e83d89edde7860500e85380b8b0d
|
4dd695521343d56ff943e8c1768343d7680714e3
|
/experiments/scripts_auto_crossdataset_ynoguti_braccent/config_SVM_1024_crossdataset.py
|
70cd8e2343abb6a9b2f80d3d2b006aed092d244f
|
[] |
no_license
|
natharb/environment
|
ea659ee541f6473e92b5b30c549e52b66f47b280
|
86e6cee6e01d2370abeb7c55a2c8a15001735919
|
refs/heads/master
| 2021-09-28T02:39:02.222966
| 2018-11-13T12:03:34
| 2018-11-13T12:03:34
| 139,762,646
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,372
|
py
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#Nathália Alves Rocha Batista (nathbapt@decom.fee.unicamp.br)
import sys
sys.path.insert(0, '.')
import bob.bio.spear
import bob.bio.gmm
import numpy
import scipy.spatial
temp_directory = './results/crossdataset_ynoguti_braccent/SVM/1024/temp/'
result_directory = './results/crossdataset_ynoguti_braccent/SVM/1024/results/'
sub_directory = 'subdirectory'
database = 'database_SVM_1024_crossdataset.py'
groups = ['dev']
#groups = ['dev', 'eval']
preprocessor = bob.bio.spear.preprocessor.Energy_2Gauss(max_iterations = 10, convergence_threshold = 0.0005, variance_threshold = 0.0005, win_length_ms = 20., win_shift_ms = 10., smoothing_window = 10)
extractor = bob.bio.spear.extractor.Cepstral(win_length_ms = 25, win_shift_ms = 10, n_filters = 24 , dct_norm = False, f_min = 0, f_max = 4000, delta_win = 2, mel_scale = True, with_energy = True, with_delta = True, with_delta_delta = True, n_ceps = 19, pre_emphasis_coef = 0.97)
algorithm = bob.bio.gmm.algorithm.SVMGMM(number_of_gaussians = 1024, kmeans_training_iterations = 10, gmm_training_iterations = 10,
training_threshold = 5e-4, variance_threshold = 5e-4, update_weights = True, update_means = True, update_variances = True, relevance_factor = 4, gmm_enroll_iterations = 1, responsibility_threshold = 0, INIT_SEED = 5489)
#parallel = 40
#verbose = 2
|
[
"nathbapt@decom.fee.unicamp.br"
] |
nathbapt@decom.fee.unicamp.br
|
378055851595a6f4509513667890d3915c1def51
|
3b504a983f1807ae7c5af51078bfab8c187fc82d
|
/client/gui/HUD2/features/AttitudeIndicator/AttitudeIndicatorSource.py
|
d55efec79392f37f2e6fbd53db0997fa8ae8c1b0
|
[] |
no_license
|
SEA-group/wowp_scripts
|
7d35fd213db95ea6b3dbd1ec6d3e0f13de86ba58
|
2fe54a44df34f2dcaa6860a23b835dcd8dd21402
|
refs/heads/master
| 2021-09-07T23:10:13.706605
| 2018-03-02T17:23:48
| 2018-03-02T17:23:48
| 117,280,141
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
# Embedded file name: scripts/client/gui/HUD2/features/AttitudeIndicator/AttitudeIndicatorSource.py
from gui.HUD2.core.DataPrims import DataSource
from gui.HUD2.hudFeatures import Feature
class AttitudeIndicatorSource(DataSource):
def __init__(self, features):
self._model = features.require(Feature.GAME_MODEL).attitudeIndicator
self._uiSettings = features.require(Feature.UI_SETTINGS)
self._uiSettings.eAttitudeModeChanged += self._updateAttitudeMode
self._clientArena = features.require(Feature.CLIENT_ARENA)
if self._clientArena.isAllServerDataReceived():
self._setupModel(None)
else:
self._clientArena.onNewAvatarsInfo += self._setupModel
return
def _setupModel(self, newInfos):
self._model.attitudeMode = self._uiSettings.gameUI['attitudeMode']
def _updateAttitudeMode(self, *args, **kwargs):
self._model.attitudeMode = self._uiSettings.gameUI['attitudeMode']
def dispose(self):
self._clientArena.onNewAvatarsInfo -= self._setupModel
self._uiSettings.eAttitudeModeChanged -= self._updateAttitudeMode
self._model = None
self._uiSettings = None
self._clientArena = None
return
|
[
"55k@outlook.com"
] |
55k@outlook.com
|
2dbeed1dcdd89e81a9c6703e2d459c3d9b55a577
|
ba46f774793ff06ae12cbed51a024142d2b0f63e
|
/topiary/cli/outputs.py
|
a4d26ea8ba21e21749e38ca4d7e35b53b424729f
|
[
"Apache-2.0"
] |
permissive
|
Saintyven/topiary
|
05fe9eb9b0a9bbb851564d0d835d967bf1fce6ab
|
04f0077bc4bf1ad350a0e78c26fa48c55fe7813b
|
refs/heads/master
| 2021-09-07T17:21:56.267182
| 2018-02-26T19:56:58
| 2018-02-26T19:56:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,255
|
py
|
# Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common commandline arguments for output files
"""
from __future__ import print_function, division, absolute_import
import logging
def add_output_args(arg_parser):
output_group = arg_parser.add_argument_group(
title="Output",
description="How and where to write results")
output_group.add_argument(
"--output-csv",
default=None,
help="Path to output CSV file")
output_group.add_argument(
"--output-html",
default=None,
help="Path to output HTML file")
output_group.add_argument(
"--output-csv-sep",
default=",",
help="Separator for CSV file")
output_group.add_argument(
"--subset-output-columns",
nargs="*")
output_group.add_argument(
"--rename-output-column",
nargs=2,
action="append",
help=(
"Rename original column (first parameter) to new"
" name (second parameter)"))
output_group.add_argument(
"--print-columns",
default=False,
action="store_true",
help="Print columns before writing data to file(s)")
return output_group
def write_outputs(
df,
args,
print_df_before_filtering=False,
print_df_after_filtering=False):
if print_df_before_filtering:
print(df)
if args.subset_output_columns:
subset_columns = []
for column in args.subset_output_columns:
if column not in df.columns:
logging.warning(
"Invalid column name '%s', available: %s" % (
column, list(df.columns)))
else:
subset_columns.append(column)
df = df[subset_columns]
if args.rename_output_column:
for (old_name, new_name) in args.rename_output_column:
if old_name not in df.columns:
logging.warning(
"Can't rename column '%s' since it doesn't exist, available: %s" % (
old_name, list(df.columns)))
else:
df.rename(columns={old_name: new_name}, inplace=True)
if print_df_after_filtering:
print(df)
if args.print_columns:
print("Columns:")
for column in df.columns:
print("-- %s" % column)
if args.output_csv:
print("Saving %s..." % args.output_csv)
df.to_csv(
args.output_csv,
index=True,
index_label="#",
sep=args.output_csv_sep)
if args.output_html:
print("Saving %s..." % args.output_html)
df.to_html(args.output_html, index=True)
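# --- Usage sketch (not part of the original module): wiring these helpers
# into a small CLI. The DataFrame below is a stand-in; real topiary commands
# pass their own results table.
if __name__ == "__main__":
    import argparse
    import pandas as pd
    parser = argparse.ArgumentParser()
    add_output_args(parser)
    args = parser.parse_args()
    df = pd.DataFrame({"peptide": ["SIINFEKL"], "score": [0.9]})
    write_outputs(df, args)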
|
[
"alex.rubinsteyn@gmail.com"
] |
alex.rubinsteyn@gmail.com
|
f0cfc11ac7bdbd46f6556b49a3637218927f3cb7
|
b7f3edb5b7c62174bed808079c3b21fb9ea51d52
|
/build/android/gyp/jinja_template.py
|
ba335a248b06620e7b8f9d1dac8dfd6b516e3ac5
|
[
"BSD-3-Clause"
] |
permissive
|
otcshare/chromium-src
|
26a7372773b53b236784c51677c566dc0ad839e4
|
64bee65c921db7e78e25d08f1e98da2668b57be5
|
refs/heads/webml
| 2023-03-21T03:20:15.377034
| 2020-11-16T01:40:14
| 2020-11-16T01:40:14
| 209,262,645
| 18
| 21
|
BSD-3-Clause
| 2023-03-23T06:20:07
| 2019-09-18T08:52:07
| null |
UTF-8
|
Python
| false
| false
| 6,456
|
py
|
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Renders one or more template files using the Jinja template engine."""
import codecs
import argparse
import os
import sys
from util import build_utils
from util import resource_utils
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from pylib.constants import host_paths
# Import jinja2 from third_party/jinja2
sys.path.append(os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party'))
import jinja2 # pylint: disable=F0401
class _RecordingFileSystemLoader(jinja2.FileSystemLoader):
def __init__(self, searchpath):
jinja2.FileSystemLoader.__init__(self, searchpath)
self.loaded_templates = set()
def get_source(self, environment, template):
contents, filename, uptodate = jinja2.FileSystemLoader.get_source(
self, environment, template)
self.loaded_templates.add(os.path.relpath(filename))
return contents, filename, uptodate
class JinjaProcessor(object):
"""Allows easy rendering of jinja templates with input file tracking."""
def __init__(self, loader_base_dir, variables=None):
self.loader_base_dir = loader_base_dir
self.variables = variables or {}
self.loader = _RecordingFileSystemLoader(loader_base_dir)
self.env = jinja2.Environment(loader=self.loader)
self.env.undefined = jinja2.StrictUndefined
self.env.line_comment_prefix = '##'
self.env.trim_blocks = True
self.env.lstrip_blocks = True
self._template_cache = {} # Map of path -> Template
def Render(self, input_filename, variables=None):
input_rel_path = os.path.relpath(input_filename, self.loader_base_dir)
template = self._template_cache.get(input_rel_path)
if not template:
template = self.env.get_template(input_rel_path)
self._template_cache[input_rel_path] = template
return template.render(variables or self.variables)
def GetLoadedTemplates(self):
return list(self.loader.loaded_templates)
def _ProcessFile(processor, input_filename, output_filename):
output = processor.Render(input_filename)
# If |output| is same with the file content, we skip update and
# ninja's restat will avoid rebuilding things that depend on it.
if os.path.isfile(output_filename):
with codecs.open(output_filename, 'r', 'utf-8') as f:
if f.read() == output:
return
with codecs.open(output_filename, 'w', 'utf-8') as output_file:
output_file.write(output)
def _ProcessFiles(processor, input_filenames, inputs_base_dir, outputs_zip):
with build_utils.TempDir() as temp_dir:
path_info = resource_utils.ResourceInfoFile()
for input_filename in input_filenames:
relpath = os.path.relpath(os.path.abspath(input_filename),
os.path.abspath(inputs_base_dir))
if relpath.startswith(os.pardir):
raise Exception('input file %s is not contained in inputs base dir %s'
% (input_filename, inputs_base_dir))
output_filename = os.path.join(temp_dir, relpath)
parent_dir = os.path.dirname(output_filename)
build_utils.MakeDirectory(parent_dir)
_ProcessFile(processor, input_filename, output_filename)
path_info.AddMapping(relpath, input_filename)
path_info.Write(outputs_zip + '.info')
build_utils.ZipDir(outputs_zip, temp_dir)
def _ParseVariables(variables_arg, error_func):
variables = {}
for v in build_utils.ParseGnList(variables_arg):
if '=' not in v:
error_func('--variables argument must contain "=": ' + v)
name, _, value = v.partition('=')
variables[name] = value
return variables
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--inputs', required=True,
help='GN-list of template files to process.')
parser.add_argument('--includes', default='',
help="GN-list of files that get {% include %}'ed.")
parser.add_argument('--output', help='The output file to generate. Valid '
'only if there is a single input.')
parser.add_argument('--outputs-zip', help='A zip file for the processed '
'templates. Required if there are multiple inputs.')
parser.add_argument('--inputs-base-dir', help='A common ancestor directory '
'of the inputs. Each output\'s path in the output zip '
'will match the relative path from INPUTS_BASE_DIR to '
'the input. Required if --output-zip is given.')
parser.add_argument('--loader-base-dir', help='Base path used by the '
'template loader. Must be a common ancestor directory of '
'the inputs. Defaults to DIR_SOURCE_ROOT.',
default=host_paths.DIR_SOURCE_ROOT)
parser.add_argument('--variables', help='Variables to be made available in '
'the template processing environment, as a GYP list '
'(e.g. --variables "channel=beta mstone=39")', default='')
parser.add_argument('--check-includes', action='store_true',
help='Enable inputs and includes checks.')
options = parser.parse_args()
inputs = build_utils.ParseGnList(options.inputs)
includes = build_utils.ParseGnList(options.includes)
if (options.output is None) == (options.outputs_zip is None):
parser.error('Exactly one of --output and --output-zip must be given')
if options.output and len(inputs) != 1:
parser.error('--output cannot be used with multiple inputs')
if options.outputs_zip and not options.inputs_base_dir:
parser.error('--inputs-base-dir must be given when --output-zip is used')
variables = _ParseVariables(options.variables, parser.error)
processor = JinjaProcessor(options.loader_base_dir, variables=variables)
if options.output:
_ProcessFile(processor, inputs[0], options.output)
else:
_ProcessFiles(processor, inputs, options.inputs_base_dir,
options.outputs_zip)
if options.check_includes:
all_inputs = set(processor.GetLoadedTemplates())
all_inputs.difference_update(inputs)
all_inputs.difference_update(includes)
if all_inputs:
raise Exception('Found files not listed via --includes:\n' +
'\n'.join(sorted(all_inputs)))
if __name__ == '__main__':
main()
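# --- Usage sketch (not part of the original script): rendering a template
# directly through JinjaProcessor, bypassing the GN-driven main(). The paths
# and variables are illustrative; running it still requires the Chromium
# build_utils/host_paths imports at the top of this file.
#
#   processor = JinjaProcessor('/path/to/templates',
#                              variables={'channel': 'beta', 'mstone': '39'})
#   print(processor.Render('AndroidManifest.xml.jinja2'))
#   print(processor.GetLoadedTemplates())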
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
1857e98731a98fbbf1200a924206070e99daf9c6
|
bb853b657537eca893032ad08e4c843b5195fa35
|
/dateandtime2.py
|
ff50587bccebcdb78c87b2dd95e50b62bff026b6
|
[] |
no_license
|
PrathameshDhumal/Artificial-Intelligence-Assistant-With-Python
|
584064e5219479576244d74aafb6f4e4dcbd16cc
|
6afcdf792a2e0e1d6168dfb732add3d63158f38d
|
refs/heads/main
| 2023-05-31T01:31:19.202678
| 2021-06-30T19:57:53
| 2021-06-30T19:57:53
| 379,478,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
import pyttsx3
import datetime
engine = pyttsx3.init()
def speak(audio):
engine.say(audio)
engine.runAndWait()
def time():
Time = datetime.datetime.now().strftime("%I:%M:%S")
speak(Time)
time()
def date():
    now = datetime.datetime.now()
    speak("The current date is ")
    speak(str(now.day))    # pyttsx3's say() expects strings, not bare ints
    speak(str(now.month))
    speak(str(now.year))
date()
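# --- Alternative sketch (not part of the original): speaking the date as one
# formatted string reads the month by name and avoids three separate say()
# calls. strftime formatting is standard-library behaviour.
def date_spoken():
    today = datetime.datetime.now()
    speak(today.strftime("The current date is %d %B %Y"))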
|
[
"noreply@github.com"
] |
PrathameshDhumal.noreply@github.com
|
770681d3dbbe5a567c39708a2f499a677b0b69a4
|
c6ce21e5b8a906b0bf95cfcac9d84e243c876723
|
/PARALLAX/CODE/resnet_train.py
|
c41dca46d825c0b593e8b6d0f4d047ab9e5385a9
|
[] |
no_license
|
yuchanmo/cloud_class
|
6450b08017fbe72dde810620365a87cda7ae8abd
|
b3d2c6589f7a9b8c0340e00487f610a097373ec6
|
refs/heads/master
| 2020-06-01T06:48:19.669550
| 2019-06-07T04:04:51
| 2019-06-07T04:04:51
| 190,685,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,012
|
py
|
#Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet Train/Eval module.
"""
import time
import six
import sys
import cifar10_download
cifar10_download.download()
import cifar_input
import numpy as np
import resnet_model
import tensorflow as tf
import parallax
import parallax_config
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_data_path', '',
'Filepattern for training data.')
tf.app.flags.DEFINE_integer('image_size', 32, 'Image side length.')
tf.app.flags.DEFINE_string('ckpt_dir', '',
'Directory to keep the checkpoints. Should be a '
'parent directory of FLAGS.train_dir')
tf.app.flags.DEFINE_string('resource_info_file', 'resource_info',
'Resource information file')
tf.app.flags.DEFINE_string('run_option', 'PS',
'Distributed training architecture')
tf.app.flags.DEFINE_boolean('sync', True, '')
def train(hps):
"""Training loop."""
single_gpu_graph = tf.Graph()
with single_gpu_graph.as_default():
images, labels = cifar_input.build_input(
'cifar10', FLAGS.train_data_path, hps.batch_size, 'train')
model = resnet_model.ResNet(hps, images, labels, 'train')
model.build_graph()
truth = tf.argmax(model.labels, axis=1)
predictions = tf.argmax(model.predictions, axis=1)
precision = tf.reduce_mean(tf.to_float(tf.equal(predictions, truth)))
########################################################################
#### FIXME: Get session for distributed environments using Parallax ####
#### Pass parallax_config as an argument ####
########################################################################
parallax_sess, num_workers, worker_id, num_replicas_per_worker = \
parallax.parallel_run(single_gpu_graph,
FLAGS.resource_info_file,
sync=FLAGS.sync,
parallax_config=parallax_config.build_config())
for i in range(350000):
_, global_step, cost, precision_ = \
parallax_sess.run([model.train_op, model.global_step, model.cost, precision])
if i % 10 == 0:
print('step: %d, loss: %.3f, precision: %.3f' % (global_step[0], cost[0], precision_[0]))
# Tuning learning rate
train_step = global_step[0]
if train_step < 10000:
lrn_rate = 0.1
elif train_step < 15000:
lrn_rate = 0.01
elif train_step < 20000:
lrn_rate = 0.001
else:
lrn_rate = 0.0001
feed_dict = {model.lrn_rate: []}
for worker in range(num_replicas_per_worker):
feed_dict[model.lrn_rate].append(lrn_rate)
parallax_sess.run(model.global_step, feed_dict=feed_dict)
def main(_):
batch_size = 128
hps = resnet_model.HParams(batch_size=batch_size,
num_classes=10,
min_lrn_rate=0.0001,
lrn_rate=0.1,
num_residual_units=5,
use_bottleneck=False,
weight_decay_rate=0.0002,
relu_leakiness=0.1)
train(hps)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
|
[
"mojjijji@gmail.com"
] |
mojjijji@gmail.com
|
f4850aed962c3460094182316ff7933229ed1cb1
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/380/usersdata/310/97316/submittedfiles/minha_bib.py
|
d6b2c61dad7783f3393a133c2502721a863c2a10
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
# -*- coding: utf-8 -*-
import random
def solicitaSimbolodoHumano(a):
    a = input('Symbol you want to play: ')
    while a != 'O' and a != 'X' and a != 'o' and a != 'x':
        a = input('Symbol you want to play: ')
    return a  # return the chosen symbol rather than a constant
def sorteioPrimeiraJogada(a):
    a = random.choice((0, 1))
    if a == 1:
        print('Winner of the draw to start the game: Player')
    else:
        print('Winner of the draw to start the game: Computer')
    return a
def JogadaComputador():
    pass  # body missing in the source snapshot
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
9d452f69e846718fd2cc84f675566da6c840c37e
|
791b790ce8a4ad93ab88cf9a2aea3b2dd70652dc
|
/Competitions/Codechef/MARCH17/Xenny and Alternating Tasks.py
|
b6e1e5fcced9841b3dcdab65f1ad2f9859b3d986
|
[
"MIT"
] |
permissive
|
Debasmita-01/Competitive-Programming
|
16d8778db5ff225519404e88afa4cccf830006b7
|
547859d6ded88a0d3977c2f6e747d80235c054cd
|
refs/heads/master
| 2022-12-25T11:14:55.129524
| 2020-10-01T05:22:01
| 2020-10-01T05:22:01
| 300,511,195
| 3
| 1
|
MIT
| 2020-10-02T06:06:30
| 2020-10-02T05:32:52
| null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
t = int(raw_input())
for i in xrange(0, t):
n = int(raw_input())
x = [int(j) for j in raw_input().split(" ")]
x.sort()
for j in xrange(1, n-1):
if x[j-1]+1 != x[j] and x[j-1]+2 != x[j+1]:
print x[j-1]
break
elif x[j-1]+1 != x[j] and x[j]+1 != x[j+1]:
print x[j]
break
elif x[j-1]+2 != x[j+1] and x[j]+1 != x[j+1]:
print x[j+1]
break
|
[
"manish.bisht490@gmail.com"
] |
manish.bisht490@gmail.com
|
89a0785d067ba85fdb5f0718923cb544b67b8140
|
00b1fe62aff1bbad885a1b13354239b07925c5c1
|
/catalyst_rl/rl/scripts/load_db.py
|
e27de3d61db62001fe0d5978f118d899921047fa
|
[
"Apache-2.0"
] |
permissive
|
catalyst-team/catalyst-rl
|
a78675c477bef478d73cd1e7101be6dbb7b586aa
|
75ffa808e2bbb9071a169a1a9c813deb6a69a797
|
refs/heads/master
| 2021-09-22T08:36:12.161991
| 2021-09-13T05:59:12
| 2021-09-13T05:59:12
| 247,928,934
| 50
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,791
|
py
|
#!/usr/bin/env python
# usage:
# catalyst_rl-rl load-db --db=redis --in-pkl ./my_db_0.pkl ./my_db_1.pkl
import argparse
import pickle
import numpy as np
from tqdm import tqdm
from catalyst_rl import utils
from catalyst_rl.rl.db import MongoDB, RedisDB
def build_args(parser):
parser.add_argument("--host", type=str, default="127.0.0.1")
parser.add_argument("--port", type=int, default=12000)
parser.add_argument(
"--in-pkl",
"-P",
nargs="+",
metavar="PKL_PATH",
dest="in_pkl",
required=True
)
parser.add_argument(
"--db",
type=str,
choices=["redis", "mongo"],
default=None,
required=True
)
parser.add_argument("--min-reward", type=int, default=None)
utils.boolean_flag(
parser, "use-sqil", default=False, help="Use SQIL – 0 reward"
)
return parser
def parse_args():
parser = argparse.ArgumentParser()
build_args(parser)
args = parser.parse_args()
return args
def main(args, _=None):
db_fn = RedisDB if args.db == "redis" else MongoDB
db = db_fn(host=args.host, port=args.port)
for in_pkl_ in args.in_pkl:
with open(in_pkl_, "rb") as fin:
trajectories = pickle.load(fin)
for trajectory in tqdm(trajectories):
trajectory = utils.unpack_if_needed(trajectory)
if args.min_reward is not None \
and sum(trajectory[-2]) < args.min_reward:
continue
if args.use_sqil:
observation, action, reward, done = trajectory
trajectory = observation, action, np.zeros_like(reward), done
db.put_trajectory(trajectory)
if __name__ == "__main__":
args = parse_args()
main(args)
|
[
"scitator@gmail.com"
] |
scitator@gmail.com
|
6a20e5f8abbe7f63a1bdaa934e00d7d7693ebcfb
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03014/s911800035.py
|
b678df13f7f47ee72f75b442123a31cda6f77d04
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,186
|
py
|
h, w = map(int, input().split())
s = [list(input()) for _ in range(h)]
u_light = [[0]*w for _ in range(h)]  # cells each position can light looking up
d_light = [[0]*w for _ in range(h)]  # looking down
l_light = [[0]*w for _ in range(h)]  # looking left
r_light = [[0]*w for _ in range(h)]  # looking right
def count_light(i, j):
if s[i][j] == '.':
return 1
else:
return -10000
for i in range(w):
d_light[-1][i] = count_light(-1, i)
u_light[0][i] = count_light(0, i)
for j in range(1, h):
for i in range(w):
d_light[-(j+1)][i] = max(d_light[-j][i], 0) + count_light(-(j+1), i)
u_light[j][i] = max(u_light[j-1][i], 0) + count_light(j, i)
for i in range(h):
l_light[i][0] = count_light(i, 0)
r_light[i][-1] = count_light(i, -1)
for j in range(1, w):
for i in range(h):
l_light[i][j] = max(l_light[i][j-1], 0) + count_light(i, j)
r_light[i][-(j+1)] = max(r_light[i][-j], 0) + count_light(i, -(j+1))
score = 0
tmp_score = 0
for i in range(h):
for j in range(w):
tmp_score = u_light[i][j] + d_light[i][j] + l_light[i][j] + r_light[i][j]
if tmp_score > score:
score = tmp_score
print(score-3)  # the chosen cell itself is counted in all four directions, so remove the 3 duplicates
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6321a92b239f65d266baddd409d0323cffabd579
|
89b6997b24e404c176358073626a8bfad7bcdb8e
|
/.history/courses/models_20210412233123.py
|
f22ac53d1746d99cb174542b2a8b300401606f5f
|
[] |
no_license
|
mohamedhawas123/Education-platform-django
|
513e64ac112880385402ce609077796578b4e9ee
|
7b83e66bba66b8b2b1a007f5818a534653e6abfb
|
refs/heads/main
| 2023-07-18T16:19:52.177886
| 2021-09-24T12:04:09
| 2021-09-24T12:04:09
| 352,306,462
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,324
|
py
|
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from .fields import OrderField
class Subject(models.Model):
title = models.CharField(max_length=200)
slug = models.SlugField(max_length=200, unique=True)
class Meta:
ordering = ('title', )
def __str__(self):
return self.title
class Course(models.Model):
owner = models.ForeignKey(User,related_name='courses_created' ,on_delete=models.CASCADE)
subject = models.ForeignKey(Subject, related_name='courses', on_delete=models.CASCADE)
title = models.CharField(max_length=200)
slug= models.SlugField(max_length=200, unique=True)
overview = models.TextField()
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ('-created', )
def __str__(self):
return self.title
class Module(models.Model):
    course = models.ForeignKey(Course, related_name='modules', on_delete=models.CASCADE)
    title = models.CharField(max_length=254)
    description = models.TextField(blank=True)
    order = OrderField(blank=True, for_fields=['course'])
class Content(models.Model):
    module = models.ForeignKey(Module, related_name='contents', on_delete=models.CASCADE)
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, limit_choices_to={
        'model__in': (
            'text',
            'video',
            'image',
            'file'
        )
    })
    object_id = models.PositiveIntegerField()
    item = GenericForeignKey('content_type', 'object_id')
    order = OrderField(blank=True, for_fields=['module'])
class ItemBase(models.Model):
owner = models.ForeignKey(User, related_name='%(class)s_related', on_delete=models.CASCADE)
title = models.CharField(max_length=250)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Text(ItemBase):
content = models.TextField()
class File(ItemBase):
file = models.FileField(upload_to='files')
class Image(ItemBase):
file = models.FileField(upload_to='images')
class Video(ItemBase):
url = models.URLField()
|
[
"mohamedhawas123@gmail.com"
] |
mohamedhawas123@gmail.com
|
a887c48538b070bc949cb678b712fd5ea562c029
|
e0980f704a573894350e285f66f4cf390837238e
|
/.history/streams/blocks_20201029145758.py
|
3227cd805384affb4014e12555cfee402b57bfa8
|
[] |
no_license
|
rucpata/WagtailWebsite
|
28008474ec779d12ef43bceb61827168274a8b61
|
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
|
refs/heads/main
| 2023-02-09T15:30:02.133415
| 2021-01-05T14:55:45
| 2021-01-05T14:55:45
| 303,961,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,497
|
py
|
from django import forms
from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
from wagtail.contrib.table_block.blocks import TableBlock
class TitleBlock(blocks.StructBlock):
    text = blocks.CharBlock(
        required=True,
        help_text='Text to display',
    )
    class Meta:
        template = 'streams/title_block.html'
        icon = 'edit'
        label = 'Title'
        help_text = 'Centered text to display on the page.'
class LinkValue(blocks.StructValue):
    """Additional logic for links."""
    def url(self) -> str:
        internal_page = self.get('internal_page')
        external_link = self.get('external_link')
        if internal_page:
            return internal_page.url
        elif external_link:
            return external_link
        return ''
class Link(blocks.StructBlock):
    link_text = blocks.CharBlock(
        max_length=50,
        default='More details'
    )
    internal_page = blocks.PageChooserBlock(
        required=False
    )
    external_link = blocks.URLBlock(
        required=False
    )
    class Meta:
        value_class = LinkValue
class Card(blocks.StructBlock):
    title = blocks.CharBlock(
        max_length=100,
        help_text='Bold title for this card. At most 100 characters.'
    )
    text = blocks.TextBlock(
        max_length=255,
        help_text='Optional text for this card. At most 255 characters.'
    )
    image = ImageChooserBlock(
        help_text='The image is automatically cropped to 570 by 370 pixels.'
    )
    link = Link(help_text='Choose a link')
class CardsBlock(blocks.StructBlock):
    cards = blocks.ListBlock(
        Card()
    )
    class Meta:
        template = 'streams/card_block.html'
        icon = 'image'
        label = 'Standard cards'
class RadioSelectBlock(blocks.ChoiceBlock):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.field.widget = forms.RadioSelect(
            choices=self.field.widget.choices
        )
class ImageAndTextBlock(blocks.StructBlock):
    image = ImageChooserBlock(help_text='The image is automatically cropped to 786 by 552 px.')
    image_alignment = RadioSelectBlock(
        choices=(
            ('left', 'Image on the left'),
            ('right', 'Image on the right'),
        ),
        default='left',
        help_text='Image on the left with text on the right, or image on the right with text on the left.'
    )
    title = blocks.CharBlock(
        max_length=60,
        help_text='Maximum length of 60 characters.'
    )
    text = blocks.CharBlock(
        max_length=140,
        required=False,
    )
    link = Link()
    class Meta:
        template = 'streams/image_and_text_block.html'
        icon = 'image'
        label = 'Image & Text'
class CallToActionBlock(blocks.StructBlock):
    title = blocks.CharBlock(
        max_length=200,
        help_text='At most 200 characters.'
    )
    link = Link()
    class Meta:
        template = 'streams/call_to_action_block.html'
        icon = 'plus'
        label = 'Call to action'
class PricingTableBlock(TableBlock):
    """Pricing table block."""
    class Meta:
        template = 'streams/pricing_table_block.html'
        label = 'Pricing table'
        icon = 'table'
        help_text = 'Your pricing tables should always contain 4 columns.'
class RichTextWithTitleBlock(blocks.StructBlock):
    pass  # the source snapshot is cut off mid-definition; minimal completion so the module parses
|
[
"rucinska.patrycja@gmail.com"
] |
rucinska.patrycja@gmail.com
|
6a10fdec032287788f43ac694db394d334627b95
|
cd8f7ecd20c58ce1ae0fe3840f7c7ee961aa5819
|
/Find Duplicate File in System.py
|
ac5ae45decb6dd46daedbe7378173039d67c8773
|
[
"Apache-2.0"
] |
permissive
|
sugia/leetcode
|
9b0f2a3521b088f8f7e5633c2c6c17c76d33dcaf
|
6facec2a54d1d9f133f420c9bce1d1043f57ebc6
|
refs/heads/master
| 2021-06-05T07:20:04.099488
| 2021-02-24T07:24:50
| 2021-02-24T07:24:50
| 29,124,136
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,859
|
py
|
'''
Given a list of directory info including directory path, and all the files with contents in this directory, you need to find out all the groups of duplicate files in the file system in terms of their paths.
A group of duplicate files consists of at least two files that have exactly the same content.
A single directory info string in the input list has the following format:
"root/d1/d2/.../dm f1.txt(f1_content) f2.txt(f2_content) ... fn.txt(fn_content)"
It means there are n files (f1.txt, f2.txt ... fn.txt with content f1_content, f2_content ... fn_content, respectively) in directory root/d1/d2/.../dm. Note that n >= 1 and m >= 0. If m = 0, it means the directory is just the root directory.
The output is a list of group of duplicate file paths. For each group, it contains all the file paths of the files that have the same content. A file path is a string that has the following format:
"directory_path/file_name.txt"
Example 1:
Input:
["root/a 1.txt(abcd) 2.txt(efgh)", "root/c 3.txt(abcd)", "root/c/d 4.txt(efgh)", "root 4.txt(efgh)"]
Output:
[["root/a/2.txt","root/c/d/4.txt","root/4.txt"],["root/a/1.txt","root/c/3.txt"]]
Note:
No order is required for the final output.
You may assume the directory name, file name and file content only has letters and digits, and the length of file content is in the range of [1,50].
The number of files given is in the range of [1,20000].
You may assume no files or directories share the same name in the same directory.
You may assume each given directory info represents a unique directory. Directory path and file info are separated by a single blank space.
Follow-up beyond contest:
Imagine you are given a real file system, how will you search files? DFS or BFS?
If the file content is very large (GB level), how will you modify your solution?
If you can only read the file by 1kb each time, how will you modify your solution?
What is the time complexity of your modified solution? What is the most time-consuming part and memory consuming part of it? How to optimize?
How to make sure the duplicated files you find are not false positive?
'''
class Solution(object):
def findDuplicate(self, paths):
"""
:type paths: List[str]
:rtype: List[List[str]]
"""
# key = content (abcd), value = [file path1, file path2]
dic = {}
for path in paths:
tmp = path.split(' ')
for i in xrange(1, len(tmp)):
name, content = tmp[i].split('(')
if content in dic:
dic[content].append('/'.join([tmp[0], name]))
else:
dic[content] = ['/'.join([tmp[0], name])]
res = []
for k, v in dic.iteritems():
if len(v) > 1:
res.append(v)
return res
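# --- Follow-up sketch (not part of the original solution): for GB-scale
# files read in small chunks, group by an incremental digest instead of raw
# content, then byte-compare within a bucket to rule out false positives.
# Uses only the standard library; chunk_size mirrors the 1 KB constraint.
import hashlib
def file_digest(path, chunk_size=1024):
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()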
|
[
"noreply@github.com"
] |
sugia.noreply@github.com
|
b18a2b8e878ac6dab0ef153d54eb8846e3615e8d
|
1424812c4f211d3d5e356e8b3889a689162062f3
|
/arcade/python/62_check_participants.py
|
f8957ee5841a7be2c906e5af02edf03edd07382b
|
[] |
no_license
|
nazomeku/codefights
|
cb7d3c40be0809695ec524a87c88dbebcf5b47bc
|
b23f6816f9b5b0720feac1c49c31163923e0a554
|
refs/heads/master
| 2021-01-22T12:49:35.905165
| 2017-11-21T19:03:37
| 2017-11-21T19:03:37
| 102,357,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
"""Given the list of participants, your task is to return the list of
games for which too few people signed up."""
def check_participants(participants):
return [a for a, b in enumerate(participants) if a > b]
|
[
"cols.nazo@gmail.com"
] |
cols.nazo@gmail.com
|
fce324c6496b5a5fd24b5262554147cad286f36a
|
3efe2059de4c7efd1f58a385656d19098b7efd63
|
/deepiu/tools/ensemble-inference-v2.py
|
436205085a0efdf481c2d9609e180ad472bc2c8b
|
[] |
no_license
|
yangyaoyunshu/image-caption-ai-challenger2017
|
5d2e82b2f8d70ac6d4eb7a0e70f6b406e551189b
|
7f2c556587ea1e5c4583fe3b12b8d40c5a2aa2cc
|
refs/heads/master
| 2021-08-31T22:48:55.886186
| 2017-12-23T07:27:04
| 2017-12-23T07:27:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,927
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# \file ensemble-inference-v2.py
# \author chenghuige
# \date 2017-10-21 14:56:40.017795
# \Description This is time consuming (1 hour and 12 minutes) and does not
# perform better, so just using ensemble-inference.py will be fine
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('image_dir_', '/home/gezi/data2/data/ai_challenger/image_caption/pic', '')
flags.DEFINE_string('vocab', '/home/gezi/new/temp/image-caption/ai-challenger/tfrecord/seq-basic/vocab.txt', '')
flags.DEFINE_string('image_checkpoint_file', None, '')  # referenced below; not defined in this snapshot (melt may register it elsewhere)
flags.DEFINE_float('current_length_normalization_factor', None, '')
flags.DEFINE_float('length_normalization_fator', None, '')
import sys, os
import glob
import operator
import melt, gezi
from deepiu.util import text2ids
from deepiu.util.text2ids import texts2ids
from deepiu.util.text_predictor import TextPredictor
import numpy as np
input = sys.argv[1]
type = sys.argv[2]
text2ids.init()
if ',' in input:
files = input.split(',')
else:
files = glob.glob(input + '/model*.%s.txt' % type)
if not 'ensemble' in type:
files = [x for x in files if not 'ensemble' in x]
dir = os.path.dirname(files[0])
ensemble_input_file = 'ensemble.%s.txt' % type
print('files:', files, 'len(files)', len(files), file=sys.stderr)
print('ensemble input file:', ensemble_input_file, file=sys.stderr)
batch_size = int(sys.argv[3])
num_imgs_done = 0
def _predict(predictor, imgs, texts_list, m):
global num_imgs_done
raw_imgs = [melt.read_image(os.path.join(FLAGS.image_dir_, img + '.jpg')) for img in imgs]
text_ids_list = [texts2ids(texts) for texts in texts_list]
raw_imgs = np.array(raw_imgs)
text_ids_list = np.array(text_ids_list)
print([len(x) for x in text_ids_list], sum([len(x) for x in text_ids_list]), \
'num_imgs_done', num_imgs_done, file=sys.stderr)
scores_list = predictor.bulk_predict(raw_imgs, text_ids_list)
if num_imgs_done == 0:
print(scores_list.shape, scores_list, file=sys.stderr)
for img, texts, scores in zip(imgs, texts_list, scores_list):
for text, score in zip(texts, scores):
m[img][text] = score
num_imgs_done += batch_size
def predict(predictor, imgs, texts, m):
batch_imgs = []
batch_texts = []
for img, text in zip(imgs, texts):
batch_imgs.append(img)
batch_texts.append(text)
if len(batch_imgs) == batch_size:
_predict(predictor, batch_imgs, batch_texts, m)
batch_imgs = []
batch_texts = []
if batch_imgs:
_predict(predictor, batch_imgs, batch_texts, m)
candidates = {}
for line in open(ensemble_input_file):
l = line.strip().split('\t')
img, texts = l[0], l[-2]
texts = texts.split(' ')
candidates[img] = texts
for file in files:
model_dir = file.replace('.%s.txt'%type, '')
ofile = os.path.join(dir, '%s.ensemble.%s.txt' % (model_dir, type))
print('model_dir:', model_dir, 'ofile:', ofile)
if gezi.non_empty(ofile):
continue
out = open(ofile, 'w')
Predictor = TextPredictor
image_model = None
image_checkpoint_file = FLAGS.image_checkpoint_file or '/home/gezi/data/image_model_check_point/inception_resnet_v2_2016_08_30.ckpt'
image_model_name = melt.image.get_imagenet_from_checkpoint(image_checkpoint_file).name
print('image_model_name:', image_model_name)
if not melt.has_image_model(model_dir, image_model_name):
image_model = melt.image.ImageModel(image_checkpoint_file, image_model_name)
print('image_model:', image_model, file=sys.stderr)
predictor = Predictor(model_dir, image_model=image_model, vocab_path=FLAGS.vocab,
current_length_normalization_factor=FLAGS.current_length_normalization_factor,
length_normalization_fator=FLAGS.length_normalization_fator)
#predictor = None
m = {}
for line in open(file):
l = line.strip().split('\t')
img, texts , scores = l[0], l[-2], l[-1]
if img not in m:
m[img] = {}
texts = texts.split(' ')
scores = map(float, scores.split(' '))
for text, score in zip(texts, scores):
m[img][text] = score
imgs_tocalc = []
texts_tocalc = []
for img, texts in candidates.items():
texts_ = [x for x in texts if x not in m[img]]
if texts_:
imgs_tocalc.append(img)
texts_tocalc.append(texts_)
predict(predictor, imgs_tocalc, texts_tocalc, m)
for img, result in m.items():
sorted_result = sorted(result.items(), key=operator.itemgetter(1), reverse=True)
texts = []
scores = []
for text, score in sorted_result:
texts.append(text)
scores.append(str(score))
texts = ' '.join(texts)
scores = ' '.join(scores)
print(img, sorted_result[0][0], sorted_result[0][1], texts, scores, sep='\t', file=out)
|
[
"29109317@qq.com"
] |
29109317@qq.com
|
8ab8e5f98c41b9fb41c80d47225b946e72e9c11b
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p3BR/R1/benchmark/startPyquil365.py
|
7bd3571d7cf4002ea5c98aabb057ee26101e6441
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,533
|
py
|
# qubit number=2
# total number=67
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += RX(-0.09738937226128368,2) # number=2
prog += H(1) # number=33
prog += Y(2) # number=56
prog += CZ(2,1) # number=34
prog += H(1) # number=35
prog += H(1) # number=3
prog += H(0) # number=45
prog += CNOT(2,1) # number=60
prog += CZ(1,0) # number=46
prog += H(0) # number=47
prog += Y(1) # number=15
prog += H(0) # number=64
prog += CZ(1,0) # number=65
prog += H(0) # number=66
prog += H(1) # number=19
prog += CZ(0,1) # number=20
prog += RX(-0.6000441968356504,1) # number=28
prog += H(1) # number=21
prog += H(1) # number=30
prog += CZ(0,1) # number=31
prog += H(1) # number=32
prog += H(1) # number=57
prog += CZ(0,1) # number=58
prog += H(1) # number=59
prog += CNOT(0,1) # number=51
prog += X(1) # number=52
prog += CNOT(0,1) # number=53
prog += CNOT(0,1) # number=50
prog += H(2) # number=29
prog += H(1) # number=36
prog += CZ(0,1) # number=37
prog += Y(2) # number=44
prog += H(1) # number=38
prog += Z(1) # number=55
prog += H(1) # number=61
prog += CZ(0,1) # number=62
prog += H(1) # number=63
prog += Z(1) # number=11
prog += RX(-1.1780972450961724,2) # number=54
prog += H(1) # number=42
prog += H(0) # number=39
prog += CZ(1,0) # number=40
prog += H(0) # number=41
prog += CNOT(2,1) # number=26
prog += Y(1) # number=14
prog += CNOT(1,0) # number=5
prog += X(1) # number=6
prog += Z(1) # number=8
prog += X(1) # number=7
prog += H(2) # number=43
prog += RX(-2.42845112122491,1) # number=25
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
    qvm = get_qc('3q-qvm')  # the circuit touches qubits 0-2, which a 1-qubit QVM cannot run
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil365.csv","w")
    print(summarise_results(bitstrings),file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
24d0293db4c4da28e8dcbdb081b1ca13f6d8bde4
|
55e28e35db5bf6a844df3fb47080500b115a893e
|
/day6/test/fan2.py
|
d15faad00febdceb92f9769a970ee29416ca85f6
|
[] |
no_license
|
pylarva/Python
|
5743ffa4a69db42b642d51b62f9e9b69ddbc1a72
|
71b484950e6dbdcf708726a68a3386d0d6ddc07f
|
refs/heads/master
| 2020-04-19T09:11:11.195393
| 2017-11-16T07:32:59
| 2017-11-16T07:32:59
| 67,507,687
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
# !/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:lichengbing
# import fan1
#
# user_input = input('Enter a number: ')
# if hasattr(fan1, user_input):
# func = getattr(fan1, user_input)
# func()
# else:
# print('no module...')
user_input = input('Please enter a URL: ')
k, v = user_input.split('/')
obj = __import__('lib.' + k, fromlist=True)
if hasattr(obj, v):
func = getattr(obj, v)
func()
else:
print('no module...')
|
[
"1326126359@qq.com"
] |
1326126359@qq.com
|
3c21849b572848e17c219d6d9115dda9eaf3d56e
|
d66818f4b951943553826a5f64413e90120e1fae
|
/hackerrank/Algorithms/Cut the Tree/test.py
|
71dbf8141286bc70cf3538beaf3323dcc9737296
|
[
"MIT"
] |
permissive
|
HBinhCT/Q-project
|
0f80cd15c9945c43e2e17072416ddb6e4745e7fa
|
19923cbaa3c83c670527899ece5c3ad31bcebe65
|
refs/heads/master
| 2023-08-30T08:59:16.006567
| 2023-08-29T15:30:21
| 2023-08-29T15:30:21
| 247,630,603
| 8
| 1
|
MIT
| 2020-07-22T01:20:23
| 2020-03-16T06:48:02
|
Python
|
UTF-8
|
Python
| false
| false
| 796
|
py
|
import unittest
import solution
class TestQ(unittest.TestCase):
def test_case_0(self):
self.assertEqual(solution.cutTheTree(
[100, 200, 100, 500, 100, 600],
[
[1, 2],
[2, 3],
[2, 5],
[4, 5],
[5, 6],
]
), 400)
def test_case_1(self):
self.assertEqual(solution.cutTheTree(
[205, 573, 985, 242, 830, 514, 592, 263, 142, 915],
[
[2, 8],
[10, 5],
[1, 7],
[6, 9],
[4, 3],
[8, 10],
[5, 1],
[7, 6],
[9, 4],
]
), 99)
if __name__ == '__main__':
unittest.main()
|
[
"hbinhct@gmail.com"
] |
hbinhct@gmail.com
|
cbb187e7d59019198c646d702aff8fbadc4169a3
|
fa89010f366aa33967c12636bf6cfae6105a9ee5
|
/ex7/nation_mood.py
|
cee5016c3dfd5694fae9bb9cc3434bbd28db8739
|
[] |
no_license
|
borgr/intro2cs
|
4db1985b789d0938d7c9cecddbe5a302f284bd95
|
9030d9831a168d9636093bd5211926666298d80f
|
refs/heads/master
| 2020-05-29T19:35:20.829664
| 2016-10-22T06:07:22
| 2016-10-22T06:07:22
| 15,959,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,976
|
py
|
#############################################################
# FILE : nation_mood.py
# WRITER : Leshem Choshen + borgr + 305385338
# EXERCISE : intro2cs ex7 200132014
# DESCRIPTION:nation wide tweet functions
#############################################################
from data import load_tweets
from geo import us_states, Position, geo_distance
from tweet import Tweet
from geo_tweet_tools import find_center, find_closest_state, group_tweets_by_state
def most_talkative_state(tweets,find_state):
"""Return the state that has the largest number of tweets containing term.
>>> state_centers = {n: find_center(s) for n, s in us_states.items()}
>>> tweets = load_tweets('texas')
>>> find_state = find_closest_state(state_centers);
>>> most_talkative_state(tweets,find_state)
'TX'
>>> tweets = load_tweets('sandwich')
>>> most_talkative_state(tweets,find_state)
'NJ'
"""
most_tweets = -float("inf")
most_state = None
grouped = group_tweets_by_state(tweets,find_state)
for state in grouped:
state_tweets = len(grouped[state])
if most_tweets < state_tweets:
most_tweets = state_tweets
most_state = state
return most_state
def average_sentiments(tweets_by_state,word_sentiments):
"""Calculate the average sentiment of the states by averaging over all
the tweets from each state. Return the result as a dictionary from state
names to average sentiment values (numbers).
If a state has no tweets with sentiment values, leave it out of the
dictionary entirely. Do NOT include states with no tweets, or with tweets
that have no sentiment, as 0. 0 represents neutral sentiment, not unknown
sentiment.
tweets_by_state -- A dictionary from state names to lists of tweets
"""
average = {}
for state in tweets_by_state.keys():
sentiments = []
for tweet in tweets_by_state[state]:
sentiment = tweet.get_sentiment(word_sentiments)
if sentiment is not None:
sentiments.append(sentiment)  # was Sentiment, an undefined name
if sentiments:
average.update({state: sum(sentiments) / len(sentiments)})
return average
def group_tweets_by_hour(tweets):
"""Return a list of lists of tweets that are gouped by the hour
they were posted.
The indexes of the returned list represent the hour when they were posted
- the integers 0 through 23.
tweets_by_hour[i] is the list of all
tweets that were posted between hour i and hour i + 1. Hour 0 refers to
midnight, while hour 23 refers to 11:00PM.
To get started, read the Python Library documentation for datetime
objects:
http://docs.python.org/py3k/library/datetime.html#datetime.datetime
tweets -- A list of tweets to be grouped
"""
return [[tweet for tweet in tweets
if tweet.get_time().hour == hour]
for hour in range(24)]
|
[
"noreply@github.com"
] |
borgr.noreply@github.com
|
c843c8f5ac94babd9d6dda5862e4393a08c25abd
|
b0a1884cd6c40362085dc08c7a091ed7cf1ece7f
|
/eelbrain/tests/test_mne.py
|
06e2009c988be2ab9c2632f8d57ae4e63439b6be
|
[] |
no_license
|
LauraGwilliams/Eelbrain
|
4f37dbcc314063e92425dadf9b1f9f2aeea69a9c
|
d04fa2e7108c5f683fc145fc44a794d39928f2cb
|
refs/heads/master
| 2021-01-18T16:00:36.727474
| 2015-10-23T13:38:17
| 2015-10-23T13:43:17
| 34,459,099
| 0
| 0
| null | 2015-04-23T13:51:09
| 2015-04-23T13:51:09
| null |
UTF-8
|
Python
| false
| false
| 7,674
|
py
|
"""Test mne interaction"""
from itertools import izip
import os
from nose.tools import eq_, ok_, assert_less_equal, assert_not_equal, assert_in
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import mne
from eelbrain import datasets, load, testnd, morph_source_space, Factor
from eelbrain._data_obj import asndvar, SourceSpace, _matrix_graph
from eelbrain._mne import shift_mne_epoch_trigger, combination_label
from eelbrain.tests.test_data import assert_dataobj_equal
# mne paths
data_dir = mne.datasets.sample.data_path()
subjects_dir = os.path.join(data_dir, 'subjects')
def test_source_estimate():
"Test SourceSpace dimension"
mne.set_log_level('warning')
ds = datasets.get_mne_sample(src='ico')
dsa = ds.aggregate('side')
# test auto-conversion
asndvar('epochs', ds=ds)
asndvar('epochs', ds=dsa)
asndvar(dsa['epochs'][0])
# source space clustering
res = testnd.ttest_ind('src', 'side', ds=ds, samples=0, pmin=0.05,
tstart=0.05, mintime=0.02, minsource=10)
eq_(res.clusters.n_cases, 52)
# test disconnecting parc
src = ds['src']
source = src.source
parc = source.parc
orig_conn = set(map(tuple, source.connectivity()))
disc_conn = set(map(tuple, source.connectivity(True)))
ok_(len(disc_conn) < len(orig_conn))
for pair in orig_conn:
s, d = pair
if pair in disc_conn:
eq_(parc[s], parc[d])
else:
assert_not_equal(parc[s], parc[d])
# threshold-based test with parc
srcl = src.sub(source='lh')
res = testnd.ttest_ind(srcl, 'side', ds=ds, samples=10, pmin=0.05,
tstart=0.05, mintime=0.02, minsource=10,
parc='source')
eq_(res._cdist.dist.shape[1], len(srcl.source.parc.cells))
label = 'superiortemporal-lh'
c_all = res.find_clusters(maps=True)
c_label = res.find_clusters(maps=True, source=label)
assert_array_equal(c_label['location'], label)
for case in c_label.itercases():
id_ = case['id']
idx = c_all['id'].index(id_)[0]
eq_(case['v'], c_all[idx, 'v'])
eq_(case['tstart'], c_all[idx, 'tstart'])
eq_(case['tstop'], c_all[idx, 'tstop'])
assert_less_equal(case['p'], c_all[idx, 'p'])
assert_dataobj_equal(case['cluster'],
c_all[idx, 'cluster'].sub(source=label))
# threshold-free test with parc
res = testnd.ttest_ind(srcl, 'side', ds=ds, samples=10, tstart=0.05,
parc='source')
cl = res.find_clusters(0.05)
eq_(cl.eval("p.min()"), res.p.min())
mp = res.masked_parameter_map()
assert_in(mp.min(), (0, res.t.min()))
assert_in(mp.max(), (0, res.t.max()))
# indexing source space
s_sub = src.sub(source='fusiform-lh')
idx = source.index_for_label('fusiform-lh')
s_idx = src[idx]
assert_dataobj_equal(s_sub, s_idx)
def test_dataobjects():
"Test handing MNE-objects as data-objects"
ds = datasets.get_mne_sample(sns=True)
ds['C'] = Factor(ds['index'] > 155, labels={False: 'a', True: 'b'})
sds = ds.sub("side % C != ('L', 'b')")
ads = sds.aggregate('side % C')
eq_(ads.n_cases, 3)
# connectivity
sensor = ds['sns'].sensor
c = sensor.connectivity()
assert_array_equal(c[:, 0] < c[:, 1], True)
eq_(c.max(), len(sensor) - 1)
def test_epoch_trigger_shift():
"Test the shift_mne_epoch_trigger() function"
epochs = datasets.get_mne_sample(sns=True, sub="[1,2,3]")['epochs']
n_lost_start = np.sum(epochs.times < epochs.tmin + 0.05)
n_lost_end = np.sum(epochs.times > epochs.tmax - 0.05)
data = epochs.get_data()
epochs_s = shift_mne_epoch_trigger(epochs, [0, 0, 0])
assert_array_equal(epochs_s.get_data(), data)
epochs_s = shift_mne_epoch_trigger(epochs, [-0.05, 0., 0.05])
data_s = epochs_s.get_data()
assert_array_equal(data_s[0], data[0, :, : -(n_lost_end + n_lost_start)])
assert_array_equal(data_s[1], data[1, :, n_lost_start: -n_lost_end])
assert_array_equal(data_s[2], data[2, :, n_lost_end + n_lost_start:])
assert_allclose(epochs_s.times, epochs.times[n_lost_start: -n_lost_end],
rtol=1e-1, atol=1e-3) # ms accuracy
epochs_s = shift_mne_epoch_trigger(epochs, [0.05, 0., 0.05])
data_s = epochs_s.get_data()
assert_array_equal(data_s[0], data[0, :, n_lost_end:])
assert_array_equal(data_s[1], data[1, :, :-n_lost_end])
assert_array_equal(data_s[2], data[2, :, n_lost_end:])
assert_allclose(epochs_s.times, epochs.times[:-n_lost_end],
rtol=1e-1, atol=1e-3) # ms accuracy
def test_combination_label():
"Test combination label creation"
labels = {l.name: l for l in
mne.read_labels_from_annot('fsaverage', subjects_dir=subjects_dir)}
# standard
l = combination_label('temporal', "superiortemporal + middletemporal + inferiortemporal", labels)
lh = labels['superiortemporal-lh'] + labels['middletemporal-lh'] + labels['inferiortemporal-lh']
rh = labels['superiortemporal-rh'] + labels['middletemporal-rh'] + labels['inferiortemporal-rh']
eq_(len(l), 2)
eq_(l[0].name, 'temporal-lh')
eq_(l[1].name, 'temporal-rh')
assert_array_equal(l[0].vertices, lh.vertices)
assert_array_equal(l[1].vertices, rh.vertices)
# only rh
l = combination_label('temporal-rh', "superiortemporal + middletemporal + inferiortemporal", labels)
eq_(len(l), 1)
eq_(l[0].name, 'temporal-rh')
assert_array_equal(l[0].vertices, rh.vertices)
# names with .
labels = {l.name: l for l in
mne.read_labels_from_annot('fsaverage', 'PALS_B12_Brodmann', subjects_dir=subjects_dir)}
l = combination_label('Ba38-lh', "Brodmann.38", labels)[0]
assert_array_equal(l.vertices, labels['Brodmann.38-lh'].vertices)
def test_morphing():
mne.set_log_level('warning')
sss = datasets._mne_source_space('fsaverage', 'ico-4', subjects_dir)
vertices_to = [sss[0]['vertno'], sss[1]['vertno']]
ds = datasets.get_mne_sample(-0.1, 0.1, src='ico', sub='index==0', stc=True)
stc = ds['stc', 0]
morph_mat = mne.compute_morph_matrix('sample', 'fsaverage', stc.vertices,
vertices_to, None, subjects_dir)
ndvar = ds['src']
morphed_ndvar = morph_source_space(ndvar, 'fsaverage')
morphed_stc = mne.morph_data_precomputed('sample', 'fsaverage', stc,
vertices_to, morph_mat)
assert_array_equal(morphed_ndvar.x[0], morphed_stc.data)
morphed_stc_ndvar = load.fiff.stc_ndvar([morphed_stc], 'fsaverage', 'ico-4',
subjects_dir, 'dSPM', False, 'src',
parc=None)
assert_dataobj_equal(morphed_ndvar, morphed_stc_ndvar)
def test_source_space():
"Test SourceSpace dimension"
for subject in ['fsaverage', 'sample']:
mne_src = datasets._mne_source_space(subject, 'ico-4', subjects_dir)
vertno = [mne_src[0]['vertno'], mne_src[1]['vertno']]
ss = SourceSpace(vertno, subject, 'ico-4', subjects_dir, 'aparc')
# connectivity
conn = ss.connectivity()
mne_conn = mne.spatial_src_connectivity(mne_src)
assert_array_equal(conn, _matrix_graph(mne_conn))
# sub-space connectivity
sssub = ss[ss.dimindex('superiortemporal-rh')]
ss2 = SourceSpace(vertno, subject, 'ico-4', subjects_dir, 'aparc')
ss2sub = ss2[ss2.dimindex('superiortemporal-rh')]
assert_array_equal(sssub.connectivity(), ss2sub.connectivity())
|
[
"christianmbrodbeck@gmail.com"
] |
christianmbrodbeck@gmail.com
|
2fc98b738bd56aa8aff7591590b98098af6a04b0
|
37b014820aef8b83e3eca3f102b3d04ef504066e
|
/readcsv/settings.py
|
65777c4908e4034159ad3c696a1251fa850b447a
|
[] |
no_license
|
juniorcarvalho/readcsv
|
d8ae35efe65c90363920a1049c17554b0757fe15
|
0a20ad4f96929b9f5bab703f2c47f6c0ff345bc3
|
refs/heads/master
| 2022-05-25T08:00:16.372555
| 2021-04-08T20:38:00
| 2021-04-08T20:38:00
| 208,878,426
| 1
| 0
| null | 2022-04-22T22:17:01
| 2019-09-16T19:14:28
|
Python
|
UTF-8
|
Python
| false
| false
| 5,052
|
py
|
"""
Django settings for readcsv project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from decouple import config, Csv
from dj_database_url import parse as dburl
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = config("ALLOWED_HOSTS", default="*", cast=Csv())
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'readcsv.core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'readcsv.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'readcsv.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DEFAULT_DBURL = "sqlite:///" + os.path.join(BASE_DIR, "db.sqlite3")
DATABASES = {"default": config("DATABASE_URL", default=DEFAULT_DBURL, cast=dburl)}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGGING_APPNAME = 'readcsvlog'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
},
'logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR, LOGGING_APPNAME + '.log'),
'maxBytes': 1024 * 1024 * 15,
'formatter': 'verbose'
},
},
'loggers': {
'django': {
'handlers': ['console'],
'propagate': True,
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
LOGGING_APPNAME: {
'handlers': ['console', 'mail_admins', 'logfile', ],
'level': 'DEBUG',
}
}
}
# twitter
TWITTER_API_KEY = config('TWITTER_API_KEY', default='')
TWITTER_SECRET_KEY = config('TWITTER_SECRET_KEY', default='')
TWITTER_ACCESS_TOKEN = config('TWITTER_ACCESS_TOKEN', default='')
TWITTER_ACCESS_TOKEN_SECRET = config('TWITTER_ACCESS_TOKEN_SECRET', default='')
|
[
"joseadolfojr@gmail.com"
] |
joseadolfojr@gmail.com
|
a962ff1bdbf5794c6ccf3662675aacd15b94ab20
|
de4c5ecaf541d67e7cbf02837d93cf303d23b5da
|
/tests/app/views/home_tests.py
|
457f11123076d3a77c42d472cd58e5fe3b42dc01
|
[
"Apache-2.0"
] |
permissive
|
shadowmint/py-test-watcher
|
d140064cafeb0b2efce8a403a3abd63322f812d0
|
36d33206b104c81e2d6acebdbed2dddee71fe2a7
|
refs/heads/master
| 2021-01-19T14:07:13.441335
| 2013-07-01T06:07:56
| 2013-07-01T06:07:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,641
|
py
|
# Copyright 2013 Douglas Linder
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import bootstrap
import pau
import os
from pau.model import *
from nark import *
from pau.views import Home
class HomeTests(unittest.TestCase):
# db for this test
db_name = "HomeTests.sqlite"
def setup(self):
""" Setup db and return instance """
self.config = pau.IConfig
self.session = pau.ISession
pau.resolve(self)
self.session.assets = Assets()
self.config.db = self.db_name
self.config.db_debug = False
self.db = pau.IDb
pau.resolve(self)
self.prefs = Prefs()
pau.resolve(self.prefs)
# Instance
i = Home()
pau.resolve(i)
return i
def teardown(self):
self.db.reset()
try:
os.remove(self.db_name)
except:
pass
def test_can_create_instance(self):
a = Assert()
i = self.setup()
a.not_null(i, "Unable to create instance")
self.teardown()
def test_has_setup_fails(self):
a = Assert()
i = self.setup()
rtn = i.has_setup("", "")
a.false(rtn["result"], "Failed to not find preferences")
self.teardown()
def test_has_setup_passes(self):
a = Assert()
i = self.setup()
self.prefs.add(pau.Preferences.LOCATION, "VALUE")
rtn = i.has_setup("", "")
a.true(rtn["result"], "Failed to find preferences")
self.teardown()
def test_preferences(self):
a = Assert()
i = self.setup()
self.prefs.add(pau.Preferences.LOCATION, "VALUE")
rtn = i.preference("", "LOCATION")
a.equals(rtn["result"], "VALUE", "Failed to find preference by key")
self.teardown()
def test_flash(self):
a = Assert()
i = self.setup()
i.flash_service.notice("Hello World")
i.flash_service.success("Hello Again")
rtn = i.flash("", "")
a.equals(rtn["result"], "Hello World", "Failed to return oldest message")
rtn = i.flash("", "")
a.equals(rtn["result"], "Hello Again", "Failed to return second message")
rtn = i.flash("", "")
a.false(rtn["result"], "Invalid return when no messages")
self.teardown()
if __name__ == "__main__":
unittest.main()
|
[
"linderd@iinet.net.au"
] |
linderd@iinet.net.au
|
576a5caebe9274dbe6a976f5be2cda765413cea2
|
0faf534ebb6db6f32279e5bee25b968bd425ce3a
|
/tests/extension/thread_/stream_sink_fifo/test_thread_stream_sink_fifo.py
|
2fbe7fdf2573c48b8a204e9797df3ef6b7351a35
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
PyHDI/veriloggen
|
e8647cb2d40737d84e31d6b89c5799bab9cbd583
|
f2b1b9567150af097eed1b5e79ba2b412854ef43
|
refs/heads/develop
| 2023-08-09T10:02:35.626403
| 2023-08-09T00:50:14
| 2023-08-09T00:50:14
| 37,813,184
| 282
| 60
|
Apache-2.0
| 2023-07-20T03:03:29
| 2015-06-21T15:05:30
|
Python
|
UTF-8
|
Python
| false
| false
| 547
|
py
|
from __future__ import absolute_import
from __future__ import print_function
import os
import veriloggen
import thread_stream_sink_fifo
def test(request):
veriloggen.reset()
simtype = request.config.getoption('--sim')
rslt = thread_stream_sink_fifo.run(filename=None, simtype=simtype,
outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')
verify_rslt = [line for line in rslt.splitlines() if line.startswith('# verify:')][0]
assert(verify_rslt == '# verify: PASSED')
|
[
"shta.ky1018@gmail.com"
] |
shta.ky1018@gmail.com
|
be41226a5a51288ece78ea6a8101e94652515a8a
|
74b812828a80190636523cbad4f3a6fc239484ba
|
/openff/bespokefit/executor/services/optimizer/app.py
|
cb29e0c1ed66c8e77bb693ced204a9a750190e59
|
[
"MIT"
] |
permissive
|
openforcefield/openff-bespokefit
|
ef438ddc9a072a280e155d1e2d097068a369f73f
|
97262756c5c014e9bd5f799d64755b7f73a6160e
|
refs/heads/main
| 2023-08-17T07:09:11.715404
| 2023-08-04T09:43:57
| 2023-08-04T09:43:57
| 241,694,600
| 29
| 4
|
MIT
| 2023-09-08T06:14:39
| 2020-02-19T18:31:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
import json
from fastapi import APIRouter
from qcelemental.util import serialize
from openff.bespokefit.executor.services import current_settings
from openff.bespokefit.executor.services.optimizer import worker
from openff.bespokefit.executor.services.optimizer.models import (
OptimizerGETResponse,
OptimizerPOSTBody,
OptimizerPOSTResponse,
)
from openff.bespokefit.executor.utilities.celery import get_task_information
router = APIRouter()
__settings = current_settings()
__GET_ENDPOINT = "/" + __settings.BEFLOW_OPTIMIZER_PREFIX + "/{optimization_id}"
@router.get(__GET_ENDPOINT)
def get_optimization(optimization_id: str) -> OptimizerGETResponse:
task_info = get_task_information(worker.celery_app, optimization_id)
# noinspection PyTypeChecker
return {
"id": optimization_id,
"self": __settings.BEFLOW_API_V1_STR
+ __GET_ENDPOINT.format(optimization_id=optimization_id),
"status": task_info["status"],
"result": task_info["result"],
"error": json.dumps(task_info["error"]),
}
@router.post("/" + __settings.BEFLOW_OPTIMIZER_PREFIX)
def post_optimization(body: OptimizerPOSTBody) -> OptimizerPOSTResponse:
# Enqueue the task with the given parameters via Celery's delay() method
task = worker.optimize.delay(
optimization_input_json=serialize(body.input_schema, "json")
)
return OptimizerPOSTResponse(
id=task.id,
self=__settings.BEFLOW_API_V1_STR
+ __GET_ENDPOINT.format(optimization_id=task.id),
)
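# Request-flow sketch (the prefix values come from the deployed settings and
# are illustrative here):
# POST {BEFLOW_API_V1_STR}/{BEFLOW_OPTIMIZER_PREFIX}      -> OptimizerPOSTResponse(id, self)
# GET  {BEFLOW_API_V1_STR}/{BEFLOW_OPTIMIZER_PREFIX}/{id} -> status / result / error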
|
[
"noreply@github.com"
] |
openforcefield.noreply@github.com
|
b2ccc54baa8f5810be524d0f142b9b43562381c9
|
a8592d34f144b71794ebf30f1c2a1b5faf0b053c
|
/Praktikum2023/Modul_08/codes/diffusion_02.py
|
c407706419324a395034cba60ab038151f2df39b
|
[] |
no_license
|
f-fathurrahman/ffr-MetodeNumerik
|
ee9a6a7153b174b1ba3d714fe61ccbd1cb1dd327
|
e3a9da224c0fd5b32e671708e890018a3c4104c4
|
refs/heads/master
| 2023-07-19T22:29:38.810143
| 2023-07-07T10:02:34
| 2023-07-07T10:02:34
| 107,272,110
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,735
|
py
|
# Explicit method (forward Euler) for diffusion equation
# Using two vectors for current and future time
import numpy as np
import matplotlib.pyplot as plt
# Global variables !!!
L = 1.5
Tfinal = 1.0
α = 0.1
DO_PLOT = True
# Manufactured solution
def u_exact(t, x):
return 5*x*t*(L-x)
def initial_cond(x):
return u_exact(0, x)
def source_term(t, x):
return 10*α*t + 5*x*(L - x)
Nx = 25
x = np.linspace(0.0, L, Nx+1)
Δx = x[1] - x[0]
print("Δx = ", Δx)
Nt = 200
t = np.linspace(0.0, Tfinal, Nt+1)
Δt = t[1] - t[0]
print("Δt = ", Δt)
print("Final t = ", t[-1])
F = α * Δt / Δx**2
print("F = ", F)
if F > 0.5:
print("WARNING: solution is not stable")
# exit()
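# (Von Neumann analysis of this explicit FTCS update gives the stability
# requirement F = α*Δt/Δx**2 <= 0.5, which is why F is checked above.)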
# Use only two vectors for the solution
un = np.zeros(Nx+1)
unp1 = np.zeros(Nx+1)
un[:] = initial_cond(x)
for n in range(0,Nt):
# Apply boundary condition
un[0] = 0.0 # boundary condition at x=0
un[Nx] = 0.0 # boundary condition at x=L
for i in range(1,Nx):
fni = source_term(t[n], x[i])
unp1[i] = un[i] + F*( un[i+1] - 2*un[i] + un[i-1] ) + Δt*fni
#
un[:] = unp1[:] # update for the next iteration
if DO_PLOT:
plt.clf()
plt.plot(x, un)
plt.title("t = " + str(t[n]))
plt.savefig("IMG_diff1d_explicit_" + str(n) + ".png", dpi=150)
print("n = " + str(n) + " is done")
if DO_PLOT:
plt.clf()
plt.plot(x, un, label="numerical")
plt.plot(x, u_exact(t[Nt],x), label="exact sol")
plt.title("t = " + str(t[Nt]))
plt.legend()
plt.savefig("IMG_diff1d_explicit_COMPARE_" + str(n) + ".png", dpi=150)
# Difference between exact solution and numerical solution
Δu = u_exact(t[Nt],x) - un
norm_Δu = np.linalg.norm(Δu)
print("norm_du = ", norm_Δu)
|
[
"fadjar.fathurrahman@gmail.com"
] |
fadjar.fathurrahman@gmail.com
|
cbd55ab8d75ea16cc6b47917fdb3d4bd5b865eac
|
a3a3e1298db9555eda37f8da0c74a437d897cb1f
|
/compiled/Python3/Euler_Problem-030.py
|
a12edacad0729aad1effb41722fb40fbead3d74c
|
[
"MIT"
] |
permissive
|
LStepanek/Project-Euler_Befunge
|
58f52254ee039ef6a5204fc65e62426c5e9d473a
|
f35fb2adecd737e410dee7b89b456cd61b25ce78
|
refs/heads/master
| 2021-01-01T17:51:52.413415
| 2017-05-03T17:23:01
| 2017-05-03T17:26:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,953
|
py
|
#!/usr/bin/env python3
# transpiled with BefunCompile v1.1.0 (c) 2015
def td(a,b):
return ((0)if(b==0)else(a//b))
def tm(a,b):
return ((0)if(b==0)else(a%b))
s=[]
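# s is the Befunge data stack; sp/sa/sr below implement pop/push/peek,
# with pop and peek returning 0 on an empty stack per Befunge semantics.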
def sp():
global s
if (len(s) == 0):
return 0
return s.pop()
def sa(v):
global s
s.append(v)
def sr():
global s
if (len(s) == 0):
return 0
return s[-1]
def _0():
sa(0)
sa(1)
sa(1)
sa(0)
sa(5904)
return 1
def _1():
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sp()+1);
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sr());
return 2
def _2():
return (13)if(sp()!=0)else(3)
def _3():
global t0
sp();
v0=sp()
sa(sp()-v0)
t0=sp()
return (12)if((t0)!=0)else(4)
def _4():
sa(sp()*59049);
return 5
def _5():
global t0
global t1
global t2
global t3
global t4
global t5
global t6
global t7
sa(sr());
sa(sr());
sa(tm(sr(),10))
sa(sr());
sa(sr());
sa(sp()*sp());
sa(sr());
sa(sp()*sp());
t0=sp()
sa(sp()*t0);
t1=sp()
sa(td(sp(),10))
sa(tm(sr(),10))
sa(sr());
sa(sr());
sa(sp()*sp());
sa(sr());
sa(sp()*sp());
t0=sp()
sa(sp()*t0);
t2=sp()
sa(td(sp(),10))
sa(tm(sr(),10))
sa(sr());
sa(sr());
sa(sp()*sp());
sa(sr());
sa(sp()*sp());
t0=sp()
sa(sp()*t0);
t3=sp()
sa(td(sp(),10))
sa(tm(sr(),10))
sa(sr());
sa(sr());
sa(sp()*sp());
sa(sr());
sa(sp()*sp());
t0=sp()
sa(sp()*t0);
t4=sp()
sa(td(sp(),10))
sa(tm(sr(),10))
sa(sr());
sa(sr());
sa(sp()*sp());
sa(sr());
sa(sp()*sp());
t0=sp()
sa(sp()*t0);
t5=sp()
sa(td(sp(),10))
sa(tm(sr(),10))
sa(sr());
sa(sr());
sa(sp()*sp());
sa(sr());
sa(sp()*sp());
t0=sp()
sa(sp()*t0);
t6=sp()
sa(td(sp(),10))
sa(tm(sp(),10))
sa(sr());
sa(sr());
sa(sp()*sp());
sa(sr());
sa(sp()*sp());
t0=sp()
sa(sp()*t0);
t7=sp()
t0=t7+t6
t6=t5+t0
t0=t4+t6
t4=t3+t0
t0=t2+t4
t2=t1+t0
sa(sp()-t2);
t0=sp()
return (6)if((t0)!=0)else(11)
def _6():
sa(sp()-1);
sa(sr());
sa((0)if(sp()!=0)else(1))
return (7)if(sp()!=0)else(5)
def _7():
sp();
sp();
return 8
def _8():
sa(sp()+sp());
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sr());
return (9)if(sp()!=0)else(10)
def _9():
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sp()+sp());
return 8
def _10():
sp();
print(sp(),end="",flush=True)
return 14
def _11():
sa(sr());
return 6
def _12():
sa(sp()+1);
sa(sr());
sa(sr()*59049)
sa(0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sr());
return 2
def _13():
sa(td(sp(),10))
return 1
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13]
c=0
while c<14:
c=m[c]()
|
[
"mailport@mikescher.de"
] |
mailport@mikescher.de
|
d237833b720ab49c6906e5708002d2834372ac3d
|
9ddfd30620c39fb73ac57e79eae0a001c45db45f
|
/addons/prisme_contact_enhancement/models/__init__.py
|
56357c83a049980c861e401c12364ef855b55a5d
|
[] |
no_license
|
zamzamintl/silver
|
a89bacc1ba6a7a59de1a92e3f7c149df0468e185
|
8628e4419c4ee77928c04c1591311707acd2465e
|
refs/heads/master
| 2023-01-06T20:29:25.372314
| 2020-10-29T21:02:41
| 2020-10-29T21:02:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,059
|
py
|
# -*- coding: utf-8 -*-
###########################################################################
#
# Prisme Solutions Informatique SA
# Copyright (c) 2016 Prisme Solutions Informatique SA <http://prisme.ch>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Project ID: OERP-001-01
#
# Modifications:
#
##########################################################################
from . import res_partner_model
|
[
"mohamed.abdelrahman@businessborderlines.com"
] |
mohamed.abdelrahman@businessborderlines.com
|
d6830d4ff16376893003b80808781e3aec0c3bb2
|
c104dbd09a853725cb4f4b17df7c5dd59d47e04e
|
/test/test_modify_alert_policy.py
|
83402c1d6893da6593578261001770b0aa9a0dea
|
[
"Apache-2.0"
] |
permissive
|
bm-lab/opsgenie-python-sdk
|
5a64e2c24f1b9168ecadf482ba8084ba27a659fc
|
244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
|
refs/heads/master
| 2021-10-09T03:18:48.101672
| 2018-12-15T01:03:36
| 2018-12-20T15:13:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 902
|
py
|
# coding: utf-8
"""
OpsGenie REST API
OpsGenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import opsgenie_swagger
from opsgenie_swagger.models.modify_alert_policy import ModifyAlertPolicy # noqa: E501
from opsgenie_swagger.rest import ApiException
class TestModifyAlertPolicy(unittest.TestCase):
"""ModifyAlertPolicy unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testModifyAlertPolicy(self):
"""Test ModifyAlertPolicy"""
# FIXME: construct object with mandatory attributes with example values
# model = opsgenie_swagger.models.modify_alert_policy.ModifyAlertPolicy() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"c.chary@criteo.com"
] |
c.chary@criteo.com
|
402ccdca5869a573340f6de12ef5e4dbbe9e588a
|
e6f0b705a768229c160a2603393709b4b8a683be
|
/.history/books/api/views_20210424164436.py
|
c4d52011855368b33b5770873ed269be0a6587e8
|
[] |
no_license
|
walaaElbasha/bookstore_django_basics
|
0741e16e3fe69d4460c095bb8afc964f52d96f1d
|
2037a434d7bdb0ca1954ca3de9f56655b77ec64e
|
refs/heads/main
| 2023-04-19T07:01:05.416800
| 2021-04-28T15:04:15
| 2021-04-28T15:04:15
| 361,507,660
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,712
|
py
|
from django .shortcuts import render,redirect
from rest_framework.response import Response
from rest_framework import status
from books.models import Book,Isbn
from .serializers import IsbnSerializer, UserSerializer # UserSerializer (assumed to live in .serializers) is required by api_signup below
from rest_framework.decorators import api_view
from rest_framework.permissions import IsAuthenticated,BasePermission
from django.http.response import JsonResponse
from rest_framework.parsers import JSONParser
#from django.contrib.auth.decorators import api_view , permission_classes
#from rest_framework import
# class IsViewer(BasePermission):
# def has_permission(self,request,view):
# return request.user.groups.filter(name="viewers").exists()
@api_view(["POST"])
def api_signup(request):
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(data={
"success": True,
"message": "User has been registered successfully"
}, status=status.HTTP_201_CREATED)
return Response(data={
"success": False,
"errors": serializer.errors
}, status=status.HTTP_400_BAD_REQUEST)
@api_view(["GET"])
def index(request):
#books = Book.objects.all()
#serializer=BookSerializer(instance=books,many=True)
isbns=Isbn.objects.all()
serializer=IsbnSerializer(instance=isbns,many=True)
return Response(data=serializer.data,status=status.HTTP_200_OK)
@api_view(["POST"])
def create(request):
serializer=IsbnSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(data={
"success":True,
"message":"Book has been created successfully",
},
status=status.HTTP_200_OK,
)
return Response(data={
"success":False,
"errors":serializer.errors
},status=status.HTTP_400_BAD_REQUEST,
)
@api_view(["DELETE"])
def destroy(request, id):
try:
isbn=Isbn.objects.get(pk=id)
isbn.delete()
return Response(data={
"success":True,
"message":"Book has been deleted successfully",
},
status=status.HTTP_200_OK,
)
except Isbn.DoesNotExist:
return JsonResponse({'message': 'The book does not exist'}, status=status.HTTP_404_NOT_FOUND)
@api_view(["PUT"])
def update(request, id):
isbn=Isbn.objects.get(pk=id)
isbn_data = JSONParser().parse(request)
isbn_serializer = IsbnSerializer(isbn, data=isbn_data)
if isbn_serializer.is_valid():
isbn_serializer.save()
return JsonResponse(isbn_serializer.data)
return JsonResponse(isbn_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
[
"walaaelbasha40@yahoo.com"
] |
walaaelbasha40@yahoo.com
|
5b3a4cf867926248aefb635b5b895a9b7d33a3f8
|
6a087c6fb00ba91f815f997450306a3fac020a8b
|
/Test_case/CLGL/test自定义车辆.py
|
6a0ed72cd83c09c38f031f3ee7b5841b4c947e2f
|
[] |
no_license
|
anghu3/xizangbianfang
|
cd2037af5e06cc558bf3ef9ff145e0c33495139b
|
f4f35195963017c22bd4875853ef31f280f4b6a8
|
refs/heads/master
| 2020-03-28T22:31:06.591507
| 2018-12-14T07:36:47
| 2018-12-14T07:36:47
| 149,238,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,930
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 21 14:11:17 2018
@author: PCCC
"""
import unittest
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import time
import os
import re
from public_package.pubilc_package import url,login_name,login_name_test,login_password,login_password_test
from public_package.pubilc_package import sheet_setting, search, reset, currMenupath, page_title, goback, saveBtn , sheet_menu,sheet_prompt_message,work_space
from public_package.pubilc_package import TESTCASE
import HTMLTestRunner
import xlrd
'''
Test case name:
Test case ID:
Test case scenario:
Test case author:
'''
xlsfile=work_space+r'\\'+sheet_menu.col_values(6,33,34)[0]
excel = xlrd.open_workbook(xlsfile)
global sheet
sheet = excel.sheet_by_name('自定义车辆')
class TESTCAST_ZIDINGYICHELIANG(TESTCASE):
def setUp(self):
self.dr = webdriver.Chrome()
self.dr.maximize_window()
def tearDown(self):
# print("script execution finished")
self.dr.quit()
def login(self, username, password):
self.dr.get(url)
self.dr.find_element_by_id('vv').send_keys(username)
self.dr.find_element_by_xpath('//*[@id="login_ff"]/div[2]/input').send_keys(password)
self.dr.find_element_by_xpath('//*[@id="login_ff"]/a').click()
def zidingyicheliang_search(self):
self.login(login_name, login_password)
self.dr.find_element_by_xpath(sheet_menu.col_values(1,33,34)[0]).click()
time.sleep(2)
self.assertEqual('车辆管理',self.dr.find_element_by_xpath(currMenupath).text,'校验车辆管理菜单')
self.dr.find_element_by_xpath(sheet_menu.col_values(3,33,34)[0]).click()
self.dr.find_element_by_xpath(sheet_menu.col_values(5,33,34)[0]).click()
self.dr.switch_to.frame('iframeb')
time.sleep(3)
self.assertEqual('自定义车辆列表',self.dr.find_element_by_xpath(page_title).text,'校验自定义车辆菜单')
def test01_zidingyicheliang_add(self):
self.zidingyicheliang_search()
add_value_cphm=sheet.col_values(1,0,1)[0]
self.dr.find_element_by_xpath('/html/body/div[3]/div[1]/div[2]/a[2]').click()
self.dr.find_element_by_xpath('//*[@id="vehicleNo"]').send_keys(add_value_cphm)
self.dr.find_element_by_xpath('//*[@id="veForm"]/div[1]/div[2]/a/span').click()
time.sleep(2)
self.dr.find_element_by_xpath('//*[@id="monitorReason"]').send_keys(sheet.col_values(1,2,3)[0])
self.dr.find_element_by_xpath('//*[@id="modifyBy"]').send_keys(sheet.col_values(1,3,4)[0])
self.dr.find_element_by_xpath('//*[@id="monitorUnit"]').click()
time.sleep(1)
self.dr.find_element_by_xpath('//*[@id="treeSelect_45_switch"]').click()
time.sleep(1)
self.dr.find_element_by_xpath('//*[@id="treeSelect_46_switch"]').click()
time.sleep(1)
self.dr.find_element_by_xpath('//*[@id="treeSelect_48_span"]').click()
self.dr.find_element_by_xpath(saveBtn).click()
self.dr.switch_to.default_content()
self.dr.switch_to.frame('iframeb')
time.sleep(1)
self.assertEqual(sheet_prompt_message.col_values(1, 1, 2)[0],
self.dr.find_element_by_xpath('//*[@id="gritter-item-1"]/div[2]/div[2]/p').text, '新增成功提示信息校验')
print('车辆管理-自定义车辆:新增功能正常')
def test02_zidingyicheliang_search_cphm(self):
self.zidingyicheliang_search()
search_value_cphm=sheet.col_values(1,0,1)[0]
cphm_path=sheet.col_values(1,1,2)[0]
self.dr.find_element_by_xpath(cphm_path).send_keys(search_value_cphm)
self.dr.find_element_by_xpath(search).click()
self.dr.switch_to.default_content()
time.sleep(2)
self.dr.switch_to.frame('iframeb')
paginal_number=self.dr.find_element_by_xpath(sheet_setting.col_values(4,1,2)[0]).text
column=3
self.pagination_num(paginal_number,search_value_cphm,column)
self.dr.find_element_by_xpath(reset).click()
time.sleep(2)
self.dr.find_element_by_xpath(search).click()
self.assertEqual('',self.dr.find_element_by_xpath(cphm_path).get_attribute('value'),'车牌号码-重置功能异常')
print('车辆管理-自定义车辆:车牌条件查询功能正常')
def test03_zidingyicheliang_details(self):
self.zidingyicheliang_search()
search_value_cphm = sheet.col_values(1, 0, 1)[0]
cphm_path = sheet.col_values(1, 1, 2)[0]
self.dr.find_element_by_xpath(cphm_path).send_keys(search_value_cphm)
self.dr.find_element_by_xpath(search).click()
self.dr.switch_to.default_content()
time.sleep(2)
self.dr.switch_to.frame('iframeb')
paginal_number = self.dr.find_element_by_xpath(sheet_setting.col_values(4, 1, 2)[0]).text
column = 3
self.pagination_num(paginal_number, search_value_cphm, column)
self.dr.find_element_by_xpath('//*[@id="list"]/tbody/tr[1]/td[11]/a').click()
self.assertEqual(sheet.col_values(1,0,1)[0],self.dr.find_element_by_xpath('//*[@id="vehicleNo"]').get_attribute('value'),'详情页面车牌号码校验异常')
print('车辆管理-自定义车辆:详情功能正常')
def test04_zidingyicheliang_edit(self):
self.zidingyicheliang_search()
search_value_cphm = sheet.col_values(1, 0, 1)[0]
cphm_path = sheet.col_values(1, 1, 2)[0]
self.dr.find_element_by_xpath(cphm_path).send_keys(search_value_cphm)
self.dr.find_element_by_xpath(search).click()
self.dr.switch_to.default_content()
time.sleep(2)
self.dr.switch_to.frame('iframeb')
paginal_number = self.dr.find_element_by_xpath(sheet_setting.col_values(4, 1, 2)[0]).text
column = 3
self.pagination_num(paginal_number, search_value_cphm, column)
self.dr.find_element_by_xpath('//*[@id="list"]/tbody/tr[1]/td[11]/a').click()
self.dr.find_element_by_xpath('//*[@id="monitorReason"]').send_keys(sheet.col_values(2, 2, 3)[0])
self.dr.find_element_by_xpath('//*[@id="modifyBy"]').send_keys(sheet.col_values(2, 3, 4)[0])
self.dr.find_element_by_xpath(saveBtn).click()
self.dr.switch_to.default_content()
self.dr.switch_to.frame('iframeb')
time.sleep(1)
self.assertEqual(sheet_prompt_message.col_values(1, 1, 2)[0],
self.dr.find_element_by_xpath('//*[@id="gritter-item-1"]/div[2]/div[2]/p').text, '新增成功提示信息校验')
print('车辆管理-自定义车辆:新增功能正常')
def test05_zidingyicheliang_delete(self):
self.zidingyicheliang_search()
search_value_cphm = '藏DK0700'
self.dr.find_element_by_xpath('//*[@id="form"]/div[1]/div/input').send_keys(search_value_cphm)
self.dr.find_element_by_xpath('//*[@id="search"]').click()
self.dr.switch_to.default_content()
time.sleep(2)
self.dr.switch_to.frame('iframeb')
paginal_number = self.dr.find_element_by_xpath('/html/body/div[3]/div[2]/div/div[4]/div[1]/span[1]').text
column = 3
self.pagination_num(paginal_number, search_value_cphm, column)
self.dr.find_element_by_xpath('//*[@id="list"]/thead/tr/th[1]/div[1]/input').click()
self.dr.find_element_by_xpath('/html/body/div[3]/div[1]/div[2]/a[1]').click()
self.dr.switch_to.default_content()
time.sleep(3)
self.dr.find_element_by_xpath('/html/body/div[3]/div[3]/div/button[2]/span').click()
time.sleep(5)
self.dr.switch_to.frame('iframeb')
self.assertEqual('没有找到匹配的记录',self.dr.find_element_by_xpath('//*[@id="list"]/tbody/tr/td').text,'校验删除功能是否正常')
print('车辆管理-自定义车辆:删除功能正常')
if __name__ == '__main__':
unittest.main()
|
[
"935331858@qq.com"
] |
935331858@qq.com
|
da110465a674ab0792226d4ac273b86fc9ed28ac
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03090/s812303792.py
|
0fb18fdf310d674ae4f216404ba354a1c71ffbf6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 905
|
py
|
#!/usr/bin/env python3
#AGC32 B
import sys
import math
from bisect import bisect_right as br
from bisect import bisect_left as bl
sys.setrecursionlimit(1000000000)
from heapq import heappush, heappop,heappushpop
from collections import defaultdict
from itertools import accumulate
from collections import Counter
from collections import deque
from operator import itemgetter
from itertools import permutations
mod = 10**9 + 7
inf = float('inf')
def I(): return int(sys.stdin.readline())
def LI(): return list(map(int,sys.stdin.readline().split()))
n = I()
m = n*(n-1)//2
ans = []
for i in range(n):
for j in range(i+1,n):
ans.append((i+1,j+1))
lst = []
if n % 2:
for i in range(n//2):
lst.append((i+1,n-i-1))
m -= 1
else:
for i in range(n//2):
lst.append((i+1,n-i))
m -= 1
for i,j in lst:
ans.remove((i,j))
print(m)
for i,j in ans:
print(i,j)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
79a3a476ca30b1e4a997e58c1852b7ece681f724
|
afbfb4479c031c4515d623507c3529d019b2506a
|
/link/json/collection.py
|
070cfd0e3a2b277dd4bd2c73129a02fd1bc2979a
|
[] |
no_license
|
linkdd/link.json
|
22e95664635986208a8ce51d6d7d410bb5012f68
|
2169a4252d3393def0e37d3a1aa167dd0b77c730
|
refs/heads/master
| 2020-04-15T13:38:33.317573
| 2016-09-16T13:50:41
| 2016-09-16T13:50:41
| 58,967,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,275
|
py
|
# -*- coding: utf-8 -*-
from b3j0f.conf import Configurable, category, Parameter
from link.json.schema import JsonSchema
from link.json import CONF_BASE_PATH
DEFAULT_SCHEMA = 'http://hyperschema.org/mediatypes/collection-json.json'
@Configurable(
paths='{0}/collection.conf'.format(CONF_BASE_PATH),
conf=category(
'JSONCOLLECTION',
Parameter(name='version', value='1.0'),
Parameter(name='schema', value=DEFAULT_SCHEMA)
)
)
class CollectionJSONResponse(object):
"""
Helper class used to generate valid Collection+JSON objects.
"""
ITEM_ID = 'id'
def __init__(
self,
href,
links=None,
items=None,
queries=None,
template=None,
error=None,
*args, **kwargs
):
"""
:param href: Base URL
:type href: str
:param links: Optional list of links
:type links: list
:param items: Optional list of items
:type items: list
:param queries: Optional list of queries
:type queries: list
:param template: Optional item template
:type template: dict
:param error: Optional error
:type error: dict
"""
super(CollectionJSONResponse, self).__init__(*args, **kwargs)
self.href = href
self.links = links
self.items = items
self.queries = queries
self.template = template
self.error = error
self.validator = JsonSchema()
def json(self):
"""
Generate JSON object.
:return: Collection+JSON object
:rtype: dict
"""
base = {
'collection': {
'version': self.version,
'href': self.href
}
}
if self.links is not None:
base['collection']['links'] = self.links
if self.items is not None:
base['collection']['items'] = self.items
if self.queries is not None:
base['collection']['queries'] = self.queries
if self.template is not None:
base['collection']['template'] = self.template
if self.error is not None:
base['collection']['error'] = self.error
self.validator.validate(self.schema, base)
return base
@staticmethod
def template_from_schema(schema):
tmpl = {
'template': {
'data': []
}
}
if 'properties' in schema:
for propname in schema['properties']:
prop = schema['properties'][propname]
data = {
'name': propname
}
if 'default' in prop:
data['value'] = prop['default']
if 'title' in prop:
data['prompt'] = prop['title']
elif 'description' in prop:
data['prompt'] = prop['description']
tmpl['template']['data'].append(data)
return tmpl
@classmethod
def make_item(cls, href, document, schema=None):
item = {
'href': '{0}/{1}'.format(href, document.get(cls.ITEM_ID, '')),
'data': []
}
if schema is not None and 'links' in schema:
item['links'] = []
for link in schema['links']:
itemlink = {
'href': link['href'].format(**document),
'rel': link['rel']
}
if 'title' in link:
itemlink['name'] = link['title']
if 'description' in link:
itemlink['prompt'] = link['description']
item['links'].append(itemlink)
for key in document:
data = {
'name': key,
'value': document[key]
}
if schema is not None and key in schema.get('properties', {}):
prop = schema['properties'][key]
if 'title' in prop:
data['prompt'] = prop['title']
elif 'description' in prop:
data['prompt'] = prop['description']
item['data'].append(data)
return item
def generate_collection_response(
href,
links=None,
items=None,
queries=None,
schema=None,
error=None
):
"""
Helper instantiating a ``CollectionJSONResponse`` class using the default
schema.
:param href: Base URL
:type href: str
:param links: Optional list of links
:type links: list
:param items: Optional list of items
:type items: list
:param queries: Optional list of queries
:type queries: list
:param schema: Optional item schema
:type schema: dict
:param error: Optional error
:type error: dict
:return: Collection+JSON object
:rtype: dict
"""
# guard the optional arguments: items and schema are documented as optional,
# so avoid iterating or introspecting None
resp = CollectionJSONResponse(
href,
links=links,
items=[
CollectionJSONResponse.make_item(href, item, schema=schema)
for item in items
] if items is not None else None,
queries=queries,
template=CollectionJSONResponse.template_from_schema(schema)
if schema is not None else None,
error=error
)
return resp.json()
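# A minimal usage sketch (href, item fields and schema are illustrative,
# not taken from a real service):
# doc = generate_collection_response(
#     href='/books',
#     items=[{'id': 1, 'title': 'Dune'}],
#     schema={'properties': {'title': {'title': 'Title', 'type': 'string'}}},
# )
# doc['collection'] then carries version, href, items and template, validated
# against DEFAULT_SCHEMA.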
|
[
"david.jose.delassus@gmail.com"
] |
david.jose.delassus@gmail.com
|
1f7723168e6a49bcdc616eed2a91fc7a05d1b3d3
|
f3057f554643bd68ccf5fb34966f8233c57c600f
|
/djmodels/blog/migrations/0007_auto_20171213_1357.py
|
0f340f6c17444af0f3f8de61a33fd8e29ffcc8c9
|
[] |
no_license
|
travishen/djcore
|
e917eeefc0d8a985fa083bbaf6b426ad4af31dcb
|
e611d82797abd04f5909b809e6c4debb46eece11
|
refs/heads/master
| 2021-09-03T02:52:38.336089
| 2018-01-05T02:24:16
| 2018-01-05T02:24:16
| 113,840,771
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 716
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-12-13 13:57
from __future__ import unicode_literals
import blog.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0006_auto_20171213_1225'),
]
operations = [
migrations.AddField(
model_name='postmodel',
name='slug',
field=models.SlugField(blank=True, null=True),
),
migrations.AlterField(
model_name='postmodel',
name='author_email',
field=models.CharField(blank=True, max_length=240, null=True, validators=[blog.validators.validate_author_email]),
),
]
|
[
"travishen.tw@gmail.com"
] |
travishen.tw@gmail.com
|
d2da3d3a0bb26f5782b9c140d847572bc7cd8ec0
|
fc0a6e0f9ffa90a2473fec77bc52ea02e9b21f55
|
/venv/lib/python3.7/site-packages/akshare/qhkc/funcs.py
|
ef5311e4c8f7909a2359e139fe2ab25f1364655b
|
[] |
no_license
|
YixuanSeanZhou/COVID19_Scraping
|
3903e697caf406c7d357afd8cc43811d62896244
|
b84890c4a5ddef589cd76d1ed8fd4a1976f4e3c4
|
refs/heads/master
| 2022-09-08T16:14:33.292096
| 2020-05-23T04:26:02
| 2020-05-23T04:26:02
| 266,261,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,527
|
py
|
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: Albert King
date: 2020/2/13 23:11
contact: jindaxiang@163.com
desc: available function library --> client.py --> DataApi
"""
class QhkcFunctions:
@staticmethod
def variety_positions(fields="shorts", code="rb1810", date="2018-08-08"):
"""
奇货可查-商品-持仓数据接口
:param fields: 需要返回的字段, shorts or longs
:type fields: str
:param code: 合约代号
:type code: str
:param date: 查询日期
:type date: str
:return: 商品-持仓数据
:rtype: pandas.DataFrame
broker string 席位
long int 该席位多头持仓量
long_chge int 该席位多头持仓变化量
short int 该席位空头持仓量
short_chge int 该席位空头持仓变化量
"""
pass
@staticmethod
def variety_net_positions(fields="", symbol="RB", broker="永安期货", date="2018-08-08"):
"""
奇货可查-商品-商品净持仓数据接口
:param fields: 需要返回的字段
:type fields: str
:param symbol: 查询品种编码
:type symbol: str
:param broker: 席位
:type broker: str
:param date: 查询日期
:type date: str
:return: 商品-商品净持仓数据
:rtype: dict
trans_date date 查询日期
net_position int 净持仓数据
"""
pass
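# A minimal usage sketch (illustrative: the methods above are stubs that are
# expected to be bound to a live DataApi client, as the header notes):
# api = QhkcFunctions()
# df = api.variety_positions(fields="shorts", code="rb1810", date="2018-08-08")
# print(df[["broker", "short", "short_chge"]])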
|
[
"thomaszhou2333@gmail.com"
] |
thomaszhou2333@gmail.com
|
2f5013ac6fd73da64e153628614dfa276fd04d3b
|
48a36fddd9e7c584a9792533c11601f0e4619885
|
/torchvision/edgeailite/xvision/losses/flow_loss.py
|
d3d238fa87d39813f0da26d516e3e186af56441d
|
[
"MIT",
"BSD-2-Clause-Views",
"BSD-3-Clause"
] |
permissive
|
supermy00/edgeai-torchvision
|
8c152e796590ae5f6ae4f6948cbfb132409506a0
|
29b02c32b26ea8c0c319a376ab8a9a1b9ada25b5
|
refs/heads/master
| 2023-09-02T02:36:47.068701
| 2021-11-17T10:08:17
| 2021-11-17T10:08:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,504
|
py
|
#################################################################################
# Copyright (c) 2018-2021, Texas Instruments Incorporated - http://www.ti.com
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################
import torch
from .basic_loss import *
from .loss_utils import *
__all__ = [
'end_point_error', 'end_point_loss', 'outlier_fraction', 'outlier_precentage'
]
############################################################################
class EPError(BasicNormLossModule):
def __init__(self, sparse=False, error_fn=l2_norm, error_name='EPError'):
super().__init__(sparse=sparse, error_fn=error_fn, error_name=error_name)
end_point_error = EPError
end_point_loss = EPError
############################################################################
def outlier_check(prediction, target, absolute_thr=3.0, relative_thr=0.05):
norm_dist = l2_norm(prediction, target)
norm_pred = l2_norm(prediction)
norm_target = l2_norm(target)
eps_arr = (norm_target == 0).float() * (1e-6) # To avoid division by zero.
rel_dist = norm_pred / (norm_target + eps_arr)
is_outlier = ((norm_dist > absolute_thr) & (rel_dist > relative_thr)).float()
return is_outlier
def outlier_check_x100(opt, target, absolute_thr=3.0, relative_thr=0.05):
return outlier_check(opt, target, absolute_thr, relative_thr) * 100.0
class OutlierFraction(BasicNormLossModule):
def __init__(self, sparse=False, error_fn=outlier_check, error_name='OutlierFraction'):
super().__init__(sparse=sparse, error_fn=error_fn, error_name=error_name)
#
outlier_fraction = OutlierFraction
class OutlierPercentage(BasicNormLossModule):
def __init__(self, sparse=False, error_fn=outlier_check_x100, error_name='OutlierPercentage'):
super().__init__(sparse=sparse, error_fn=error_fn, error_name=error_name)
#
outlier_precentage = OutlierPercentage
############################################################################
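# A minimal usage sketch (tensor shapes are illustrative; l2_norm comes from
# .loss_utils and is assumed to reduce over the flow/channel axis):
# pred = torch.randn(2, 2, 64, 64) # (N, 2, H, W) predicted optical flow
# gt = torch.randn(2, 2, 64, 64)
# frac = outlier_check(pred, gt).mean() # fraction of outlier pixels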
|
[
"a0393608@udtensorlab5"
] |
a0393608@udtensorlab5
|
5a1cfbe5e8072892a669b1003898b436401022a5
|
a807ce0fa3e3e9c3b558b2e977c05e60c3a667b1
|
/nemo_text_processing/inverse_text_normalization/en/verbalizers/cardinal.py
|
bd053bebf7645a4fbb554dfb082d991b1be9c614
|
[
"Apache-2.0"
] |
permissive
|
blisc/NeMo
|
630376e7555c0face994da2f6f9af5d8d31243c3
|
fadeb45c84d6b323d78e30475538455a88b7c151
|
refs/heads/rework_reqs
| 2023-08-17T00:03:39.248669
| 2021-08-12T15:15:06
| 2021-08-12T15:15:06
| 208,142,160
| 2
| 0
|
Apache-2.0
| 2022-02-03T16:30:33
| 2019-09-12T20:37:24
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,812
|
py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class CardinalFst(GraphFst):
"""
Finite state transducer for verbalizing cardinal
e.g. cardinal { integer: "23" negative: "-" } -> -23
"""
def __init__(self):
super().__init__(name="cardinal", kind="verbalize")
optional_sign = pynini.closure(
pynutil.delete("negative:")
+ delete_space
+ pynutil.delete("\"")
+ NEMO_NOT_QUOTE
+ pynutil.delete("\"")
+ delete_space,
0,
1,
)
graph = (
pynutil.delete("integer:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
self.numbers = graph
graph = optional_sign + graph
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
|
[
"noreply@github.com"
] |
blisc.noreply@github.com
|
ca75f05cd80acb75911622ecaa2e8e98c607b2fa
|
1213bcf770a94a89b39be8dc7b99a3f7e35fd369
|
/src/alloy/backend/x64/optimizer/passes/sources.gypi
|
6d80257ca4d2836c37d094b89bcaa73a046bbcae
|
[] |
no_license
|
wtfaremyinitials/xenia
|
c86e4625a1dd084d97d44c3242e2faf208bca2b8
|
16b3ecd5897051f82bc236ad9a4d0ab5cab22e87
|
refs/heads/master
| 2020-12-31T02:43:53.168712
| 2014-01-14T22:06:05
| 2014-01-14T22:06:05
| 15,918,955
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
gypi
|
# Copyright 2013 Ben Vanik. All Rights Reserved.
{
'sources': [
'reachability_pass.cc',
'reachability_pass.h',
'redundant_mov_pass.cc',
'redundant_mov_pass.h',
'register_allocation_pass.cc',
'register_allocation_pass.h',
],
}
|
[
"ben.vanik@gmail.com"
] |
ben.vanik@gmail.com
|
fc52d8ec81fe4b307261bd7e11d50deb5d97ee67
|
9ba00eb872099e6fe69c4e6b3561b0578cc09ca4
|
/exoplanets.py
|
cc3e8a16376dfb30a79845c999770f1eb685ffd8
|
[] |
no_license
|
RuthAngus/exoplanet_travel
|
4ab56ee3adfaaeca009e6997706f42091d634c01
|
b7248030e8713811f65239d5745fbd9493dcfd58
|
refs/heads/master
| 2020-12-24T13:28:35.234907
| 2014-07-01T16:14:24
| 2014-07-01T16:14:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,548
|
py
|
from exoplanet_pictograms import plot_exoplanet, plot_name, remove_space
from flask import Flask, url_for
from flask import render_template
import numpy as np
import kplr
import csv
app = Flask(__name__)
@app.route('/')
def index(name=None, text=None):
name = nm()
print name
text1 = p_text(name)
text2 = d_text(name)
text3 = price(name)
return render_template('index.html', name=name, p_text=text1, d_text=text2, cut_name=remove_space(name), \
dist = text3)
@app.route('/image/<name>')
def image(name):
return plot_name(name)
def nm():
data = np.genfromtxt("/Users/angusr/Python/exoplanet_travel/transit_data.txt", \
dtype=str, delimiter=',', skip_header=2).T # .t was a typo; transpose is .T
# select Kepler planets only: resample until the name starts with 'K'
r = np.random.randint(0, len(data[0]))
name = data[0][r]
while str(name)[0] != 'K':
r = np.random.randint(0, len(data[0]))
name = data[0][r]
return name
def p_text(name):
data = np.genfromtxt("/Users/angusr/Python/exoplanet_travel/transit_data.txt", \
dtype=str, delimiter=',', skip_header=2).T
n = data[0]
l = n == name
periods = data[3]
a = float(data[2][l][0])#/1.48e-11
p = float(periods[l][0])
print p
if p < 20:
tstar = 5000.
albedo = .5
rstar = 1.
teq = int(600.)
# teq = tstar*(1-albedo)**.25 * (rstar/(2*a))**.5
if p < 20:
r = np.random.randint(0, 3) # upper bound is exclusive; 3 makes the r == 2 branch reachable
if r == 0:
return "It's time to turn up the heat! With surface temperatures in excess of %s C, \
this planet is a scorcher"%teq
elif r == 1:
return "Love sunbathing? On this planet its so hot that even a moment of exposure will incinerate \
you. High factor Sun scream required."
elif r == 2:
return "Enjoy long summer evenings and 1000 degree days? You'll need to spend them all in a \
protective lead case, but they'll probably still be enjoyable."
else:
r = np.random.randint(0, 2) # was randint(0, 1), which always returns 0
if r == 0:
return "This is a cool place to be - too cool in fact. At -100 degrees you'll need to \
take that extra layer for sure."
else:
return "Might want to pack an extra jumper."
def d_text(name):
data = np.genfromtxt("/Users/angusr/Python/exoplanet_travel/transit_data.txt", \
dtype=str, delimiter=',', skip_header=2).T
n = data[0]
l = n == name
name = data[0][l][0]
mass = float(data[1][l][0])
radius = 1.
try:
radius = float(data[10][l][0])
except:
pass
print mass, radius
d = mass/(4*radius)**3
r = np.random.randint(0, 2) # was randint(0, 1), which always returns 0
# low g
if d > .5:
if r == 0:
return "If things are getting a little 'heavy' back home, you'll feel lighter \
than air on this low-g planet"
else:
return "One of the big pulls for %s is its gravitational field. At... Gs \
you'll come back feeling like Superman" %name
# high g
if d < .5:
if r == 0:
return "There are many attractions on this planet, but gravity isn't one of them. Its \
gravitational field is a mere 50 percent of the Earth's so you'll feel \
like you're floating the whole time"
else:
return "This is the perfect place to lose a few pounds. In fact you'll only weigh 0.1 Kg due to its \
low gravity"
def nm():
# note: this redefinition shadows the Kepler-only nm() defined above
data = np.genfromtxt("/Users/angusr/Python/exoplanet_travel/transit_data.txt", \
dtype=str, delimiter=',', skip_header=2).T
r = np.random.randint(0, len(data[0])) # index over rows, not characters
name = data[0]
return name[r]
def price(name):
distances = (500, 300, 100, 600, 1000, 10)
# 12 litres per km
# 2.82 dollars per gallon
# 4.54 litres per gallon
# .62 dollars per litre
# 7.44 dollars per km
# 4.3896 GBP per km
# 1.317e+17 gbp per parsec
r = np.random.randint(0, len(distances))
cost = 1.317e+17 * distances[r]
# stringy = str(cost)
# z = str[-2:]
r = np.random.randint(0, 4) # upper bound is exclusive; 4 makes the r == 3 branch reachable
if r == 0:
return "Only %s GBP!*" %cost
elif r == 1:
return "Special offer! %s GBP!*" %cost
elif r == 2:
return "%s GBP TODAY ONLY*" %cost
elif r == 3:
return "Only 2 seats left at %s GBP*" %cost
if __name__ == '__main__':
# name = nm()
# d_text(name)
# raw_input('enter')
app.run(debug=True)
|
[
"ruth.angus@astro.ox.ac.uk"
] |
ruth.angus@astro.ox.ac.uk
|
00061ee28548b2c6b4aefaa5471d754a09c8788d
|
653a3d9d66f3d359083cb588fc7c9ece8bb48417
|
/src/graph_transpiler/webdnn/backend/fallback/kernels/cosh.py
|
ceb89aa2948de8ed8a485af2566d587d6dfce987
|
[
"Zlib",
"MIT"
] |
permissive
|
leonskim/webdnn
|
fec510254b15f3dec00f5bed8f498737b372e470
|
f97c798c9a659fe953f9dc8c8537b8917e4be7a2
|
refs/heads/master
| 2020-04-15T18:42:43.632244
| 2019-01-10T10:07:18
| 2019-01-10T10:07:18
| 164,921,764
| 0
| 0
|
NOASSERTION
| 2019-01-09T19:07:35
| 2019-01-09T19:07:30
|
Python
|
UTF-8
|
Python
| false
| false
| 186
|
py
|
from webdnn.backend.fallback.kernels.elementwise import register_elementwise_kernel
from webdnn.graph.operators.cosh import Cosh
register_elementwise_kernel(Cosh, "y = Math.cosh(x0);")
|
[
"y.kikura@gmail.com"
] |
y.kikura@gmail.com
|
9edbd7dd5d1dc89004fd5abd46398875aab00e34
|
1afa6c852dfc922d1a26a384d965976f31a87692
|
/Imaging/Core/Testing/Python/TestSeparableFilter.py
|
da6b137d7329c12a4b1484262e2fe32f6bd2f967
|
[
"BSD-3-Clause"
] |
permissive
|
dgobbi/VTK
|
631d037aacc7258861e70f77c586b01cd4ebff3f
|
17f232ee440025c26bc78a897edef78e9fc78510
|
refs/heads/master
| 2021-01-04T22:27:46.611907
| 2013-03-01T19:44:02
| 2013-03-01T19:44:02
| 938,377
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,282
|
py
|
#!/usr/bin/env python
import math
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Image pipeline
reader = vtk.vtkPNGReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/fullhead15.png")
# Take the gradient in X, and smooth in Y
# Create a simple gradient filter
kernel = vtk.vtkFloatArray()
kernel.SetNumberOfTuples(3)
kernel.InsertValue(0,-1)
kernel.InsertValue(1,0)
kernel.InsertValue(2,1)
# Create a gaussian for Y
sigma = 1.5
sigma2 = sigma * sigma
gaussian = vtk.vtkFloatArray()
gaussian.SetNumberOfTuples(31)
i = 0
while i < 31:
    x = i - 15
    g = math.exp(-(x * x) / (2.0 * sigma2)) / (math.sqrt(2.0 * 3.1415) * sigma)
    gaussian.InsertValue(i, g)
    i = i + 1
convolve = vtk.vtkImageSeparableConvolution()
convolve.SetInputConnection(reader.GetOutputPort())
convolve.SetDimensionality(2)
convolve.SetXKernel(kernel)
convolve.SetYKernel(gaussian)
viewer = vtk.vtkImageViewer()
#viewer DebugOn
viewer.SetInputConnection(convolve.GetOutputPort())
viewer.SetColorWindow(500)
viewer.SetColorLevel(100)
viewer.Render()
# --- end of script --
|
[
"nikhil.shetty@kitware.com"
] |
nikhil.shetty@kitware.com
|
a2face64492994c1d89d87de17f1735956e91868
|
1f214956382f62e876de3d00c40c5a8684a3b5df
|
/preprocessing/tests/test_mysql.py
|
ef73d63c507114b639af108327ad9518ec0fd887
|
[
"MIT"
] |
permissive
|
ennima/omas
|
e1f00e0d3445f995d36b221c43ab47113750aeee
|
c8507b95c8c07a311c29c70acc0a7d3504d28f78
|
refs/heads/master
| 2021-01-18T18:11:49.027451
| 2016-10-14T19:28:14
| 2016-10-14T19:28:14
| 56,949,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
import sys
import unittest
sys.path.append('../')
sys.path.append('../db')
from MysqlProcessing import *
from pprint import pprint
class MysqlProcessingTest(unittest.TestCase):
# def test_it_should_be_able_to_construct(self):
# mysql = MysqlProcessing()
# self.assertIsInstance(mysql,MysqlProcessing,"Es instancia")
# def test_it_should_be_able_to_load(self):
# mysql = MysqlProcessing()
# self.assertTrue(mysql.load("../db/db.json"))
# #pprint(mysql.data)
def test_making_a_table(self):
#Global path is for find compiler files.
mysql = MysqlProcessing("../db/")
mysql.publish_path = "C:\\Users\\enrique.nieto\\Documents\\develops\\omas\\build_test\\db\\"
mysql.prettyfy = False
mysql.publish_single_file = True
mysql.publsh_to_file = True
mysql.db_mysql_engine = "INNODB"
# mysql.db_charset_latin = False
#mysql.process_create = False
#This is a specific path not global
mysql.load("../db/db.json")
mysql.process()
print(mysql.tables)
if __name__ == '__main__':
unittest.main()
|
[
"ennima@hotmail.com"
] |
ennima@hotmail.com
|
3435d0e6edaab0672cac56775147c66023890e1f
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_75/288.py
|
5d27fe84e3a6db4dda7c09565446712b904e8b3e
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
# coding:utf-8
import sys
argvs = sys.argv
argc = len(argvs)
#print argvs
#print argc
f = open(argvs[1])
T = f.readline()
for i in range(int(T)):
comb_rules = []
oppo_rules = []
line = f.readline()
tc = line.split(' ')
    C = int(tc[0]) # number of conversion rules
for j in range(C):
temp = tc[j+1]
t_l = [[temp[0], temp[1]], temp[2]]
comb_rules.append(t_l)
t_l = [[temp[1], temp[0]], temp[2]]
comb_rules.append(t_l)
    D = int(tc[C+1]) # number of repulsion rules
for j in range(D):
temp = tc[j + C + 2]
t_l = [temp[0], temp[1]]
oppo_rules.append(t_l)
t_l = [temp[1], temp[0]]
oppo_rules.append(t_l)
    N = tc[C+D+2] # number of characters
    S = tc[C+D+3] # the input string
ls = []
for j in range(int(N)):
temp = S[j]
for l in comb_rules:
if l[0][0] == temp and len(ls) > 0 and l[0][1] == ls[len(ls)-1]:
ls.pop()
temp = l[1]
chk = 0
for l in oppo_rules:
lss = set(ls)
if l[0] == temp and l[1] in lss:
ls = []
chk = 1
if chk == 0:
ls.append(temp)
ans_s = str(ls)
sys.stdout.write("Case #")
sys.stdout.write(str(i+1))
sys.stdout.write(": ")
sys.stdout.write(ans_s.replace("'",""))
sys.stdout.write("\n")
# print ls
f.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
1fdcdc99bcf503501c3716c003b8e28528b0ce68
|
0857913ae5cde7481c5bca63ed5081e11f217581
|
/p1804/p10While/whileTest.py
|
5d346d2f002ca0cee68216cb7d6c969ca233f173
|
[] |
no_license
|
nmww/p1804_ceshi
|
48c2d93849018d8601f0732c5005395c81490ef1
|
72bf25cc7767371594b41f8919454e46fe178023
|
refs/heads/master
| 2020-03-15T23:25:43.345763
| 2018-06-27T03:19:55
| 2018-06-27T03:19:55
| 132,393,611
| 53
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 565
|
py
|
'''
The while loop
# a while statement applies to all statements
# below it at the same indentation level
while condition:
    pass
    pass
The loop condition:
1. whether it holds: True / False
2. controlled by a comparison of a with b
'''
#-------------------------------
qiandao = 0 # not signed in yet
xinqing = 'kaixin'
while qiandao < 10:
    print ("You have not signed in yet. Please sign in soon, or you will have to kneel on the keyboard %d " % qiandao)
    qiandao = qiandao + 1
#----------------------------------
# True always holds
# False never holds
while False:
    print ("You should sign in!")
|
[
"2423550286@qq.com"
] |
2423550286@qq.com
|
4dc98360b6b09c2585684d65e841dc4239d20e34
|
401ea01ffb848f1eabd8aa17690ec1ff5dc8e6bd
|
/test/test_action_event.py
|
62370e432b086e11d277be6874c16774885dc2d8
|
[] |
no_license
|
bbrangeo/python-api-client
|
735acda3627d7a0ddd78ecb1e9617bb4082c9001
|
c2481e0cd012a41aeceefdce289d48509540b909
|
refs/heads/master
| 2020-03-14T18:24:20.888631
| 2018-04-30T14:47:47
| 2018-04-30T14:47:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 873
|
py
|
# coding: utf-8
"""
BIMData API
BIMData API documentation # noqa: E501
OpenAPI spec version: v1
Contact: contact@bimdata.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import bimdata_api_client
from bimdata_api_client.models.action_event import ActionEvent # noqa: E501
from bimdata_api_client.rest import ApiException
class TestActionEvent(unittest.TestCase):
"""ActionEvent unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testActionEvent(self):
"""Test ActionEvent"""
# FIXME: construct object with mandatory attributes with example values
# model = bimdata_api_client.models.action_event.ActionEvent() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"hugo.duroux@gmail.com"
] |
hugo.duroux@gmail.com
|
9d4a06013d0517080446f5dfd957558c9e77d8d9
|
a86fda09a185ebf367e31cf26589161303f9497a
|
/metrics/topologyVertex.py
|
3b6fc44e77e468ade8bbf02eebf09fef94eecc0f
|
[
"BSD-3-Clause"
] |
permissive
|
kristianeschenburg/metrics
|
67ec2cd5b697241eee35da46daf71b2d735cdb64
|
53900f8130cb7dd762ae3e816225fb4f178a5b29
|
refs/heads/master
| 2020-03-25T10:27:29.623361
| 2019-04-02T21:20:08
| 2019-04-02T21:20:08
| 116,600,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,711
|
py
|
import numpy as np
def labelCounts(label, adjacencyList):
"""
For each vertex, count the number of vertices with other labels that are
adjacent to it.
Parameters:
- - - - -
label : int, array
label vector
adjacencyList : SurfaceAdjacency
adjacency list for surface mesh
"""
values = list(set(np.unique(label)).difference({-1,0 }))
aggregateCounts = {k: {h: 0 for h in values} for k in values}
aggregateMatrix = np.zeros((len(values), len(values)))
# loop over each unique label value
for j, v in enumerate(values):
# get indices of label
idxv = np.where(label == v)[0]
# loop over vertices with this label and count number of neighboring vertices
# with different label values
for ind in idxv:
n = adjacencyList[ind]
nCounts = neighborhoodCounts(n, label, values)
for n in nCounts:
aggregateCounts[v][n] += nCounts[n]
        counts = list(aggregateCounts[v].values())
aggregateMatrix[j, :] = counts
rowSums = aggregateMatrix.sum(axis=1)
rowNormed = aggregateMatrix / rowSums[:, None]
return [aggregateMatrix, rowNormed]
def neighborhoodCounts(subscripts, label, values):
"""
Compute the number of neighbors of each label directly adjacent to a
vertex.
Parameters:
- - - - -
subscripts : list
indices of directly-adjacent vertices / voxels
    label : int, array
label vector
values : accepted label values
"""
neighbors = list(label[subscripts])
counts = {}.fromkeys(values)
for v in values:
counts[v] = neighbors.count(v)
return counts
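
# A hedged usage sketch: labelCounts only needs the adjacency list to support
# indexing by vertex, so a plain dict stands in for SurfaceAdjacency here.
# The toy mesh below (4 vertices, 2 labels) is illustrative, not real data.
if __name__ == "__main__":
    toy_label = np.array([1, 1, 2, 2])
    toy_adjacency = {0: [1, 2], 1: [0, 3], 2: [0, 3], 3: [1, 2]}
    mat, normed = labelCounts(toy_label, toy_adjacency)
    print(mat)     # raw neighbor counts per label pair
    print(normed)  # each row normalized to sum to 1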
|
[
"keschenb@uw.edu"
] |
keschenb@uw.edu
|
d8c33a598956091bab533eda8716d63e6ce852b5
|
2986a62a5221885b2f070060aadb9c9ab1e84035
|
/Aula10 - Condições em Python/ex033 - Maior e menor valor.py
|
1084395b85db8996bebe2ec6aea68e8a3f430e7f
|
[] |
no_license
|
igorkoury/cev-phyton-exercicios-parte-1
|
dcb39772be48ba7333a391af4e9fda025654472f
|
b187565ca996402f4862ad3000d18bfb461e269e
|
refs/heads/main
| 2023-08-28T02:38:32.311162
| 2021-10-06T21:38:39
| 2021-10-06T21:38:39
| 414,375,879
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
# Write a program that reads 3 numbers and says which is the largest and which is the smallest.
print("Enter 3 numbers to find out which is the smallest and which is the largest.")
n1 = int(input("Enter a number: "))
n2 = int(input("Enter another: "))
n3 = int(input("Enter one more: "))
print("You entered {}, {} and {} respectively.".format(n1, n2, n3))
menor = n1
if n1 > n2 and n3 > n2:
    menor = n2
if n2 > n3 and n1 > n3:
    menor = n3
maior = n1
if n2 > n1 and n2 > n3:
    maior = n2
if n3 > n1 and n3 > n2:
    maior = n3
print("The smallest value entered was {}.".format(menor))
print("The largest value entered was {}.".format(maior))
|
[
"noreply@github.com"
] |
igorkoury.noreply@github.com
|
df9181e897d1c79756259eeadcb4711eca0fff67
|
04975a41eb459f1528dcbdcb1143a3cb535aa620
|
/Tree_easy/leetcode_590.py
|
ef470b8651a6a2b4c709b9cdf7f36ce6651cff88
|
[] |
no_license
|
RickLee910/Leetcode_easy
|
2a50d632379826979a985e1b9950d4cf6bbd8b18
|
c2687daf334f96a908737067bb915b8b072d0d56
|
refs/heads/master
| 2023-01-29T11:09:26.701243
| 2020-12-02T04:36:14
| 2020-12-02T04:36:14
| 294,952,436
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
from typing import List  # needed for the List[int] return annotation below


class Solution:
def postorder(self, root: 'Node') -> List[int]:
ans = []
def dfs(root,ans):
            if root is None:
return
for i in root.children:
dfs(i,ans)
ans.append(root.val)
return ans
dfs(root, ans)
return ans
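
# Hedged local-run sketch: LeetCode normally injects Node; this minimal
# stand-in (illustrative, not the platform's definition) exercises postorder.
#
# class Node:
#     def __init__(self, val=None, children=None):
#         self.val = val
#         self.children = children or []
#
# root = Node(1, [Node(3, [Node(5), Node(6)]), Node(2), Node(4)])
# print(Solution().postorder(root))  # [5, 6, 3, 2, 4, 1]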
|
[
"13554543910@163.com"
] |
13554543910@163.com
|
455ab705f17ca676bf7042b5beb8912bd9ac74c8
|
6ab31b5f3a5f26d4d534abc4b197fe469a68e8e5
|
/tests/kyu_7_tests/test_patterncraft_visitor.py
|
8824069b8298d54921b0b5d2084e335ee8fe30a0
|
[
"MIT"
] |
permissive
|
mveselov/CodeWars
|
e4259194bfa018299906f42cd02b8ef4e5ab6caa
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
refs/heads/master
| 2021-06-09T04:17:10.053324
| 2017-01-08T06:36:17
| 2017-01-08T06:36:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
import unittest
from katas.kyu_7.patterncraft_visitor import Marauder, Marine, TankBullet
class PatterncraftVisitorTestCase(unittest.TestCase):
def setUp(self):
self.bullet = TankBullet()
self.light = Marine()
self.bullet2 = TankBullet()
self.armored = Marauder()
def test_equals(self):
self.light.accept(self.bullet)
self.assertEqual(self.light.health, 100 - 21)
def test_equals_2(self):
self.armored.accept(self.bullet2)
self.assertEqual(self.armored.health, 125 - 32)
|
[
"the-zebulan@users.noreply.github.com"
] |
the-zebulan@users.noreply.github.com
|
31de42d8a93f76985b06a65ff8e74ce14733af1e
|
bec8f33002130d8395f4ac4f0c74b785aa22cac5
|
/appium/options/common/language_option.py
|
f82de63d08680cc5a2a131403286ba603d4c8827
|
[
"Apache-2.0"
] |
permissive
|
appium/python-client
|
1c974fdf1ac64ce4ac37f3fc8c0a3e30c186d3ca
|
2e49569ed45751df4c6953466f9769336698c033
|
refs/heads/master
| 2023-09-01T22:14:03.166402
| 2023-09-01T11:52:27
| 2023-09-01T11:52:27
| 18,525,395
| 1,588
| 606
|
Apache-2.0
| 2023-09-10T02:00:09
| 2014-04-07T17:01:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,317
|
py
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
from .supports_capabilities import SupportsCapabilities
LANGUAGE = 'language'
class LanguageOption(SupportsCapabilities):
@property
def language(self) -> Optional[str]:
"""
Language abbreviation to use in a test session.
"""
return self.get_capability(LANGUAGE)
@language.setter
def language(self, value: str) -> None:
"""
Set language abbreviation to use in a test session.
"""
self.set_capability(LANGUAGE, value)
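
# Hedged usage sketch: concrete option classes mix LanguageOption into an
# options base that provides the get_capability/set_capability storage.
# The class name below is illustrative, not part of this module.
#
# options = SomePlatformOptions()   # hypothetical options class using this mixin
# options.language = 'en'
# assert options.language == 'en'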
|
[
"noreply@github.com"
] |
appium.noreply@github.com
|
8f16b2d94dc4d8a4ebc5c2b779b1049670c0faa5
|
d8829cbc2d2863f68cb1b447a878dce0ac20a878
|
/scraper/positions.py
|
40a42e62b99118840ea7c9eef7ef2123b8b36208
|
[] |
no_license
|
XiaoxiaoWang87/InsightPrj
|
170a9757dfdf4669ee2c52322f2f5e5d766ce2a1
|
767a3719fad93ddb9711817f543b5e7b1822f680
|
refs/heads/master
| 2021-01-19T00:41:44.195663
| 2014-10-06T22:28:24
| 2014-10-06T22:28:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
import pandas as pd
df = pd.read_csv('position_draft.csv',sep='\t')
for index, row in df.iterrows():
#if row["DRAFT"] == '[]':
if row["POSITION"] == '[]':
print row["PLAYERCODE"]
|
[
"="
] |
=
|
8ba17eac98795a2e90941bb8a59a7c5f271b75eb
|
a9937139b1af85180cea706a52d447abce2430f4
|
/e/Pliki/otwieranie_pliku.py
|
1f748da7717c491ec9513b293b31648e29137cf4
|
[] |
no_license
|
MirekPz/Altkom
|
8f16014d43adb10e87804ae2b5d23151924cb226
|
0a49e75e681593b41d07cbff63dea0723a11756b
|
refs/heads/master
| 2020-09-29T00:12:37.990353
| 2019-12-13T15:52:30
| 2019-12-13T15:52:30
| 226,899,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
plik = open('example_txt_file.dmo', 'r')
zawartosc = plik.read()
print(zawartosc)
plik.close()
input('second:')
# with open
with open('example_txt_file.dmo') as file:
print(file.read())
|
[
"mirek@info-tur.pl"
] |
mirek@info-tur.pl
|
a3d44c517ab38a95c83ab8b1483260b48e15327b
|
4328c71ddbb3dd564c04c599f8e95afef16e0d7c
|
/dg/4-sysProcess/06-全局变量在多个进程中不共享.py
|
773b57b49e89c4bc420af8e6576049b0201d5761
|
[] |
no_license
|
amtfbky/git_py
|
470fb7bc8f2c5017d47c31e1af8bb042bfe377b4
|
6159049b088036ebd133f5dab917d3e540a304c8
|
refs/heads/master
| 2020-03-09T04:16:12.991206
| 2018-06-01T01:16:05
| 2018-06-01T01:16:05
| 128,583,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
import os
import time
g_num = 100
ret = os.fork()
if ret == 0:
print("------process-1--------")
g_num += 1
print("------pricess-1---g_num=%d--"%g_num)
else:
time.sleep(1)
print("------process-2--------")
print("------pricess-2---g_num=%d--"%g_num)
|
[
"xjbrhnhh@163.com"
] |
xjbrhnhh@163.com
|
2901a64d6a75331ae821fc785d834c03fc4d5a31
|
616c3c02be31b9ae4d06bd7c5a8d4a2e7c446aa1
|
/1370.上升下降字符串.py
|
36aae8d92dbacc45541253677dc27fdce052d717
|
[] |
no_license
|
L1nwatch/leetcode-python
|
8b7c47c04ee9400d50d8b0764a544a0463df8f06
|
0484cbc3273ada25992c72105658cd67411c5d39
|
refs/heads/master
| 2023-01-11T14:53:15.339276
| 2023-01-11T05:24:43
| 2023-01-11T05:24:43
| 194,516,548
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
#
# @lc app=leetcode.cn id=1370 lang=python3
#
# [1370] Increasing Decreasing String
#
# @lc code=start
class Solution:
def sortString(self, s: str) -> str:
s = list(s)
s.sort()
answer = list()
while len(s) > 0:
# step1
char = s.pop(0)
answer.append(char)
# step2 + step3
index = 0
while index < len(s):
if s[index] != char:
char = s.pop(index)
answer.append(char)
else:
index += 1
# step4
if len(s) > 0:
char = s.pop()
answer.append(char)
# step5 + step6
for index in range(len(s)-1, -1, -1):
if s[index] != char:
char = s.pop(index)
answer.append(char)
return "".join(answer)
# @lc code=end
|
[
"watch1602@gmail.com"
] |
watch1602@gmail.com
|
f4ba49f50c1a6829dee16f64d136e2f0406c7115
|
d992f98d3c5d009c567e9dac83f38770d8d72f77
|
/postCMeval/annoate_summary_with_pscore.py
|
942920673d5212449ff7547f0710420b4b39e90f
|
[] |
no_license
|
rlleras/quasiClique
|
c65643dcc35e2426e4c519ee1b3400895e9610cc
|
d57be41a213d2e57778d2feb9c103594ebbbf705
|
refs/heads/master
| 2021-01-21T20:53:05.841720
| 2012-02-28T22:49:17
| 2012-02-28T22:49:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
import os
import sys
import glob
import csv
def annotate_rank_summary_with_pscore(filename, delimiter=','):
"""
Given a rank_cmfinder.pl-output summary file X,
create a new file X.pscore_added that has the motifs' pscores appended
"""
motif_dir = os.path.dirname(filename)
in_csv = csv.DictReader(open(filename), delimiter=delimiter) # cmfinder rank summaries are comma-separated
with open(filename + '.pscore_added', 'w') as out_f:
if in_csv.fieldnames is None:
print >> sys.stderr, "file {0} is odd. IGNORE now".format(filename)
return
new_fieldnames = in_csv.fieldnames + ['pscore']
out_csv = csv.DictWriter(out_f, new_fieldnames, delimiter=delimiter)
# need to write out the field names
#out_csv.writeheader()# lol this function only in 2.7 and i have 2.6 Orz
out_f.write(delimiter.join(new_fieldnames) + '\n')
for obj in in_csv:
motif_full_path = os.path.join(motif_dir, obj['motif'])
pscore = os.popen("grep \"Total pair posterior\" {0}.pscoreout".format(motif_full_path)).read().strip()
obj['pscore'] = float( pscore[len('Total pair posterior '):] )
out_csv.writerow(obj)
if __name__ == "__main__":
for filename in glob.iglob('motifs/*/*.fna.summary'):
print >> sys.stderr, "annotating pscore to {0}.pscore_added....".format(filename)
annotate_rank_summary_with_pscore(filename)
|
[
"magdoll@gmail.com"
] |
magdoll@gmail.com
|
4532e075eae5a1ddc4b9609e44839947d035ec9f
|
a68f37fcbf069c0656d4838af7386d6a9919fe59
|
/company_account/decorators.py
|
1454352528e707241395d14220ced8df0d57a92c
|
[] |
no_license
|
mitchellpottratz/JobMatch
|
7dd2cbd979ca55cf651bcee4356a97e061145b90
|
797a9b1c3dfad57c05db52384d87d5f82be632f5
|
refs/heads/master
| 2022-11-26T08:44:27.535809
| 2019-12-13T20:05:11
| 2019-12-13T20:05:11
| 224,901,935
| 0
| 0
| null | 2022-11-22T04:53:46
| 2019-11-29T18:08:55
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 457
|
py
|
from functools import wraps
from django.http import HttpResponseRedirect
# this decorator checks whether the current user is a company_user, and
# redirects them to the login page if they are not
def company_account_required(function):
@wraps(function)
def wrap(request, *args, **kwargs):
if not request.user.company_user:
return HttpResponseRedirect('/users/login/')
else:
return function(request, *args, **kwargs)
return wrap
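
# Hedged usage sketch (view and template names are illustrative): the
# decorator wraps an ordinary Django view, so company-only pages can be
# guarded like this.
#
# from django.shortcuts import render
# from company_account.decorators import company_account_required
#
# @company_account_required
# def company_dashboard(request):
#     return render(request, 'company_account/dashboard.html')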
|
[
"mitchellpottratz@Mitchells-MacBook-Air.local"
] |
mitchellpottratz@Mitchells-MacBook-Air.local
|
6add13271b6ebf08177a20b058dc78e738d70a03
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/dockerized-gists/5320734/snippet.py
|
5d6a295d577a6aa909c64302aa627f27dc8e67b7
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,522
|
py
|
import os
import matplotlib.pyplot as plt
def save(path, ext='png', close=True, verbose=True):
"""Save a figure from pyplot.
Parameters
----------
path : string
The path (and filename, without the extension) to save the
figure to.
ext : string (default='png')
The file extension. This must be supported by the active
matplotlib backend (see matplotlib.backends module). Most
backends support 'png', 'pdf', 'ps', 'eps', and 'svg'.
close : boolean (default=True)
Whether to close the figure after saving. If you want to save
the figure multiple times (e.g., to multiple formats), you
should NOT close it in between saves or you will have to
re-plot it.
verbose : boolean (default=True)
Whether to print information about when and where the image
has been saved.
"""
# Extract the directory and filename from the given path
directory = os.path.split(path)[0]
filename = "%s.%s" % (os.path.split(path)[1], ext)
if directory == '':
directory = '.'
# If the directory does not exist, create it
if not os.path.exists(directory):
os.makedirs(directory)
# The final path to save to
savepath = os.path.join(directory, filename)
if verbose:
print("Saving figure to '%s'..." % savepath),
# Actually save the figure
plt.savefig(savepath)
# Close it
if close:
plt.close()
if verbose:
print("Done")
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
64c32586630e0c01a48bb1e4907ab4ac66192273
|
98d8b7dd018b95dbf2c81431561546fd9619e391
|
/model/ItemLimitModel.py
|
4c9e5c484728ea86929619b001baca2ad8e6fea9
|
[] |
no_license
|
c1xfr2e/kkyadmin
|
1642c42659724a676000768bab559684581f3b07
|
e5cef8b8ed63d3db8c1ff22af0edf44c36c4df63
|
refs/heads/master
| 2021-09-08T08:55:17.448640
| 2018-03-09T00:07:46
| 2018-03-09T00:07:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,436
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
""""""
from third_party.orm.field import StringField
from third_party.orm.field import IntegerField
from third_party.orm.field import ObjectIdField
from third_party.orm.field import ListField
from third_party.orm import Document
from tornado import gen
import settings
import time
class ItemLimitModel(Document):
meta = {
'db': settings.mongodb.SHARK_DB,
'collection': 'item_limit'
}
item_id = ObjectIdField(required=True)
user_id = ObjectIdField(required=True)
counter = IntegerField(required=True, default=int(0))
updated_time = ListField(IntegerField(), required=True)
@classmethod
@gen.coroutine
def UpdateUserItemCounter(cls, user_id, item_id, counter_inc):
condition = {
"user_id": user_id,
"item_id": item_id
}
setter = {
"$inc": {
"counter": counter_inc
},
"$push": {
"updated_time": int(time.time() * 1000)
}
}
result = yield cls.update(condition, setter, upsert=True)
raise gen.Return(result)
@classmethod
@gen.coroutine
def GetUserItemLimit(cls, user_id, item_id):
condition = {
"user_id": user_id,
"item_id": item_id
}
result = yield cls.find_one(condition)
raise gen.Return(result)
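
# Hedged usage sketch inside another Tornado coroutine (the ids are
# illustrative; a live MongoDB connection is assumed):
#
# @gen.coroutine
# def bump_and_read(user_id, item_id):
#     yield ItemLimitModel.UpdateUserItemCounter(user_id, item_id, 1)
#     limit = yield ItemLimitModel.GetUserItemLimit(user_id, item_id)
#     raise gen.Return(limit)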
|
[
"gurity@163.com"
] |
gurity@163.com
|
c47b81eae5619450ed32f1b322237587c0217bf4
|
33d490698f2958f2d53a6436043959bac5c9f63d
|
/l10n_es_aeat_mod340/__openerp__.py
|
5bc97b620692f733dc328a58c8b83803b21a8568
|
[] |
no_license
|
ideosoft/odoo-modules
|
cf1a4bf0a1f0f25bfa44a83f8c10a2c73baed67e
|
3183a533ec9b89a57fd2b4c09cca0111afc86730
|
refs/heads/master
| 2021-03-30T18:13:42.873503
| 2016-07-14T13:46:01
| 2016-07-14T13:46:01
| 49,328,128
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,276
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Ting (http://www.ting.es) All Rights Reserved.
# Copyright (c) 2011-2013 Acysos S.L. (http://acysos.com)
# Ignacio Ibeas Izquierdo <ignacio@acysos.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Modelo 340 file generation and VAT book',
'version': '2.0',
'author': "Acysos S.L., "
"Francisco Pascual (Ting), "
"Nan-tic, "
"Odoo Community Association (OCA)",
'website': 'www.acysos.com, www.ting.es, www.nan-tic.com',
'category': 'Localisation/Accounting',
    'description': '''
Module for filing Spanish tax form modelo 340. Export to the AEAT file
format. VAT book.
The taxes included in this form are indicated in the base tax code; by
default every base code that must be included is updated.
If the chart of accounts is installed, remember to use account_chart_update
to refresh the codes: Accounting and Finance -> Configuration -> Financial
Accounting -> Update chart of accounts from a chart template.
Search of issued and received invoices.
Export of issued and received invoices to the AEAT format.
Export of invoices with several tax rates. Operation key C.
Intra-community invoices, except for the operations referred to in article 66
of the RIVA, which receive a special treatment.
Refund invoices.
Ticket summary invoices.
Allows printing the VAT book, based on the same legislation.
---- PENDING ITEMS (TODO LIST) ------------------------------------------------
Investment goods invoices.
Intra-community invoices: operations referred to in article 66 of the RIVA.
Ticket summary journal entries.
Export of invoice summary journal entries.
''',
'license': 'AGPL-3',
'depends': [
'account',
'base_vat',
'l10n_es',
'l10n_es_aeat',
'account_refund_original',
'account_chart_update',
],
'data': [
'report/report_view.xml',
'wizard/export_mod340_to_boe.xml',
'mod340_view.xml',
'mod340_workflow.xml',
'security/ir.model.access.csv',
'res_partner_view.xml',
'mod340_sequence.xml',
'account_invoice_view.xml',
'account_view.xml',
'taxes_data.xml',
],
'installable': True,
}
|
[
"jgomez@ideosoft.es"
] |
jgomez@ideosoft.es
|
6e475fa396e9a6dcef96eed3caf04907181bd82f
|
03f9b8bdea312636afb4df3737b55cb0cc4b21ff
|
/RLEIterator.py
|
abaa4b9c89f81c3310c20d22aec12ca5c65ea68a
|
[] |
no_license
|
ellinx/LC-python
|
f29dd17bbe15407ba0d06ad68386efdc9a343b56
|
9190d3d178f1733aa226973757ee7e045b7bab00
|
refs/heads/master
| 2021-06-01T15:21:24.379811
| 2020-10-29T04:37:07
| 2020-10-29T04:37:07
| 132,704,788
| 1
| 1
| null | 2019-05-15T03:26:11
| 2018-05-09T05:13:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,435
|
py
|
"""
Write an iterator that iterates through a run-length encoded sequence.
The iterator is initialized by RLEIterator(int[] A), where A is a run-length encoding of some sequence.
More specifically, for all even i, A[i] tells us the number of times that
the non-negative integer value A[i+1] is repeated in the sequence.
The iterator supports one function: next(int n),
which exhausts the next n elements (n >= 1) and returns the last element exhausted in this way.
If there is no element left to exhaust, next returns -1 instead.
For example, we start with A = [3,8,0,9,2,5], which is a run-length encoding of the sequence [8,8,8,5,5].
This is because the sequence can be read as "three eights, zero nines, two fives".
Example 1:
Input: ["RLEIterator","next","next","next","next"], [[[3,8,0,9,2,5]],[2],[1],[1],[2]]
Output: [null,8,8,5,-1]
Explanation:
RLEIterator is initialized with RLEIterator([3,8,0,9,2,5]).
This maps to the sequence [8,8,8,5,5].
RLEIterator.next is then called 4 times:
.next(2) exhausts 2 terms of the sequence, returning 8. The remaining sequence is now [8, 5, 5].
.next(1) exhausts 1 term of the sequence, returning 8. The remaining sequence is now [5, 5].
.next(1) exhausts 1 term of the sequence, returning 5. The remaining sequence is now [5].
.next(2) exhausts 2 terms, returning -1. This is because the first term exhausted was 5,
but the second term did not exist. Since the last term exhausted does not exist, we return -1.
Note:
1. 0 <= A.length <= 1000
2. A.length is an even integer.
3. 0 <= A[i] <= 10^9
4. There are at most 1000 calls to RLEIterator.next(int n) per test case.
5. Each call to RLEIterator.next(int n) will have 1 <= n <= 10^9.
"""
class RLEIterator:
def __init__(self, A):
"""
:type A: List[int]
"""
self.A = A
self.idx = 0
def next(self, n):
"""
:type n: int
:rtype: int
"""
while self.idx<len(self.A) and self.A[self.idx]==0:
self.idx += 2
while self.idx<len(self.A) and n>=self.A[self.idx]:
n -= self.A[self.idx]
self.idx += 2
if n==0:
return self.A[self.idx-1]
if self.idx>=len(self.A):
return -1
self.A[self.idx] -= n
return self.A[self.idx+1]
# Your RLEIterator object will be instantiated and called as such:
# obj = RLEIterator(A)
# param_1 = obj.next(n)
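
# Worked check against Example 1 from the docstring above:
if __name__ == "__main__":
    it = RLEIterator([3, 8, 0, 9, 2, 5])
    print([it.next(2), it.next(1), it.next(1), it.next(2)])  # [8, 8, 5, -1]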
|
[
"ellin.xll@gmail.com"
] |
ellin.xll@gmail.com
|
69a1f0b6fd6245b262107582163cc8ddd44ea88e
|
38a92e99215d4bd6146374408513e7b8446ec828
|
/ocpu/views.py
|
59a753c4ddfbdf47b2487cad764f92feee416851
|
[] |
no_license
|
andi-nl/ANDI-frontend
|
b953ca50d1c1f3a4d6d12de8e68b7d81e815f0dc
|
8d41bc8b6764444ab7b7a7ac053cdf8c317a568a
|
refs/heads/master
| 2020-04-14T23:12:14.309292
| 2017-06-22T12:08:43
| 2017-06-22T12:08:43
| 41,303,252
| 1
| 12
| null | 2017-01-02T13:17:41
| 2015-08-24T13:00:35
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 932
|
py
|
import json
import logging
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.core.exceptions import SuspiciousOperation
from .utils import do_normcomp, do_calccomposite
logger = logging.getLogger(__name__)
@login_required
@csrf_exempt
def compute(request):
logger.info('Called compute')
try:
parameters = json.loads(request.body.decode('utf-8'))
except:
raise SuspiciousOperation('Invalid input for ocpu.')
method = parameters.get('method')
if method == 'normcomp':
return do_normcomp(parameters)
elif method == 'calccomposite':
return do_calccomposite(parameters)
else:
msg = 'ocpu called with "{}"; method not implemented'.format(method)
logger.error(msg)
        return JsonResponse({'error': 'method "{}" not implemented in ocpu.'.format(method)})
|
[
"j.vanderzwaan@esciencecenter.nl"
] |
j.vanderzwaan@esciencecenter.nl
|
7b6058bd35243dcac57bec06afb8147e964d4555
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_softwoods.py
|
bf431aa729f540bf80fb4fb0ce05106eaa0eb798
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
from xai.brain.wordbase.nouns._softwood import _SOFTWOOD
# class header
class _SOFTWOODS(_SOFTWOOD):
def __init__(self,):
_SOFTWOOD.__init__(self)
self.name = "SOFTWOODS"
self.specie = 'nouns'
self.basic = "softwood"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
850ecdca5bc07271e05e480aefeb8432c7ea8f48
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/EPXH424t2SSjMzms5_5.py
|
6f2e5df2ee3869baab5543822cd15128d9777f20
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
"""
Create a function that takes both a string and a list of integers and
rearranges the letters in the string to be in the order specified by the index
numbers. Return the "remixed" string.
### Examples
remix("abcd", [0, 3, 1, 2]) ➞ "acdb"
The string you'll be returning will have:
* "a" at index 0
* "b" at index 3
* "c" at index 1
* "d" at index 2
... because the order of those characters maps to their corresponding numbers
in the index list.
remix("PlOt", [1, 3, 0, 2]) ➞ "OPtl"
remix("computer", [0, 2, 1, 5, 3, 6, 7, 4]) ➞ "cmourpte"
### Notes
* Be sure not to change the original case.
* Assume you'll be given a string and list of equal length, both containing valid characters (A-Z, a-z, or 0-9).
* The list of numbers could potentially be more than nine (i.e. double figures).
"""
remix=lambda t,l:''.join(y for _,y in sorted(zip(l,t)))
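
# Quick check against the docstring examples:
if __name__ == "__main__":
    print(remix("abcd", [0, 3, 1, 2]))  # acdb
    print(remix("PlOt", [1, 3, 0, 2]))  # OPtl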
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
8ebea4a768a74346f669199a18f9034a5a72fcd3
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part006912.py
|
b626d2d86f3f2c0c06e456ec94bdbe994424a7eb
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,302
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher101446(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.3.1.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.3.1.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher101446._instance is None:
CommutativeMatcher101446._instance = CommutativeMatcher101446()
return CommutativeMatcher101446._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 101445
return
yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
b66d2ae78a2abe5aa52b983e5773f4b95f0dfe9f
|
11334e46d3575968de5062c7b0e8578af228265b
|
/Examples/OpenCV/PPOCV/C6/arithmetic.py
|
f062fdf7c94256bec4b4af759f4f493579109cb4
|
[] |
no_license
|
slowrunner/Carl
|
99262f16eaf6d53423778448dee5e5186c2aaa1e
|
1a3cfb16701b9a3798cd950e653506774c2df25e
|
refs/heads/master
| 2023-06-08T05:55:55.338828
| 2023-06-04T02:39:18
| 2023-06-04T02:39:18
| 145,750,624
| 19
| 2
| null | 2023-06-04T02:39:20
| 2018-08-22T18:59:34
|
Roff
|
UTF-8
|
Python
| false
| false
| 2,217
|
py
|
#!/usr/bin/env python3
#
# arithmetic.py
"""
Documentation:
PPOCV C6
"""
# from __future__ import print_function # use python 3 syntax but make it compatible with python 2
# from __future__ import division # ''
import sys
try:
sys.path.append('/home/pi/Carl/plib')
import speak
import tiltpan
import status
import battery
import myDistSensor
import runLog
Carl = True
except:
Carl = False
import easygopigo3 # import the GoPiGo3 class
import numpy as np
import datetime as dt
import argparse
from time import sleep
import imutils
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to image file")
# ap.add_argument("-n", "--num", type=int, default=5, help="number")
args = vars(ap.parse_args())
# print("Started with args:",args)
# constants
# variables
def main():
if Carl: runLog.logger.info("Started")
egpg = easygopigo3.EasyGoPiGo3(use_mutex=True)
if Carl:
tiltpan.tiltpan_center()
sleep(0.5)
tiltpan.off()
try:
image = cv2.imread(args["image"])
cv2.imshow("Original", image)
print("max of 255: {}".format(cv2.add(np.uint8([200]), np.uint8([100]))))
print("min of 0: {}".format(cv2.subtract(np.uint8([50]), np.uint8([100]))))
print("wrap around: {}".format(np.uint8([200]) + np.uint8([100])))
print("wrap around: {}".format(np.uint8([50]) - np.uint8([100])))
M = np.ones(image.shape, dtype = "uint8") * 100
added = cv2.add(image, M)
cv2.imshow("Added", added)
M = np.ones(image.shape, dtype = "uint8") * 50
subtracted = cv2.subtract(image, M)
cv2.imshow("Subtracted", subtracted)
cv2.waitKey(0)
except KeyboardInterrupt: # except the program gets interrupted by Ctrl+C on the keyboard.
if (egpg != None): egpg.stop() # stop motors
print("\n*** Ctrl-C detected - Finishing up")
sleep(1)
if (egpg != None): egpg.stop()
if Carl: runLog.logger.info("Finished")
sleep(1)
if (__name__ == '__main__'): main()
|
[
"slowrunner@users.noreply.gethub.com"
] |
slowrunner@users.noreply.gethub.com
|
6b692d10b1730270b76669e1b35e9650edd7aaf2
|
01dfd817931803f5ca3a41832171082e84323d9e
|
/apps/applogReg/urls.py
|
fd05ed8fe6c674eb9ffb88c415767ef447c8705f
|
[] |
no_license
|
alialwahish/rep92
|
41d715b043233a85ce968d835a3c7ba303593445
|
743e2684783f8f86df9e5b8493ab267679e250bc
|
refs/heads/master
| 2022-11-26T21:33:23.643273
| 2018-05-23T01:22:42
| 2018-05-23T01:22:42
| 134,494,751
| 0
| 0
| null | 2022-11-22T01:06:15
| 2018-05-23T01:20:24
|
Python
|
UTF-8
|
Python
| false
| false
| 484
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$',views.index),
url(r'^main$',views.main),
url(r'^login$',views.login),
url(r'^register$',views.register),
url(r'^quotes$',views.quotes),
    url(r'^logout$',views.logout),
url(r'^add_quote$',views.add_quote),
url(r'^add_fav/(?P<id>\d+)/$',views.add_fav),
url(r'^view_user/(?P<id>\d+)$',views.view_user),
url(r'^remove_fav_quote/(?P<id>\d+)/$',views.remove_fav_quote),
]
|
[
"bayati.ali@icloud.com"
] |
bayati.ali@icloud.com
|
3569db6b3ac516591e08da38e3f8674310ee5db2
|
2e29ed138ab0fdb7e0a6e87b7c52c097b350fecf
|
/MechPro Tech/randomElementSets.py
|
be6c201ac4e08bf05befb843bb10394f4b91df6b
|
[] |
no_license
|
ronniegeiger/Abaqus-Scripts
|
1e9c66664bd7dc7e5264bf763f15936eadcff529
|
c071bbfe0e6c54148dfd4a23f786f017dfef4ae4
|
refs/heads/master
| 2023-03-18T06:33:13.690549
| 2018-08-14T11:37:07
| 2018-08-14T11:37:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,431
|
py
|
# Script to create element sets based on % distribution
from textRepr import prettyPrint as pp
import random
N_150mm = 3591
N_100mm = 7271
N_50mm = 38778
n_200mm = 2020
n_150mm = 3591
n_100mm = 8079
n_50mm = 32315
N_total = float(N_150mm + N_100mm + N_50mm + n_200mm + n_150mm + n_100mm + n_50mm)
elset = {}
elset['N_150mm'] = []
elset['N_100mm'] = []
elset['N_50mm'] = []
elset['n_200mm'] = []
elset['n_150mm'] = []
elset['n_100mm'] = []
elset['n_50mm'] = []
for i in range(1, int(N_total) + 1):  # one id per element, 1..95645
rnd = random.random()
if rnd <= N_150mm/N_total:
elset['N_150mm'].append(i)
elif rnd > N_150mm/N_total and rnd <= (N_150mm+N_100mm)/N_total:
elset['N_100mm'].append(i)
elif rnd > (N_150mm+N_100mm)/N_total and rnd <= (N_150mm+N_100mm+N_50mm)/N_total:
elset['N_50mm'].append(i)
elif rnd > (N_150mm+N_100mm+N_50mm)/N_total and rnd <= (N_150mm+N_100mm+N_50mm+n_200mm)/N_total:
elset['n_200mm'].append(i)
elif rnd > (N_150mm+N_100mm+N_50mm+n_200mm)/N_total and rnd <= (N_150mm+N_100mm+N_50mm+n_200mm+n_150mm)/N_total:
elset['n_150mm'].append(i)
elif rnd > (N_150mm+N_100mm+N_50mm+n_200mm+n_150mm)/N_total and rnd <= (N_150mm+N_100mm+N_50mm+n_200mm+n_150mm+n_100mm)/N_total:
elset['n_100mm'].append(i)
elif rnd > (N_150mm+N_100mm+N_50mm+n_200mm+n_150mm+n_100mm)/N_total and rnd <= (N_150mm+N_100mm+N_50mm+n_200mm+n_150mm+n_100mm+n_50mm)/N_total:
elset['n_50mm'].append(i)
else:
print 'Number %i not assigned into an element set'%i
print 'N150mm %s, %s'%(len(elset['N_150mm'])/N_total, N_150mm/N_total)
print 'N100mm %s, %s'%(len(elset['N_100mm'])/N_total, N_100mm/N_total)
print 'N50mm %s, %s'%( len(elset['N_50mm'])/N_total, N_50mm/N_total)
print 'n200mm %s, %s'%(len(elset['n_200mm'])/N_total, n_200mm/N_total)
print 'n150mm %s, %s'%(len(elset['n_150mm'])/N_total, n_150mm/N_total)
print 'n100mm %s, %s'%(len(elset['n_100mm'])/N_total, n_100mm/N_total)
print 'n50mm %s, %s'%( len(elset['n_50mm'])/N_total, n_50mm/N_total)
file = open('elementSets.txt', 'w')
for key, elementList in elset.items():
if len(elementList) > 0:
file.write('*Elset, elset=%s \n'%key)
myList = [elementList[i:i+10] for i in range(0,len(elementList), 10)]
for subList in myList:
if len(subList) == 10:
file.write('%s, \n'%str(subList).strip('[]'))
else:
file.write('%s\n'%str(subList).strip('[]'))
file.close()
|
[
"noreply@github.com"
] |
ronniegeiger.noreply@github.com
|
343ba2e8c8dbc7bad29a57b1416f628800566367
|
b0bd3342c244ebf30ae5ab29daa078f2b39010f7
|
/SimpleEmbedModel.py
|
eea650c39544213933830a7ab48737a89881b223
|
[] |
no_license
|
naiqili/itime_learning
|
30a8af7f1234277162ccdd4c69cd9f9a4a7ab412
|
d9b191bb32a7e49cb99443d7dccea5bb392aee90
|
refs/heads/master
| 2021-06-19T04:54:06.239320
| 2017-06-26T13:35:39
| 2017-06-26T13:35:39
| 92,792,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,211
|
py
|
import tensorflow as tf
import numpy as np
class SimpleEmbedModel():
def __init__(self, conf):
self.conf = conf
self.uif_mat = np.load(conf.uif_path)
self.iur_mat = np.load(conf.iur_path)
def add_variables(self, reuse=False):
conf = self.conf
with tf.variable_scope('Fixed', reuse=reuse):
self.uif = tf.get_variable('uif',
[conf.user_size,
conf.item_size,
len(conf.recAlgos)],
initializer=tf.constant_initializer(self.uif_mat),
trainable=False)
self.iur = tf.get_variable('iur',
[conf.item_size,
conf.user_size+conf.feat_size],
initializer=tf.constant_initializer(self.iur_mat),
trainable=False)
with tf.variable_scope('Weights', reuse=reuse):
self.v1 = tf.get_variable('v1',
[len(conf.recAlgos), 1])
self.v2 = tf.get_variable('v2',
[conf.z_size, 1])
self.W_z = tf.get_variable('W_z',
[conf.z_size,
conf.embed_size,
conf.embed_size])
with tf.variable_scope('Embeddings', reuse=reuse):
self.embed = tf.get_variable('embed',
[conf.user_size+conf.feat_size, conf.embed_size])
self.ph_selected_items = tf.placeholder(tf.int32, shape=(None,))
self.ph_all_items = tf.placeholder(tf.int32, shape=(None,))
self.ph_groundtruth = tf.placeholder(tf.int32, shape=[])
self.ph_user = tf.placeholder(tf.int32, shape=[])
def build_model(self):
uif_u = self.uif[self.ph_user]
uif_u = tf.contrib.layers.dropout(uif_u, self.conf.keep_prob, is_training=self.conf.is_training) # Add dropout layer
score1 = tf.matmul(uif_u, self.v1)
def fn_i0(): # (choices, score_sum) when i = 0
return (self.ph_all_items, tf.squeeze(score1))
def fn_not_i0(): # (choices, score_sum) when i != 0
selected_items = self.ph_selected_items
iur = self.iur
iur = tf.contrib.layers.dropout(iur, self.conf.keep_prob, is_training=self.conf.is_training) # Add dropout layer
iur_embed = tf.matmul(iur, self.embed)
se = tf.nn.embedding_lookup(iur, selected_items)
se_embed = tf.matmul(se, self.embed)
se_embed = tf.transpose(se_embed)
# see test/einsum_test.py
iur_w = tf.einsum('nu,zud->znd', iur_embed, self.W_z)
iur_w_se = tf.einsum('znu,uk->znk', iur_w, se_embed)
mp_iur_w_se = tf.reduce_max(iur_w_se, axis=2) # z x n
mp_iur_w_se = tf.transpose(mp_iur_w_se) # n x z
score2 = tf.matmul(mp_iur_w_se, self.v2) # n x 1
score_sum = tf.squeeze(score1 + score2) # vec of n
choices = tf.reshape(tf.sparse_tensor_to_dense(tf.sets.set_difference([self.ph_all_items], [selected_items])), [-1]) # vec of remaining choices
return (choices, score_sum)
i = tf.shape(self.ph_selected_items)[0]
choices, score_sum = tf.cond(tf.equal(i, 0),
lambda: fn_i0(),
lambda: fn_not_i0())
eff_score = tf.gather(score_sum, choices, validate_indices=False) # vec of choices
_argmax = tf.argmax(eff_score, axis=0)
_pred = tf.gather(choices, _argmax, validate_indices=False)
_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=score_sum, labels=self.ph_groundtruth)
self.loss = _loss
self.pred = _pred
self.loss_summary = tf.summary.scalar('Loss', self.loss)
if self.conf.is_training:
self.train_op = tf.train.AdamOptimizer(self.conf.lr).minimize(self.loss)
|
[
"naiqil@student.unimelb.edu.au"
] |
naiqil@student.unimelb.edu.au
|
0ca716046914007256d10bbb37b5f1a4cafa8580
|
71b7b6d84a61f514b038fac7741e6d16973fcaa9
|
/build/object_manipulation_msgs/catkin_generated/pkg.installspace.context.pc.py
|
3e6fd0a4ff97ee24b62a27bea0521aebb8561551
|
[] |
no_license
|
YiKangJ/perception_driven_ws
|
15c02e523f1a708fe63b216d73019c8c2bde97a1
|
0a0f8fcbe3f5fed26439f449999b85f1e38c0f70
|
refs/heads/master
| 2020-04-01T19:47:48.372111
| 2018-10-18T06:17:57
| 2018-10-18T06:17:57
| 153,571,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/jyk/perception_driven_ws/install/include".split(';') if "/home/jyk/perception_driven_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;actionlib_msgs;std_msgs;geometry_msgs;sensor_msgs;trajectory_msgs;sensor_msgs;household_objects_database_msgs;shape_msgs;manipulation_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "object_manipulation_msgs"
PROJECT_SPACE_DIR = "/home/jyk/perception_driven_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"jinyikangjyk@163.com"
] |
jinyikangjyk@163.com
|
373e4f049cea8386fbd57e288660868ad6f2a5ab
|
a1bffcd8854e1843e56bb812d4d83b3161a5211e
|
/tests/unit/modules/storage/netapp/test_na_ontap_net_port.py
|
07ea6a104c088e71872248865edf07e619628ac8
|
[] |
no_license
|
goneri/ansible.community
|
1a71f9d98c164b77f8ed2ed7f558b4963005ff8f
|
f26f612dd0a3154050d90b51a75502018c95f6e4
|
refs/heads/master
| 2020-12-29T07:47:35.353515
| 2020-01-22T17:43:18
| 2020-01-22T17:43:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,647
|
py
|
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import print_function
import json
import pytest
from ansible_collections.ansible.community.tests.unit.compat import unittest
from ansible_collections.ansible.community.tests.unit.compat.mock import patch, Mock
from ansible_collections.ansible.community.plugins.module_utils import basic
from ansible_collections.ansible.community.plugins.module_utils._text import to_bytes
import ansible_collections.ansible.community.plugins.module_utils.netapp as netapp_utils
from ansible_collections.ansible.community.plugins.modules.na_ontap_net_port \
import NetAppOntapNetPort as port_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None, data=None):
''' save arguments '''
self.type = kind
self.data = data
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.type == 'port':
xml = self.build_port_info(self.data)
self.xml_out = xml
return xml
@staticmethod
def build_port_info(port_details):
''' build xml data for net-port-info '''
xml = netapp_utils.zapi.NaElement('xml')
attributes = {
'num-records': 1,
'attributes-list': {
'net-port-info': {
# 'port': port_details['port'],
'mtu': port_details['mtu'],
'is-administrative-auto-negotiate': 'true',
'ipspace': 'default',
'administrative-flowcontrol': port_details['flowcontrol_admin'],
'node': port_details['node']
}
}
}
xml.translate_struct(attributes)
return xml
class TestMyModule(unittest.TestCase):
''' a group of related Unit Tests '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.server = MockONTAPConnection()
self.mock_port = {
'node': 'test',
'ports': 'a1',
'flowcontrol_admin': 'something',
'mtu': '1000'
}
def mock_args(self):
return {
'node': self.mock_port['node'],
'flowcontrol_admin': self.mock_port['flowcontrol_admin'],
'ports': [self.mock_port['ports']],
'mtu': self.mock_port['mtu'],
'hostname': 'test',
'username': 'test_user',
'password': 'test_pass!'
}
def get_port_mock_object(self, kind=None, data=None):
"""
Helper method to return an na_ontap_net_port object
:param kind: passes this param to MockONTAPConnection()
:return: na_ontap_net_port object
"""
obj = port_module()
obj.autosupport_log = Mock(return_value=None)
if data is None:
data = self.mock_port
obj.server = MockONTAPConnection(kind=kind, data=data)
return obj
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
port_module()
print('Info: %s' % exc.value.args[0]['msg'])
def test_get_nonexistent_port(self):
''' Test if get_net_port returns None for non-existent port '''
set_module_args(self.mock_args())
result = self.get_port_mock_object().get_net_port('test')
assert result is None
def test_get_existing_port(self):
''' Test if get_net_port returns details for existing port '''
set_module_args(self.mock_args())
result = self.get_port_mock_object('port').get_net_port('test')
assert result['mtu'] == self.mock_port['mtu']
assert result['flowcontrol_admin'] == self.mock_port['flowcontrol_admin']
def test_successful_modify(self):
''' Test modify_net_port '''
data = self.mock_args()
data['mtu'] = '2000'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_port_mock_object('port').apply()
assert exc.value.args[0]['changed']
def test_successful_modify_multiple_ports(self):
''' Test modify_net_port '''
data = self.mock_args()
data['ports'] = ['a1', 'a2']
data['mtu'] = '2000'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_port_mock_object('port').apply()
assert exc.value.args[0]['changed']
@patch('ansible_collections.ansible.community.plugins.modules.na_ontap_net_port.NetAppOntapNetPort.get_net_port')
def test_get_called(self, get_port):
''' Test get_net_port '''
data = self.mock_args()
data['ports'] = ['a1', 'a2']
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_port_mock_object('port').apply()
assert get_port.call_count == 2
|
[
"ansible_migration@example.com"
] |
ansible_migration@example.com
|
7885a77c550774f08a3cff1f19f60dcc326bf6ef
|
f780b05549bea9ecb0b80e4e9ea63376aa59f962
|
/iplauction/migrations/0004_auto_20160305_1217.py
|
8a77f654b1476c81a8fd5872f2939d3becf242b4
|
[] |
no_license
|
PunitGr/django-kickstart
|
ebff3e7e60a27091468f45a35e3b562954609168
|
5e8a21b0408ade1be4860deb3bc2cb80f033b159
|
refs/heads/master
| 2021-01-24T20:26:06.970847
| 2016-03-07T14:33:17
| 2016-03-07T14:33:17
| 52,964,874
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-03-05 12:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('iplauction', '0003_auto_20160305_1102'),
]
operations = [
migrations.AddField(
model_name='team',
name='slug',
field=models.SlugField(blank=True, default='', null=True),
),
migrations.AlterField(
model_name='player',
name='slug',
field=models.SlugField(blank=True, default='', null=True),
),
]
|
[
"groove679@gmail.com"
] |
groove679@gmail.com
|
34c8f2b1fad1c29421bec555fae6b807a7102a8e
|
599709e7687a78f92b268315590d6ad750ce97d6
|
/calc/opt_h_pi/1skp_l/gto_part_shift/w1_dense/search/calc.py
|
2e3b85307b244e4655cabc6fd13518b2f7d18fa3
|
[] |
no_license
|
ReiMatsuzaki/cbasis2
|
b99d096150d87f9301ed0e34f7be5f0203e4a81e
|
86f21146fab6fc6f750d02fb2200ea94616ca896
|
refs/heads/master
| 2021-01-19T23:15:32.864686
| 2017-04-27T07:29:26
| 2017-04-27T07:29:26
| 88,953,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,798
|
py
|
import sys
import numpy as np
import pandas as pd
from itertools import product, combinations
import subprocess
sys.path.append("../../../../../../src_py/nnewton")
sys.path.append("../../../../../../r1basis")
from r1basis import *
from opt_green import *
## one GTO optimization value is 0.0105604,-0.0548991
with open("search.out", "w") as f:
print_timestamp("start", f)
yos = [0.0001, 0.00014, 0.0002, 0.0003, 0.0005, 0.0007,
0.001, 0.0014, 0.002, 0.003, 0.005, 0.007,
0.01, 0.014, 0.02, 0.03, 0.05, 0.07]
xos = [0.0001, 0.00014, 0.0002, 0.0003, 0.0005, 0.0007,
0.001, 0.0014, 0.002, 0.003, 0.005, 0.007,
0.01, 0.014, 0.02, 0.03, 0.05, 0.07]
zos = [ x-1.0j*y for y in yos for x in xos]
num = 20
numopt = 5
z0 = 0.005
z1 = 20.0
r = (z1/z0)**(1.0/(num-1))
zs = [z0*r**n for n in range(num)]
z0s = zs[0:numopt]
z1s = zs[numopt:]
f = open('search.csv', 'w')
for z0 in zos:
basis_info = [('shift', True, 2, z0s, z0),
('shift', False, 2, z1s, 0.0)]
res = opt_main(
basis_type = 'GTO',
basis_info = basis_info,
target = 'h_pi',
channel= '1s->kp',
dipole = 'length',
w0 = 1.0,
tol = pow(10.0, -5.0),
maxit = 50,
conv = 'grad',
fdif = 0.0001,
grad = False,
hess = False,
outfile = 'res.out',
print_level = 0)
opt_res = res['w_res_list'][0][1]
z0 = opt_res.x[0]
eps = 0.00001
if(z0.real > 0.0 and z0.imag < 0.0 and
100.0 > z0.real and z0.imag > -100.0 and
opt_res.success):
subprocess.call("cat res.out >> search.out", shell=True)
f.write("{0},{1}\n".format(z0.real, z0.imag))
f.close()
with open("search.out", "a") as f:
print_timestamp("end", f)
|
[
"matsuzaki.rei@sepia.chem.keio.ac.jp"
] |
matsuzaki.rei@sepia.chem.keio.ac.jp
|
020cc492835e3f6213e03d6a0067ede9641141eb
|
9c73dd3043f7db7c9ec76d560484e99ad134fdb6
|
/students/douglas_klos/lesson05/activity/code/rdbms_api.py
|
1d84511af15a33b1c59531bb3f00eef544715492
|
[] |
no_license
|
UWPCE-PythonCert-ClassRepos/py220-online-201904-V2
|
546b316025b680ca28d24b523663095398616b13
|
ac12beeae8aa57135bbcd03ac7a4f977fa3bdb56
|
refs/heads/master
| 2022-12-10T03:14:25.514630
| 2019-06-11T02:14:17
| 2019-06-11T02:14:17
| 179,139,181
| 1
| 19
| null | 2022-12-08T01:43:38
| 2019-04-02T18:49:10
|
Python
|
UTF-8
|
Python
| false
| false
| 985
|
py
|
# Assumed header for runnability: Model and the field classes below come from
# peewee; the database file name here is illustrative.
from peewee import *

database = SqliteDatabase("personjob.db")


class BaseModel(Model):
    class Meta:
        database = database
class Person(BaseModel):
"""
This class defines Person, which maintains details of someone
for whom we want to research career to date.
"""
person_name = CharField(primary_key=True, max_length=30)
lives_in_town = CharField(max_length=40)
nickname = CharField(max_length=20, null=True)
class Job(BaseModel):
"""
This class defines Job, which maintains details of past Jobs
held by a Person.
"""
job_name = CharField(primary_key=True, max_length=30)
start_date = DateField(formats="YYYY-MM-DD")
end_date = DateField(formats="YYYY-MM-DD")
salary = DecimalField(max_digits=7, decimal_places=2)
person_employed = ForeignKeyField(Person, related_name="was_filled_by", null=False)
new_person = Person.create(
person_name="Fred", lives_in_town="Seattle", nickname="Fearless"
)
new_person.save()
aperson = Person.get(Person.person_name == "Fred")
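A short usage sketch built on the schema above: create a Job for Fred and list his employment history with a join (the job details are made-up sample data, not from the original):

# Hypothetical usage; job details are sample data.
new_job = Job.create(
    job_name="Analyst",
    start_date="2018-01-01",
    end_date="2019-01-01",
    salary=55000.00,
    person_employed=aperson,
)

# Join Job to Person and iterate over Fred's employment history.
query = (Job
         .select(Job, Person)
         .join(Person)
         .where(Person.person_name == "Fred"))
for job in query:
    print(job.job_name, job.start_date, job.end_date)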
|
[
"dougklos@gmail.com"
] |
dougklos@gmail.com
|
0ca1c1b2b6d4a5a92a1cf44d6e0600fa459c73fa
|
78f43f8bd07ae0fc91738a63cd7bbca08ae26066
|
/leetcode/interval/merge_interval.py
|
9f4a681237c817d9390d7cf79195feeb3effddb0
|
[] |
no_license
|
hanrick2000/LeetcodePy
|
2f3a841f696005e8f0bf4cd33fe586f97173731f
|
b24fb0e7403606127d26f91ff86ddf8d2b071318
|
refs/heads/master
| 2022-04-14T01:34:05.044542
| 2020-04-12T06:11:29
| 2020-04-12T06:11:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,315
|
py
|
from leetcode.interval.interval import Interval


class Solution(object):
    def merge(self, intervals):
        """
        :type intervals: List[Interval]
        :rtype: List[Interval]
        """
        if len(intervals) <= 1:
            return intervals

        def overlap(l1, l2):
            """
            Assumes l1.start <= l2.start (guaranteed by the sort below).
            :type l1: Interval
            :type l2: Interval
            """
            return l2.start <= l1.end

        def merge_one(l1, l2):
            """
            :type l1: Interval
            :type l2: Interval
            """
            return Interval(l1.start, max(l1.end, l2.end))

        def eq(l1, l2):
            """
            :type l1: Interval
            :type l2: Interval
            """
            return l1.start == l2.start and l1.end == l2.end

        ret = []
        # Greedy: process intervals by earliest start time.
        sorted_itvs = sorted(intervals, key=lambda x: x.start)
        merged_itv = sorted_itvs[0]
        for i in range(1, len(sorted_itvs)):
            itv = sorted_itvs[i]
            if overlap(merged_itv, itv):
                merged_itv = merge_one(merged_itv, itv)
            else:
                ret.append(merged_itv)
                merged_itv = itv
        # Flush the last pending interval (the eq check is defensive;
        # ret never actually ends with merged_itv at this point).
        if not ret or not eq(ret[-1], merged_itv):
            ret.append(merged_itv)
        return ret
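The import at the top points at a project-local `Interval` class; a minimal stand-in with the same `.start`/`.end` shape (an assumption inferred from the code) makes the snippet testable on its own:

# Minimal stand-in for leetcode.interval.interval.Interval (assumed shape).
class Interval(object):
    def __init__(self, s=0, e=0):
        self.start = s
        self.end = e

    def __repr__(self):
        return "[%d, %d]" % (self.start, self.end)


if __name__ == "__main__":
    itvs = [Interval(1, 3), Interval(2, 6), Interval(8, 10), Interval(15, 18)]
    print(Solution().merge(itvs))  # expected: [[1, 6], [8, 10], [15, 18]]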
|
[
"dofu@ebay.com"
] |
dofu@ebay.com
|
402200e5c6c3d708a3beb5e97137c59c1ddd8bd8
|
cf5f24e5a32f8cafe90d4253d727b1c0457da6a4
|
/algorithm/boj_21317.py
|
55af763cbe118028143f347a61cddc9f96f9e1d5
|
[] |
no_license
|
seoljeongwoo/learn
|
537659ca942875f6846646c2e21e1e9f2e5b811e
|
5b423e475c8f2bc47cb6dee09b8961d83ab08568
|
refs/heads/main
| 2023-05-04T18:07:27.592058
| 2021-05-05T17:32:50
| 2021-05-05T17:32:50
| 324,725,000
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 624
|
py
|
import sys
input = sys.stdin.readline

# dp[curr][ok]: minimum energy to reach stone n from stone curr, with `ok`
# uses of the special 3-stone jump (cost k) still available.
def solve(curr, ok):
    if curr > n: return inf
    if curr == n: return 0
    if dp[curr][ok] != inf: return dp[curr][ok]
    dp[curr][ok] = solve(curr+1, ok) + jump[curr][0]                      # small jump
    dp[curr][ok] = min(dp[curr][ok], solve(curr+2, ok) + jump[curr][1])   # big jump
    if ok: dp[curr][ok] = min(dp[curr][ok], solve(curr+3, ok-1) + k)      # one-shot 3-stone jump
    return dp[curr][ok]

n = int(input())
jump = [[0]*2 for _ in range(n)]
for i in range(n-1):
    small, big = map(int, input().split())
    jump[i+1][0], jump[i+1][1] = small, big
k = int(input())
inf = int(1e9) + 5
dp = [[inf]*2 for _ in range(n+1)]
print(solve(1, 1))
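Since the script reads everything from stdin, a quick way to exercise it outside the judge is to pipe a sample case in (the numbers below are made up, not an official test case):

import subprocess

# Hypothetical smoke test: n=4 stones, cost pairs for stones 2..4, then k.
sample = b"4\n1 3\n2 4\n3 5\n10\n"
result = subprocess.run(
    ["python3", "boj_21317.py"],
    input=sample,
    capture_output=True,
)
print(result.stdout.decode().strip())  # minimum total energy for this input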
|
[
"noreply@github.com"
] |
seoljeongwoo.noreply@github.com
|
ef16a7892614d638cd0b989921e2c479a82bda61
|
3ef70fe63acaa665e2b163f30f1abd0a592231c1
|
/stackoverflow/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/requests/api.py
|
abada96d4627a52bf0f6040ee2e08b0eff32c77c
|
[
"MIT"
] |
permissive
|
wistbean/learn_python3_spider
|
14914b63691ac032955ba1adc29ad64976d80e15
|
40861791ec4ed3bbd14b07875af25cc740f76920
|
refs/heads/master
| 2023-08-16T05:42:27.208302
| 2023-03-30T17:03:58
| 2023-03-30T17:03:58
| 179,152,420
| 14,403
| 3,556
|
MIT
| 2022-05-20T14:08:34
| 2019-04-02T20:19:54
|
Python
|
UTF-8
|
Python
| false
| false
| 6,253
|
py
|
# -*- coding: utf-8 -*-

"""
requests.api
~~~~~~~~~~~~

This module implements the Requests API.

:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""

from . import sessions


def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the body of the :class:`Request`.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
        ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
        or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
        defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
        to add for the file.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How many seconds to wait for the server to send data
        before giving up, as a float, or a :ref:`(connect timeout, read
        timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) Either a boolean, in which case it controls whether we verify
        the server's TLS certificate, or a string, in which case it must be a path
        to a CA bundle to use. Defaults to ``True``.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'https://httpbin.org/get')
      >>> req
      <Response [200]>
    """

    # By using the 'with' statement we are sure the session is closed, thus we
    # avoid leaving sockets open which can trigger a ResourceWarning in some
    # cases, and look like a memory leak in others.
    with sessions.Session() as session:
        return session.request(method=method, url=url, **kwargs)


def get(url, params=None, **kwargs):
    r"""Sends a GET request.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    kwargs.setdefault('allow_redirects', True)
    return request('get', url, params=params, **kwargs)


def options(url, **kwargs):
    r"""Sends an OPTIONS request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    kwargs.setdefault('allow_redirects', True)
    return request('options', url, **kwargs)


def head(url, **kwargs):
    r"""Sends a HEAD request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    kwargs.setdefault('allow_redirects', False)
    return request('head', url, **kwargs)


def post(url, data=None, json=None, **kwargs):
    r"""Sends a POST request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    return request('post', url, data=data, json=json, **kwargs)


def put(url, data=None, **kwargs):
    r"""Sends a PUT request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    return request('put', url, data=data, **kwargs)


def patch(url, data=None, **kwargs):
    r"""Sends a PATCH request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    return request('patch', url, data=data, **kwargs)


def delete(url, **kwargs):
    r"""Sends a DELETE request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    return request('delete', url, **kwargs)
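A short usage sketch of the helpers above, reusing the httpbin URL from the docstring and the (connect, read) timeout form it documents:

import requests

# GET with query parameters and an explicit (connect, read) timeout tuple.
resp = requests.get(
    "https://httpbin.org/get",
    params={"q": "example"},
    timeout=(3.05, 27),
)
resp.raise_for_status()
print(resp.json()["args"])  # {'q': 'example'}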
|
[
"354142480@qq.com"
] |
354142480@qq.com
|