blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
32c3db21179f71d7d3b81b90f71abe18b6ebcc50 | a757953b0fab1c4d05ce6b70b68b411df42bb887 | /raredecay/run_config/config.py | bf95bbd8a144a333eb7c63e6c9faeb67b38b1b7f | [
"Apache-2.0"
] | permissive | efueger/raredecay | 4e6ae0cff4bde925e2985335793dc7a138a5c772 | 4a92742016b2aea27e5156fee168d69d2c0361d0 | refs/heads/master | 2021-01-01T20:37:16.970625 | 2017-07-31T14:48:35 | 2017-07-31T14:48:35 | 98,898,305 | 0 | 0 | null | 2017-07-31T14:31:54 | 2017-07-31T14:31:54 | null | UTF-8 | Python | false | false | 1,379 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 16 13:44:43 2016
The configuration file for external operations.
@author: Jonas Eschle "Mayou36"
"""
# Human-readable name of this run; reused inside OUTPUT_CFG below.
RUN_NAME = 'Classifier optimization'

# Free-form message stored alongside the run's output.
# NOTE: the trailing space is kept so the value matches the original
# concatenated string exactly (the redundant str(...) wrapper was removed).
run_message = "This could be your advertisement "

# Where and how the run's artifacts are written.
OUTPUT_CFG = dict(
    run_name=RUN_NAME,
    output_path=None,            # None -> framework chooses a default path
    del_existing_folders=False,  # never silently overwrite previous results
    output_folders=dict(
        log="log",
        plots="plots",
        results="results",
        config="config"
    )
)

# Options forwarded to the figure-saving helper.
save_fig_cfg = dict(
    file_format=['png', 'pdf'],  # each figure is saved once per format
    to_pickle=True,              # additionally pickle the figure objects
    dpi=150,
    figsize=(2, 10)
)
# ==============================================================================
# LOGGER CONFIGURATION BEGIN
# ==============================================================================
logger_cfg = dict(
    logging_mode='both',  # where the logger writes to:
    # take 'both', 'file', 'console' or 'no'
    log_level_file='debug',
    # specifies the level to be logged to the file
    log_level_console='warning',
    # specify the level to be logged to the console
    overwrite_file=True,
    # True -> overwrite the log file each run; False -> a new file per run
    log_file_name='logfile_',
    # the beginning of the name of the logfile, like 'project1'
    log_file_dir=None  # will be set automatically
)
| [
"mayou36@jonas.eschle.com"
] | mayou36@jonas.eschle.com |
84384e7ac129c854281286bde8c8fa39109edf50 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/409/usersdata/308/79040/submittedfiles/av1_programa1.py | cf9b07d9090afe0bfb3912b3b86c0d8a9fbf8726 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | # -*- coding: utf-8 -*-
# Read one integer from the user and report its parity.
x = int(input('Informe o valor: '))
# Even numbers leave no remainder when divided by 2.
print('PAR' if x % 2 == 0 else 'IMPAR')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d25ac17866c59602f3a74c473525baa8b1525ecc | 42186fa6507999ce60d334a0f04d9ae2127579cd | /shanghai/crypto/aessss/aes.py | 28f4167b8afbc6cd5a1d96fa31eac1158ba77de1 | [] | no_license | Imtinmin/CTF_Challenge | ef8b62b3a4a1741d814d989f795a243257ff6f2b | ea276596f9effdbe0cf9ef4457e2e676e652bb74 | refs/heads/master | 2022-12-21T12:40:40.625562 | 2020-04-30T03:27:56 | 2020-04-30T03:27:56 | 158,999,004 | 18 | 3 | null | 2022-12-10T04:34:27 | 2018-11-25T04:53:04 | PHP | UTF-8 | Python | false | false | 6,458 | py | # -*- coding:utf-8 -*-
import random
import sys
import string
from hashlib import sha256
import SocketServer
from Crypto.Cipher import AES
from secret import FLAG, IV, KEY
class Task(SocketServer.BaseRequestHandler):
    # One Task instance is created per client connection (see ThreadedServer
    # below); `handle` runs the whole interactive session.  Python 2 code:
    # strings are byte strings throughout.
    def proof_of_work(self):
        # Anti-abuse gate: the client must brute-force the first 4 characters
        # of a random 20-char alphanumeric string, given sha256 of the whole
        # string and its last 16 characters.  Returns True on success.
        proof = ''.join(
            [random.choice(string.ascii_letters+string.digits) for _ in xrange(20)])
        # print proof
        digest = sha256(proof).hexdigest()
        self.request.send("sha256(XXXX+%s) == %s\n" % (proof[4:], digest))
        self.request.send('Give me XXXX:')
        x = self.request.recv(10)
        x = x.strip()
        if len(x) != 4 or sha256(x+proof[4:]).hexdigest() != digest:
            return False
        return True
    def pad(self, s):
        # Pad `s` up to 256 bytes with chr(pad_len) filler (PKCS#7-style over
        # a single 256-byte block), then scatter: byte `index` is written to
        # position s_box[index].  The table is a bijection on 0..255, so this
        # is a fixed byte-position permutation.
        s += (256 - len(s)) * chr(256 - len(s))
        ret = ['\x00' for _ in range(256)]
        for index, pos in enumerate(self.s_box):
            ret[pos] = s[index]
        return ''.join(ret)
    def unpad(self, s):
        # Inverse of `pad`: undo the scatter via invs_box (the inverse
        # permutation), then strip the padding using the length byte at the
        # tail, restoring the original message.
        ret = ['\x00' for _ in range(256)]
        for index, pos in enumerate(self.invs_box):
            ret[pos] = s[index]
        return ''.join(ret[0:-ord(ret[-1])])
    # The two tables below match the AES S-box and inverse S-box values; here
    # they are used purely as mutually-inverse position permutations in
    # pad/unpad, not for any cipher arithmetic.
    s_box = [
        0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
        0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
        0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
        0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
        0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
        0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
        0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
        0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
        0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
        0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
        0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
        0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
        0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
        0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
        0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
        0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
    ]
    invs_box = [
        0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
        0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
        0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
        0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
        0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
        0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
        0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
        0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
        0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
        0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
        0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
        0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
        0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
        0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
        0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
        0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D
    ]
    def encrypt(self, msg):
        # A fresh AES-CBC cipher per call with the secret KEY and a *fixed*
        # IV (both imported from `secret`); returns the ciphertext as a hex
        # string (Python 2 str.encode('hex')).
        cipher = AES.new(KEY, AES.MODE_CBC, IV)
        return cipher.encrypt(msg).encode('hex')
    def handle(self):
        # Full client session: proof-of-work gate, then a menu loop:
        #   1 -> send AES-CBC(pad(flag)) as hex
        #   2 -> keep the first flag_len bytes of the current plaintext and
        #        append client-supplied bytes, then re-pad
        #   3 -> encrypt arbitrary client-supplied (padded) data
        #   anything else -> send a taunt and close the connection.
        if not self.proof_of_work():
            return
        self.request.settimeout(15)
        req = self.request
        flag_len = len(FLAG)
        assert(flag_len == 33)
        self.flag = self.pad(FLAG)
        assert(len(self.flag) == 256)
        while True:
            req.sendall(
                'Welcome to AES(WXH) encrypt system.\n1. get encrypted flag.\n2. pad flag.\n3.Do some encrypt.\nYour choice:')
            cmd = req.recv(2).strip()
            try:
                cmd = int(cmd)
            except ValueError:
                # Non-numeric input falls through to the closing branch below.
                cmd = 0
            if cmd == 1:
                enc = self.encrypt(self.flag)
                req.sendall('Here is the encrypted flag: 0x%s\n' % enc)
            elif cmd == 2:
                req.sendall('Pad me something:')
                self.flag = self.unpad(self.flag)[
                    :flag_len] + req.recv(1024).strip()
                assert(len(self.flag) <= 256)
                self.flag = self.pad(self.flag)
                req.sendall('Done.\n')
            elif cmd == 3:
                req.sendall('What do you want to encrypt:')
                msg = self.pad(req.recv(1024).strip())
                assert(len(msg) <= 256)
                enc = self.encrypt(msg)
                req.sendall('Here is the encrypted message: 0x%s\n' % enc)
            else:
                req.sendall('Do not lose heart~ !% Once WXH AK IOI 2019 can Solved! WXH is the first in the tianxia!')
                req.close()
                return
class ThreadedServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    # Mixes ThreadingMixIn into TCPServer so each incoming connection is
    # handled by Task in its own thread; no extra behaviour is added.
    pass
if __name__ == "__main__":
    # Listen on all interfaces; Task.handle serves each connection.
    HOST, PORT = '0.0.0.0', 23333
    print 'Run in port:23333'
    server = ThreadedServer((HOST, PORT), Task)
    # NOTE(review): setting allow_reuse_address *after* construction has no
    # effect -- TCPServer.__init__ already bound the socket.  To actually get
    # SO_REUSEADDR it must be set on the class before instantiation.
    server.allow_reuse_address = True
    server.serve_forever()
| [
"954093370@qq.com"
] | 954093370@qq.com |
d882a075eccf5c70c14258c6c98aa38b84b83009 | e37fa62da82ae60561e59027f8626facc9728b4a | /learn_spyder/downloads/Chapter2/solutions/Exercise2-4.py | 8d0998a34134a66f4b644de906051a217539109b | [] | no_license | nhuntwalker/mystuff | ea05551ae17cfc0a6e4286a0dd97fe85f39bfe89 | bf2e79a1213fea1249ce93ef28d0caeffc710eb1 | refs/heads/master | 2021-01-19T01:04:22.215725 | 2016-06-23T00:22:22 | 2016-06-23T00:22:22 | 13,390,985 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | import ctypes as ct
# libc = ct.CDLL('libc.so.6') # in Linux/MacOS
libc = ct.cdll.msvcrt # in Windows
for c in range(128):
print(c, ' is a ctrl char' if libc.iscntrl(c) else 'is not a ctrl char')
| [
"nhuntwalker@gmail.com"
] | nhuntwalker@gmail.com |
1ce4e956fe58872f2719ab4f3c67b8c279caf0a8 | 8cf5c91fa744f49b40264061d4fd510ea761cf8f | /build/lib/dragonn/visualize_util.py | f672496759111da32eecdc588fa5a4607cc0eb20 | [
"MIT"
] | permissive | AvantiShri/dragonn | 0f38371ac7734099279f3b3e204565d9a663102f | aeb9674f39b71d07ff62d2c3745bef4a2e55b95f | refs/heads/master | 2020-04-23T18:01:34.394772 | 2019-02-18T06:07:40 | 2019-02-18T06:07:40 | 171,352,688 | 0 | 0 | MIT | 2019-02-18T20:37:59 | 2019-02-18T20:37:59 | null | UTF-8 | Python | false | false | 5,953 | py | # Adapted from Keras source code
# License: https://github.com/fchollet/keras/blob/master/LICENSE
import itertools
from keras.layers.containers import Graph, Sequential
from keras.layers.core import Merge
try:
    # pydot-ng is a fork of pydot that is better maintained
    import pydot_ng as pydot
except ImportError:
    # fall back on pydot if necessary
    import pydot
if not pydot.find_graphviz():
    # find_graphviz() locates the Graphviz executables on PATH; without them
    # pydot cannot render anything, so fail loudly at import time.
    raise RuntimeError("Failed to import pydot. You must install pydot"
                       " and graphviz for `pydotprint` to work.")
def layer_typename(layer):
    """Return the fully qualified type name of *layer*,
    e.g. ``keras.layers.core.Dense``."""
    cls = type(layer)
    return "{}.{}".format(cls.__module__, cls.__name__)
def get_layer_to_name(model):
    """Return a dict mapping each layer object of a Graph *model* to its name.

    Non-Graph models (e.g. Sequential) carry no layer names, so an empty
    dict is returned for them.
    """
    if not isinstance(model, Graph):
        return {}
    mapping = {}
    for name, layer in itertools.chain(model.nodes.items(),
                                       model.inputs.items(),
                                       model.outputs.items()):
        mapping[layer] = name
    return mapping
class ModelToDot(object):
    """
    This is a helper class which visits a keras model (Sequential or Graph) and
    returns a pydot.Graph representation.
    This is implemented as a class because we need to maintain various states.
    Use it as ```ModelToDot()(model)```
    Keras models can have an arbitrary number of inputs and outputs. A given
    layer can have multiple inputs but has a single output. We therefore
    explore the model by starting at its output and crawling "up" the tree.
    """
    def _pydot_node_for_layer(self, layer, label):
        """
        Returns the pydot.Node corresponding to the given layer.
        `label` specify the name of the layer (only used if the layer isn't yet
        associated with a pydot.Node)
        """
        # Check if this already exists (will be the case for nodes that
        # serve as input to more than one layer)
        if layer in self.layer_to_pydotnode:
            node = self.layer_to_pydotnode[layer]
        else:
            # First visit: mint a unique graph id and build the node label.
            layer_id = 'layer%d' % self.idgen
            self.idgen += 1
            label = label + " (" + layer_typename(layer) + ")"
            if self.show_shape:
                # Build the label that will actually contain a table with the
                # input/output (graphviz "record" shape syntax).
                outputlabels = str(layer.output_shape)
                if hasattr(layer, 'input_shape'):
                    inputlabels = str(layer.input_shape)
                elif hasattr(layer, 'input_shapes'):
                    # Layers with several inputs (e.g. Merge) expose a list.
                    inputlabels = ', '.join(
                        [str(ishape) for ishape in layer.input_shapes])
                else:
                    inputlabels = ''
                label = "%s\n|{input:|output:}|{{%s}|{%s}}" % (
                    label, inputlabels, outputlabels)
            node = pydot.Node(layer_id, label=label)
            self.g.add_node(node)
            self.layer_to_pydotnode[layer] = node
        return node
    def _process_layer(self, layer, layer_to_name=None, connect_to=None):
        """
        Process a layer, adding its node to the graph and creating edges to its
        outputs.
        `connect_to` specify where the output of the current layer will be
        connected
        `layer_to_name` is a dict mapping layer to their name in the Graph
        model. Should be {} when processing a Sequential model
        """
        # The layer can be a container layer, in which case we can recurse
        is_graph = isinstance(layer, Graph)
        is_seq = isinstance(layer, Sequential)
        if self.recursive and (is_graph or is_seq):
            # We got a container layer, recursively transform it
            if is_graph:
                child_layers = layer.outputs.values()
            else:
                # Sequential: only the last layer leads back up the chain.
                child_layers = [layer.layers[-1]]
            for l in child_layers:
                self._process_layer(l, layer_to_name=get_layer_to_name(layer),
                                    connect_to=connect_to)
        else:
            # This is a simple layer.
            label = layer_to_name.get(layer, '')
            layer_node = self._pydot_node_for_layer(layer, label=label)
            if connect_to is not None:
                self.g.add_edge(pydot.Edge(layer_node, connect_to))
            # Proceed upwards to the parent(s). Only Merge layers have more
            # than one parent
            if isinstance(layer, Merge):  # Merge layer
                for l in layer.layers:
                    self._process_layer(l, layer_to_name,
                                        connect_to=layer_node)
            elif hasattr(layer, 'previous') and layer.previous is not None:
                self._process_layer(layer.previous, layer_to_name,
                                    connect_to=layer_node)
    def __call__(self, model, recursive=True, show_shape=False,
                 connect_to=None):
        # Reset per-call state so one instance can be reused.
        self.idgen = 0
        # Maps keras layer to the pydot.Node representing them
        self.layer_to_pydotnode = {}
        self.recursive = recursive
        self.show_shape = show_shape
        self.g = pydot.Dot()
        self.g.set('rankdir', 'TB')
        self.g.set('concentrate', True)
        self.g.set_node_defaults(shape='record')
        if hasattr(model, 'outputs'):
            # Graph
            for name, l in model.outputs.items():
                self._process_layer(l, get_layer_to_name(model),
                                    connect_to=connect_to)
        else:
            # Sequential container
            self._process_layer(model.layers[-1], {}, connect_to=connect_to)
        return self.g
def to_graph(model, **kwargs):
    """Convert a Keras model to a ``pydot`` graph via :class:`ModelToDot`.

    `recursive` controls whether we recursively explore container layers
    `show_shape` controls whether the shape is shown in the graph
    """
    converter = ModelToDot()
    return converter(model, **kwargs)
def plot(model, to_file='model.png', **kwargs):
    """Render *model* with :func:`to_graph` and write it as PNG to *to_file*."""
    to_graph(model, **kwargs).write_png(to_file)
| [
"annashcherbina@gmail.com"
] | annashcherbina@gmail.com |
4eeccaf83120bc0894347b8916c466c18737500d | 4ecbc07cdc980f899510e0db2971ba754c474670 | /timm/models/layers/mlp.py | 05d076527cfb6f15bcf5f2830fa36777abbc5a1e | [
"BSD-3-Clause",
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"CC-BY-NC-4.0",
"Apache-2.0"
] | permissive | shrikumaran/pytorch-image-models | 8c74ec7d705b6b2fb223d519afdd61f33c108cec | 6d8272e92c3d5f13a9fdd91dfe1eb7fae6784589 | refs/heads/master | 2023-06-16T06:41:30.088230 | 2021-07-08T18:23:55 | 2021-07-08T18:51:12 | 384,009,847 | 0 | 0 | Apache-2.0 | 2021-07-08T05:22:35 | 2021-07-08T05:22:34 | null | UTF-8 | Python | false | false | 3,774 | py | """ MLP module w/ dropout and configurable activation layer
Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
class Mlp(nn.Module):
    """ MLP as used in Vision Transformer, MLP-Mixer and related networks

    Two linear projections with an activation in between; dropout is applied
    after each projection. Hidden/output widths default to the input width.
    """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Truthiness-based fallback (None or 0 both fall back to in_features),
        # matching the original `or` defaults.
        out_features = out_features if out_features else in_features
        hidden_features = hidden_features if hidden_features else in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # fc1 -> act -> drop -> fc2 -> drop
        x = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(x))
class GluMlp(nn.Module):
    """ MLP w/ GLU style gating

    The first projection produces ``hidden_features`` activations which are
    split in half: one half is the value path, the other passes through the
    gate activation and multiplies in.
    See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202
    """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, drop=0.):
        super().__init__()
        out_features = out_features if out_features else in_features
        hidden_features = hidden_features if hidden_features else in_features
        # The hidden projection is chunked into value/gate halves in forward().
        assert hidden_features % 2 == 0
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features // 2, out_features)
        self.drop = nn.Dropout(drop)

    def init_weights(self):
        # override init of fc1 w/ gate portion set to weight near zero, bias=1
        gate_start = self.fc1.bias.shape[0] // 2
        nn.init.ones_(self.fc1.bias[gate_start:])
        nn.init.normal_(self.fc1.weight[gate_start:], std=1e-6)

    def forward(self, x):
        projected = self.fc1(x)
        value, gates = projected.chunk(2, dim=-1)
        gated = value * self.act(gates)
        return self.drop(self.fc2(self.drop(gated)))
class GatedMlp(nn.Module):
    """ MLP as used in gMLP

    Like ``Mlp`` but with an optional gating module between the activation
    and the output projection. A supplied ``gate_layer`` is expected to halve
    the channel dim, hence ``fc2`` then consumes ``hidden_features // 2``.
    """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,
                 gate_layer=None, drop=0.):
        super().__init__()
        out_features = out_features if out_features else in_features
        hidden_features = hidden_features if hidden_features else in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        if gate_layer is None:
            self.gate = nn.Identity()
        else:
            assert hidden_features % 2 == 0
            self.gate = gate_layer(hidden_features)
            hidden_features = hidden_features // 2  # FIXME base reduction on gate property?
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # fc1 -> act -> drop -> gate -> fc2 -> drop
        x = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(self.gate(x)))
class ConvMlp(nn.Module):
    """ MLP using 1x1 convs that keeps spatial dims

    Operates on NCHW feature maps; the two 1x1 convolutions act as per-pixel
    linear layers, with optional normalization after the first projection.
    """
    def __init__(
            self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.):
        super().__init__()
        out_features = out_features if out_features else in_features
        hidden_features = hidden_features if hidden_features else in_features
        self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True)
        self.norm = nn.Identity() if norm_layer is None else norm_layer(hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # fc1 -> norm -> act -> drop -> fc2 (no dropout after fc2)
        x = self.act(self.norm(self.fc1(x)))
        return self.fc2(self.drop(x))
| [
"rwightman@gmail.com"
] | rwightman@gmail.com |
a23aaeb1fe329a362d00beace17f570d5ab087b3 | 5cb3b0b88c1baa2fae9562ac4cad5f84d65221e1 | /w7/demo/demo/core/models.py | b936ac6f63740a7a568da59d6fbd5aca09523171 | [] | no_license | bobur554396/BFDjango2020Spring | aa7ad9a595b247100f876e36585368af078d862e | e7ef04be2cf4d2506c2212ea4509a106e12d4dd4 | refs/heads/master | 2020-12-15T07:07:16.214284 | 2020-04-13T19:16:28 | 2020-04-13T19:16:28 | 235,028,587 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,162 | py | from django.db import models
from rest_framework import serializers
class Publisher(models.Model):
    """A book publisher with a unique name, a city and a gender choice.

    ``gender`` is stored as a small integer using the ``GENDER`` choices.
    """
    MALE = 1
    FEMALE = 2
    GENDER = (
        (MALE, 'male'),
        (FEMALE, 'female'),
    )
    name = models.CharField(max_length=300, unique=True)
    city = models.CharField(max_length=300)
    gender = models.PositiveSmallIntegerField(choices=GENDER, default=MALE)
    objects = models.Manager()
    class Meta:
        verbose_name = 'Publisher'
        verbose_name_plural = 'Publishers'
        # unique_together = ('name', 'city')
        # ordering = ('name',)
        # db_table = 'publishers_table'
    def __str__(self):
        return self.name
    def save(self, *args, **kwargs):
        # BUG FIX: the previous override's body was just `pass`, which
        # silently disabled saving Publisher instances entirely.  Delegate
        # to the default implementation so the model persists normally.
        super().save(*args, **kwargs)
class Author(models.Model):
    # Basic author record; ``rating`` starts at 0 and is mutated through
    # ``set_new_rating`` below.  Books reference authors via Book.author
    # (reverse accessor: ``author.books``).
    name = models.CharField(max_length=300)
    email = models.CharField(max_length=300)
    rating = models.IntegerField(default=0)
    # creator = models.ForeignKey(MainUser)
    def __str__(self):
        return self.name
    def set_new_rating(self, value):
        """Overwrite the author's rating and persist the change immediately."""
        self.rating = value
        self.save()
    # @property
    def books_count(self):
        # NOTE(review): intentionally left as a stub (returns None); the
        # commented-out line shows the intent -- counting the ``books``
        # reverse relation declared on Book.author.
        pass
        # return self.books.count()
# print(a.books_count)
# class PublishedBook(models.Manager):
# def get_queryset(self):
# return self.filter(is_published=True)
#
# def filter_by_name(self, name_pattern):
# return self.filter(name__contains=name_pattern)
#
#
# class NotPublishedBook(models.Manager):
# def get_queryset(self):
# return self.filter(is_published=False)
def valid_num_pages(value):
    """Serializer validator: a book must have between 10 and 5000 pages.

    BUG FIX: the original condition was ``not (10 >= value >= 5000)``, which
    is true for *every* value (nothing is both <= 10 and >= 5000), so all
    inputs were rejected.  The intended range check is 10 <= value <= 5000.

    Raises:
        serializers.ValidationError: when *value* is outside the range.
    """
    if not (10 <= value <= 5000):
        raise serializers.ValidationError('invalid num of pages')
class Book(models.Model):
    # A book with pricing info, a publication flag and links to its author
    # and publisher; both foreign keys expose the reverse name ``books``.
    name = models.CharField(max_length=300)
    price = models.FloatField(default=0)
    num_pages = models.IntegerField(default=0,
                                    validators=[valid_num_pages])
    is_published = models.BooleanField(default=False)
    author = models.ForeignKey(Author,
                               on_delete=models.CASCADE,
                               related_name='books')
    publisher = models.ForeignKey(Publisher,
                                  on_delete=models.CASCADE,
                                  related_name='books')
    objects = models.Manager()
    # published_books = PublishedBook()
    # not_published_books = NotPublishedBook()
    @property
    def price_round(self):
        """Price rounded to 3 decimal places."""
        return round(self.price, 3)
    @classmethod
    def top_ten(cls):
        """Return the first ten books (no explicit ordering is applied)."""
        return cls.objects.all()[:10]
    @staticmethod
    def cmp_books(book1, book2):
        """True when *book1* is more expensive than *book2*."""
        return book1.price > book2.price
# b1 = Book()
# print(b1.price_round)
#
# b2 = Book()
#
# ret = Book.cmp_books(b1, b2)
class Tag(models.Model):
    # A simple label; linked to books through the BookTag join model below.
    name = models.CharField(max_length=200)
class BookTag(models.Model):
    # Explicit many-to-many join between Tag and Book; the reverse accessors
    # (``tag.books`` / ``book.tags``) iterate BookTag rows, not the far side.
    tag = models.ForeignKey(Tag, on_delete=models.CASCADE,
                            related_name='books')
    book = models.ForeignKey(Book, on_delete=models.CASCADE,
                             related_name='tags')
# t = Tag()
# t.books.all()
#
# b = Book()
# for book_tag in b.tags.all():
# print(book_tag.tag)
#
| [
"bobur.muhsimbaev@gmail.com"
] | bobur.muhsimbaev@gmail.com |
a28d8b93e2c943b416dc0e882ce5ceeaff0889f8 | 551ef0567aca428a535775d3949f5d9670c0d29c | /abc/212/c/main.py | 9351e78f01dac0c8505cf070b9bc58365ee2fbc7 | [] | no_license | komo-fr/AtCoder | 7451a9402466ce8d487d0c521128732061c647df | c916889294cb12f21e74254de43b3e17e1b354bc | refs/heads/master | 2023-07-22T07:05:52.955188 | 2023-03-01T14:22:16 | 2023-03-01T14:22:16 | 213,109,943 | 0 | 0 | null | 2023-07-06T22:01:28 | 2019-10-06T04:44:49 | Python | UTF-8 | Python | false | false | 529 | py | #!/usr/bin/env python3
# Read the array sizes (only consumed to advance the input) and both arrays.
N, M = map(int, input().split())
a_list = list(map(int, input().split()))
b_list = list(map(int, input().split()))

# Tag every value with its source list and sweep all values in sorted order,
# tracking the most recent value seen from each side; the minimum |a - b|
# is always realised by such an adjacent cross pair.
tagged = sorted([(v, "a") for v in a_list] + [(v, "b") for v in b_list])

last_seen = {"a": None, "b": None}
best = float("inf")
for value, source in tagged:
    last_seen[source] = value
    if last_seen["a"] is not None and last_seen["b"] is not None:
        best = min(best, abs(last_seen["a"] - last_seen["b"]))

print(best)
| [
"komo.mdrms@gmail.com"
] | komo.mdrms@gmail.com |
4c0b163a7460d2fd4bc039cf7ea4f217d04db9cf | 47ce68e1ff970318fd31ac43405d0e1fa3594bf6 | /Models/biGAN/lowerDimBiganXEntropy.py | c888aeec7669d02cd851651a907718f7717dec9c | [
"BSD-3-Clause"
] | permissive | Midoriii/Anomaly_Detection_Diploma | 7196da379f8aefbd4546ca23e8303d1829e059fb | 11145e3e5210a4e45a33d98b138213edb7bc5d3d | refs/heads/master | 2023-03-25T20:42:56.961210 | 2021-03-14T01:13:39 | 2021-03-14T01:13:39 | 261,205,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,537 | py | '''
Copyright (c) 2021, Štěpán Beneš
Basic bigAN net, using cross entropy as loss and made to work on 192x192
'''
import numpy as np
from Models.biGAN.BaseBiganModel import BaseBiganModel
from Models.Losses.custom_losses import wasserstein_loss
from Models.biGAN.weightclip_constraint import WeightClip
from keras.layers import Input, Reshape, Dense, Flatten, concatenate
from keras.layers import UpSampling2D, Conv2D, MaxPooling2D, BatchNormalization, Dropout, LeakyReLU
from keras.models import Model
from keras.optimizers import RMSprop, Adam, SGD
class lowerDimBiganXEntropy(BaseBiganModel):
    # BiGAN variant for 192x192 single-channel images with a low-dimensional
    # latent space, trained with binary cross-entropy losses.  The
    # discriminator scores (image, latent) pairs; the generator and encoder
    # are trained jointly through the combined `ge` model.
    def __init__(self, input_shape, latent_dim=24, lr=0.0005, w_clip=0.01, batch_size=4):
        super().__init__(input_shape, latent_dim, lr, w_clip, batch_size)
        self.name = "lowerDimBiganXEntropy"
        g_optimizer = Adam(lr=self.lr, beta_1=0.5)
        d_optimizer = SGD(lr=self.lr)
        # Label convention used here: 0 = "real" pair, 1 = "fake" pair.
        # The genc_* labels are flipped targets for the generator/encoder.
        self.disc_labels_real = np.zeros((self.batch_size, 1))
        self.genc_labels_real = np.zeros((self.batch_size, 1))
        self.genc_labels_fake = np.ones((self.batch_size, 1))
        self.disc_labels_fake = np.ones((self.batch_size, 1))
        self.d = self.build_discriminator()
        self.d.compile(optimizer=d_optimizer, loss='binary_crossentropy', metrics=['accuracy'])
        self.g = self.build_generator()
        self.e = self.build_encoder()
        # The Discriminator part in GE model won't be trainable - GANs take turns.
        # Since the Discriminator itself has been previously compiled, this won't affect it.
        self.d.trainable = False
        self.ge = self.build_ge_enc()
        self.ge.compile(optimizer=g_optimizer, loss=['binary_crossentropy', 'binary_crossentropy'])
        return
    def build_generator(self):
        # Maps a latent vector z to a 192x192x1 image in [-1, 1] (tanh) by
        # upsampling a 6x6x32 seed five times (6 -> 192).
        z_input = Input(shape=[self.latent_dim])
        x = Dense(6*6*32)(z_input)
        x = Reshape([6, 6, 32])(x)
        # 6 -> 12
        x = Conv2D(32, (3, 3), padding='same')(x)
        x = LeakyReLU(0.1)(x)
        x = UpSampling2D((2, 2))(x)
        # 12 -> 24
        x = Conv2D(32, (3, 3), padding='same')(x)
        x = LeakyReLU(0.1)(x)
        x = UpSampling2D((2, 2))(x)
        # 24 -> 48
        x = Conv2D(32, (3, 3), padding='same')(x)
        x = LeakyReLU(0.1)(x)
        x = UpSampling2D((2, 2))(x)
        # 48 -> 96
        x = Conv2D(32, (3, 3), padding='same')(x)
        x = LeakyReLU(0.1)(x)
        x = UpSampling2D((2, 2))(x)
        # 96 -> 192
        x = Conv2D(32, (3, 3), padding='same')(x)
        x = LeakyReLU(0.1)(x)
        x = UpSampling2D((2, 2))(x)
        x = Conv2D(1, (3, 3), activation='tanh', padding='same')(x)
        return Model(inputs=z_input, outputs=x)
    def build_encoder(self):
        # Inverse path of the generator: downsamples a 192x192x1 image five
        # times (192 -> 6) and projects to a latent_dim vector (no final
        # activation).
        img_input = Input(shape=[self.input_shape, self.input_shape, 1])
        # 192 -> 96
        x = Conv2D(32, (3, 3), padding='same')(img_input)
        x = LeakyReLU(0.1)(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        # 96 -> 48
        x = Conv2D(32, (3, 3), padding='same')(x)
        x = LeakyReLU(0.1)(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        # 48 -> 24
        x = Conv2D(32, (3, 3), padding='same')(x)
        x = LeakyReLU(0.1)(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        # 24 -> 12
        x = Conv2D(32, (3, 3), padding='same')(x)
        x = LeakyReLU(0.1)(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        # 12 -> 6
        x = Conv2D(32, (3, 3), padding='same')(x)
        x = LeakyReLU(0.1)(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Flatten()(x)
        x = Dense(256)(x)
        x = LeakyReLU(0.1)(x)
        x = Dense(self.latent_dim)(x)
        return Model(inputs=img_input, outputs=x)
    def build_discriminator(self):
        # Joint discriminator D(image, z): a conv branch for the image and a
        # dense branch for the latent are concatenated and scored with a
        # sigmoid.  (self.dropout is presumably set by BaseBiganModel.)
        img_input = Input(shape=[self.input_shape, self.input_shape, 1])
        z_input = Input(shape=[self.latent_dim])
        # Latent
        l = Dense(256)(z_input)
        l = LeakyReLU(0.1)(l)
        l = Dense(256)(l)
        l = LeakyReLU(0.1)(l)
        # Image
        x = Conv2D(64, (3, 3), padding='same')(img_input)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.1)(x)
        x = Dropout(rate=self.dropout)(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(64, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.1)(x)
        x = Dropout(rate=self.dropout)(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(128, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.1)(x)
        x = Dropout(rate=self.dropout)(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(128, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.1)(x)
        x = Dropout(rate=self.dropout)(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        # Joint
        x = Flatten()(x)
        x = concatenate([x, l])
        x = Dense(256)(x)
        x = LeakyReLU(0.1)(x)
        x = Dense(1, activation='sigmoid')(x)
        return Model(inputs=[img_input, z_input], outputs=x)
    def build_ge_enc(self):
        # Combined generator+encoder model trained against the (frozen)
        # discriminator: scores D(x, E(x)) for real images and D(G(z), z)
        # for generated ones.
        img_input = Input(shape=[self.input_shape, self.input_shape, 1])
        z_input = Input(shape=[self.latent_dim])
        fake_imgs = self.g(z_input)
        critic_fake = self.d([fake_imgs, z_input])
        fake_z = self.e(img_input)
        critic_real = self.d([img_input, fake_z])
        return Model(inputs=[img_input, z_input], outputs=[critic_real, critic_fake])
| [
"stephen.Team24@gmail.com"
] | stephen.Team24@gmail.com |
4f6c3c3b84254e921f2c0c5e943bbdf9507428ac | 54277288865f738e44d7be1d6b41b19c63af267e | /configs/vcop/pretraining/r3d_18_ucf101.py | eee5d0eb75ac9265d0ceb4f2ff2d8593597c7c29 | [] | no_license | scenarios/SR-SVRL | 7b41d29e16cff3020f333efc28a624d85bba4537 | 26e89ecb29355635b10a355f2f16f1b5db9c4e9b | refs/heads/master | 2023-02-26T06:16:13.314491 | 2021-01-30T16:30:57 | 2021-01-30T16:30:57 | 307,295,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | _base_ = ['./default_runtime.py']
work_dir = './output/vcop/pretraining/r3d_18_ucf101'
model = dict(
type='VCOP',
backbone=dict(
type='R3D',
depth=18,
num_stages=4,
stem=dict(
temporal_kernel_size=3,
temporal_stride=1,
in_channels=3,
with_pool=False,
),
down_sampling=[False, True, True, True],
channel_multiplier=1.0,
bottleneck_multiplier=1.0,
with_bn=True,
pretrained=None,
),
vcop_head=dict(
in_channels=512,
tuple_len=3,
hidden_channels=512,
dropout_ratio=0.25
)
)
| [
"zyz0205@hotmail.com"
] | zyz0205@hotmail.com |
0199017464728830e0f1cc7a81ab6727cbc32e81 | f8ffa8ff257266df3de9d20d95b291e393f88434 | /Python - advanced/zajecia08/zadanie_domowe/wc.py | 8f7ba321ebbab792e39244c1e46ea04d4aff5b79 | [] | no_license | janiszewskibartlomiej/Python_Code_Me_Gda | c0583c068ef08b6130398ddf93c3a3d1a843b487 | 7568de2a9acf80bab1429bb55bafd89daad9b729 | refs/heads/master | 2020-03-30T05:06:26.757033 | 2020-03-02T08:53:28 | 2020-03-02T08:53:28 | 150,781,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | # Bartłomiej Janiszewski
from wc_funkcje import ustaw_logger, wc
import log
import datetime
if __name__ == '__main__':
start = datetime.datetime.now()
nazwa_pliku = 'zen.txt'
wybrany_szablon = 'pelny'
poziom_logowania = log.DEBUG
ustaw_logger(poziom_logowania)
wynik = wc(nazwa_pliku, wybrany_szablon)
print(wynik)
czas_wykonania = datetime.datetime.now() - start
log.debug(f'czas wykonywania programu: {czas_wykonania}')
| [
"janiszewski.bartlomiej@gmail.com"
] | janiszewski.bartlomiej@gmail.com |
7480bda28d7c6e29cbb2bd96889c9340d1a9e221 | 80e3dfac67631cef70da3dc015d4557d46d41801 | /src/grid/changemapanimations.py | 59382ea322cc21c033792db21c18f8d56677383d | [] | no_license | snizzo/lucrezia | 7b6d648f11581ddd21acd301df5426d77b6b21dd | fb41e5ba4435d48d702c58aa9402c10ab12a51b9 | refs/heads/master | 2023-06-22T13:14:06.634116 | 2023-06-18T22:42:14 | 2023-06-18T22:42:14 | 32,765,327 | 8 | 4 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | #panda3d
from panda3d.core import NodePath, LPoint2i
from direct.showbase.DirectObject import DirectObject
from direct.interval.IntervalGlobal import *
#internals
from utils.toggle import Toggle
from utils.once import Once
from objects.grass import Grass
from objects.light import Light
from tile import Tile
from character import Character
from utils.fadeout import FadeOut
import os, sys
'''
This class abstracts the 2D grid commoly used in 2D games
to use with panda3d.
INTERNAL TILESET EXAMPLE GRAPH:
^
|
|
y |
|
|
O------------>
x
'''
class ChangeMapAnimations(DirectObject):
    '''
    Autogenerates empty tileset at start

    Placeholder for map-change animation handling; subclasses Panda3D's
    DirectObject so instances can later bind/accept events.
    NOTE(review): no state or behaviour yet beyond construction.
    '''
    def __init__(self):
        # Intentionally empty; does not call DirectObject.__init__ —
        # presumably safe for this base class, confirm if events are added.
        pass
| [
"happy.snizzo@gmail.com"
] | happy.snizzo@gmail.com |
a0b703f87a403de60bd497d01dc11969567edd6c | 2eb8e3606a8df45d432fdf56ee9aa24942304526 | /rocketgram/api/shipping_option.py | 55db89dd5aca13265cd5374ea45adc78d30833c9 | [
"MIT"
] | permissive | KulZlaK/rocketgram | 22848293980ba44dd9fb63db28f34be36c437c84 | 09587deecffcd7ccc9529f4d9e51221888870f23 | refs/heads/master | 2022-07-27T23:25:51.254444 | 2020-05-15T21:36:57 | 2020-05-15T21:36:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | # Copyright (C) 2015-2020 by Vd.
# This file is part of Rocketgram, the modern Telegram bot framework.
# Rocketgram is released under the MIT License (see LICENSE).
from dataclasses import dataclass
from typing import List
from .labeled_price import LabeledPrice
@dataclass(frozen=True)  # immutable value object mirroring the Telegram API type
class ShippingOption:
    """\
    Represents ShippingOption object:
    https://core.telegram.org/bots/api#shippingoption
    """

    # Shipping option identifier.
    id: str
    # Option title shown to the user.
    title: str
    # Price portions that together form the cost of this option.
    prices: List[LabeledPrice]
| [
"vd@"
] | vd@ |
4fcaed0256103e3eb8ace9827d79a215ae909c24 | 3dfb4ee39555b30e6e0c6fcdbef371864e69f694 | /google-cloud-sdk/.install/.backup/lib/surface/preview/app/__init__.py | 8250cdb4a7ea1553778dd1cecb732f0c19282aa6 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | MD-Anderson-Bioinformatics/NG-CHM_Galaxy | 41d1566d5e60416e13e023182ca4351304381a51 | dcf4886d4ec06b13282143ef795c5f0ff20ffee3 | refs/heads/master | 2021-06-02T21:04:12.194964 | 2021-04-29T14:45:32 | 2021-04-29T14:45:32 | 130,249,632 | 0 | 1 | null | 2020-07-24T18:35:21 | 2018-04-19T17:25:33 | Python | UTF-8 | Python | false | false | 2,988 | py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gcloud app group."""
import sys
from googlecloudsdk.calliope import base
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.util import platforms
class UnsupportedPythonVersionError(exceptions.Error):
  """Raised when the active Python interpreter is too old (< 2.7) for app commands."""
  pass
# TODO(b/24169312): remove
CHANGE_WARNING = """\
The `gcloud preview app` surface is rapidly improving. Look out for
changing flags and new commands before the transition out of the `preview`
component. These changes will be documented in the Cloud SDK release notes
<https://goo.gl/X8apDJ> and via deprecation notices for changing commands.
If you would like to avoid changing behavior, please pin to a fixed version of
the Google Cloud SDK as described under the "Alternative Methods" section of the
Cloud SDK web site: <https://cloud.google.com/sdk/#alternative>.
"""
@base.Beta
class Appengine(base.Group):
  """Manage your App Engine app.

  This set of commands allows you to deploy your app, manage your existing
  deployments, and also run your app locally. These commands replace their
  equivalents in the appcfg tool.
  """

  detailed_help = {
      'DESCRIPTION': '{description}',
      'EXAMPLES': """\
          To run your app locally in the development application server, run:
            $ dev_appserver.py DEPLOYABLES
          To create a new deployment of one or more modules, run:
            $ {command} deploy DEPLOYABLES
          To list your existing deployments, run:
            $ {command} modules list
          To generate config files for your source directory:
            $ {command} gen-config
          """,
  }

  def Filter(self, unused_context, unused_args):
    # Runs before every subcommand: emit the one-time "surface is changing"
    # warning, then hard-fail on unsupported interpreters.
    # TODO(b/24169312): remove
    if not properties.VALUES.app.suppress_change_warning.GetBool():
      log.warn(CHANGE_WARNING)
      # Persist the suppression so the warning is shown only once per install.
      properties.PersistProperty(properties.VALUES.app.suppress_change_warning,
                                 'true')
    if not platforms.PythonVersion().IsSupported():
      # App Engine commands require Python >= 2.7; fail with guidance on
      # pointing CLOUDSDK_PYTHON at a supported interpreter.
      raise UnsupportedPythonVersionError(
          ('Python 2.7 or greater is required for App Engine commands in '
           'gcloud.\n\n'
           'Your Python location: [{0}]\n\n'
           'Please set the CLOUDSDK_PYTHON environment variable to point to a '
           'supported version in order to use this command.'
          ).format(sys.executable))
| [
"rbrown@insilico.us.com"
] | rbrown@insilico.us.com |
5ea65368b06c233a66fb293222f5e0e86ade3e0a | f81c9451768a52bc22c6a2abe87c25615ea8b3e6 | /汇总文件/jobboleproject/jobboleproject/spiders/crawlJobbole.py | b191639c8f7518c6e3da765558ff142ab763b436 | [] | no_license | hanfang302/crawlercollect | 07cb7fb5faf85018e82f48b0209bd86dc5c52f8f | 8f7b286df8bf0a344c3656bda5c7fb96cee640dc | refs/heads/master | 2020-03-22T05:27:07.928855 | 2018-07-03T10:26:07 | 2018-07-03T10:26:07 | 139,566,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
# Inherits from CrawlSpider (which itself inherits from Spider).
class CrawljobboleSpider(CrawlSpider):
    # Spider name used by `scrapy crawl`.
    name = 'crawlJobbole'
    # Only URLs under this domain are crawled; anything else is ignored.
    allowed_domains = ['blog.jobbole.com']
    # Starting URL(s) for the crawl.
    start_urls = ['http://blog.jobbole.com/']
    #
    # LinkExtractor arguments:
    # restrict_xpaths: limit link extraction to the given XPath region of the page
    # allow: URLs matching this regular expression are extracted (empty = match all)
    # deny: URLs matching this regular expression are excluded
    # callback: name of the method invoked with each downloaded response
    rules = (
        Rule(LinkExtractor(allow=r'.*?/item/bcjdb',deny=r'.*?/notitem/slcnd',restrict_xpaths='//div[@class=a]'), callback='parse_item', follow=True),
    )
    def parse_item(self, response):
        # Placeholder extraction: returns an empty item; the commented-out
        # lines show the intended fields.
        i = {}
        #i['domain_id'] = response.xpath('//input[@id="sid"]/@value').extract()
        #i['name'] = response.xpath('//div[@id="name"]').extract()
        #i['description'] = response.xpath('//div[@id="description"]').extract()
        return i
| [
"hanfang123@aliyun.com"
] | hanfang123@aliyun.com |
1321de452caf0060f6d2cf2523a3f418c5ce49c9 | 4e382ae46cf997ea2dbdfcfa463a57d3e0e9ad97 | /sols/different_ways_to_add_parentheses.py | bd10c6c02ef9f79a33afab5e46cb95900b8ae084 | [] | no_license | hayeonk/leetcode | 5136824838eb17ed2e4b7004301ba5bb1037082f | 6485f8f9b5aa198e96fbb800b058d9283a28e4e2 | refs/heads/master | 2020-04-28T03:37:16.800519 | 2019-06-01T14:34:45 | 2019-06-01T14:34:45 | 174,943,756 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | class Solution(object):
def diffWaysToCompute(self, exp):
def calculate(num1, num2, sign):
if sign == "+":
return num1 + num2
elif sign == "-":
return num1 - num2
else:
return num1 * num2
ans = []
for i in xrange(len(exp)):
if not exp[i].isdigit():
left = self.diffWaysToCompute(exp[:i])
right = self.diffWaysToCompute(exp[i+1:])
for n1 in left:
for n2 in right:
ans.append(calculate(n1, n2, exp[i]))
if not ans:
return [int(exp)]
return ans | [
"31617695+hayeonk@users.noreply.github.com"
] | 31617695+hayeonk@users.noreply.github.com |
679e412a82c2340b28045e6cdb388a19bfb5b799 | d628948e86841ae3efc93eba2e321dd58fe33b07 | /bookmanager/settings.py | 14f4a01b82c72de936cfd48b51e0a9eebea7e085 | [] | no_license | shd0812/django_demo | 8986dde23c2fd8ae4a46f8a938c9c0924200d4b2 | 832c028171795bf6feabc39d313bcad8cfbe5b94 | refs/heads/master | 2022-12-05T18:57:11.213951 | 2020-08-24T08:15:50 | 2020-08-24T08:15:50 | 289,303,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,332 | py | """
Django settings for bookmanager project.
Generated by 'django-admin startproject' using Django 1.11.28.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's6e4p%v(%&)$d(s%$l&&inwkn8)4)%&kqc-w+yssvg^)g)fe41'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'appone.apps.ApponeConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bookmanager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bookmanager.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'bookmanager',
'HOST': '122.51.192.201',
'PORT': 3306,
'USER': 'root',
'PASSWORD': '123'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
| [
"759275499@qq.com"
] | 759275499@qq.com |
bff8f74051ac91f5994980953d38279ba111917d | e9b9626ffce09bf011803a33b4780d8dcc6657e8 | /class9/collateral/put_file.py | aae2559cec9e5615c13bc1c06a49886309059ceb | [
"Apache-2.0"
] | permissive | zh0u0liver/netmiko_course | f7a91eb3f543e2a609172280f13b6bc8c4fbe1b9 | 31943e4f6f66dbfe523d62d5a2f03285802a8c56 | refs/heads/master | 2023-08-04T05:41:32.745403 | 2021-09-14T02:18:44 | 2021-09-14T02:18:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | import os
from getpass import getpass
from netmiko import ConnectHandler, file_transfer
# Code so automated tests will run properly
password = os.getenv("NETMIKO_PASSWORD") if os.getenv("NETMIKO_PASSWORD") else getpass()
# Need a privilege15 account (no enable call)
cisco3 = {
"device_type": "cisco_ios",
"host": "cisco3.lasthop.io",
"username": "pyclass",
"password": password,
}
# Secure copy server must be enable on the device ('ip scp server enable')
source_file = "test2.txt"
dest_file = "test2.txt"
direction = "put"
file_system = "flash:"
ssh_conn = ConnectHandler(**cisco3)
transfer_dict = file_transfer(
ssh_conn,
source_file=source_file,
dest_file=dest_file,
file_system=file_system,
direction=direction,
overwrite_file=True, # default "will not overwrite"
inline_transfer=True,
)
ssh_conn.disconnect()
print(transfer_dict)
| [
"ktbyers@twb-tech.com"
] | ktbyers@twb-tech.com |
098d0a2a8b1145a3df6d306fd83f6c68df598e98 | 8ef8e6818c977c26d937d09b46be0d748022ea09 | /cv/distiller/CWD/pytorch/mmrazor/configs/nas/mmcls/onceforall/ofa_mobilenet_supernet_32xb64_in1k.py | 341f4bda969cdd7625e1da7e3e5ff0c36e6fee57 | [
"Apache-2.0"
] | permissive | Deep-Spark/DeepSparkHub | eb5996607e63ccd2c706789f64b3cc0070e7f8ef | 9d643e88946fc4a24f2d4d073c08b05ea693f4c5 | refs/heads/master | 2023-09-01T11:26:49.648759 | 2023-08-25T01:50:18 | 2023-08-25T01:50:18 | 534,133,249 | 7 | 6 | Apache-2.0 | 2023-03-28T02:54:59 | 2022-09-08T09:07:01 | Python | UTF-8 | Python | false | false | 1,671 | py | _base_ = [
'mmcls::_base_/default_runtime.py',
'mmrazor::_base_/settings/imagenet_bs2048_ofa.py',
'mmrazor::_base_/nas_backbones/ofa_mobilenetv3_supernet.py',
]
supernet = dict(
_scope_='mmrazor',
type='SearchableImageClassifier',
data_preprocessor=_base_.data_preprocessor,
backbone=_base_.nas_backbone,
neck=dict(type='mmcls.GlobalAveragePooling'),
head=dict(
type='DynamicLinearClsHead',
num_classes=1000,
in_channels=1280,
loss=dict(
type='mmcls.LabelSmoothLoss',
num_classes=1000,
label_smooth_val=0.1,
mode='original',
loss_weight=1.0),
topk=(1, 5)),
input_resizer_cfg=_base_.input_resizer_cfg,
connect_head=dict(connect_with_backbone='backbone.last_mutable_channels'),
)
model = dict(
_scope_='mmrazor',
type='BigNAS',
drop_path_rate=0.2,
backbone_dropout_stages=[6, 7],
architecture=supernet,
distiller=dict(
type='ConfigurableDistiller',
teacher_recorders=dict(
fc=dict(type='ModuleOutputs', source='head.fc')),
student_recorders=dict(
fc=dict(type='ModuleOutputs', source='head.fc')),
distill_losses=dict(
loss_kl=dict(type='KLDivergence', tau=1, loss_weight=1)),
loss_forward_mappings=dict(
loss_kl=dict(
preds_S=dict(recorder='fc', from_student=True),
preds_T=dict(recorder='fc', from_student=False)))),
mutators=dict(type='mmrazor.NasMutator'))
model_wrapper_cfg = dict(
type='mmrazor.BigNASDDP',
broadcast_buffers=False,
find_unused_parameters=True)
| [
"mingjiang.li@iluvatar.ai"
] | mingjiang.li@iluvatar.ai |
fc255fe8fed8197264367180513e6fb8aebecba2 | 08b439af0eeccb93b41193b65c196b7ab2dbe773 | /award/urls.py | 1ae3b1aec217ee245e5c22ac908b89354d53b4e5 | [
"MIT"
] | permissive | EidAbdullahi/hilal | a80aa3f6b7ce2a3098f5dcada9bbb2fc52dc6ba9 | 1ccb6a1b5a5143164ced9b8a4c742997abbb6296 | refs/heads/master | 2023-05-01T14:28:46.837200 | 2021-05-05T12:02:48 | 2021-05-05T12:02:48 | 363,429,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | """award URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.contrib.auth import views
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('wardapp.urls')),
path('tinymce/', include('tinymce.urls')),
path('accounts/', include('django_registration.backends.one_step.urls')),
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/register/', views.LoginView.as_view(template_name='django_registration/registration_form.html')),
path('accounts/login/', views.LoginView.as_view(template_name='registration/login.html')),
path('accounts/logout/', views.logout_then_login, {"next_page": '/'}),
path('ratings/', include('star_ratings.urls', namespace='ratings')),
path('api-token-auth', obtain_auth_token),
]
| [
"eidabdullahi10@gmail.com"
] | eidabdullahi10@gmail.com |
515604f2abc3e0df4872c067064277872c874543 | 6f65ebe31650b73e9c5e77d598295eb1362702bd | /tools/Polygraphy/tests/logger/test_logger.py | 078390e71a50b887380cad96436dc31ad37a6be2 | [
"Apache-2.0",
"BSD-3-Clause",
"ISC",
"BSD-2-Clause",
"MIT"
] | permissive | hierarchyJK/TensorRT | d9b5be9964e54af8b2789a6e98f393519956ed90 | c2668947ea9ba4c73eb1182c162101f09ff250fd | refs/heads/master | 2023-06-26T07:01:08.922681 | 2021-07-12T09:28:23 | 2021-07-13T20:35:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,119 | py | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
from polygraphy.logger.logger import Logger
# We don't use the global logger here because we would have to reset the state each time.
class TestLogger(object):
def test_log_file(self):
logger = Logger()
with tempfile.NamedTemporaryFile("w+") as log_file:
logger.log_file = log_file.name
assert logger.log_file == log_file.name
logger.info("Hello")
log_file.seek(0)
assert log_file.read() == "[I] Hello\n"
| [
"rajeevsrao@users.noreply.github.com"
] | rajeevsrao@users.noreply.github.com |
5e8052af8629684bb42632d743efaf5d48119ba1 | 1b94aae63500b6ff94b0446d01c3c9bee385fad2 | /.history/chandori/account/views_20210824172332.py | 4c4c5078aeec6d0af4357fc59d04853d7bbcc83d | [] | no_license | miracle3070/chandori | 71389c2a9df76c242a5895c2c23d4394220f9c8e | b01d1eaa1d9c0d12d7abdc8f164039bcd9c42925 | refs/heads/master | 2023-08-18T11:46:11.303934 | 2021-09-28T19:23:22 | 2021-09-28T19:23:22 | 393,949,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,675 | py | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import UserChangeForm
from django.utils import timezone
from .models import *
from .forms import CustomUserChangeForm
from django.contrib import messages
def edit(request):
    """Render the account-edit page.

    NOTE(review): the template argument was previously the bare expression
    ``edit.html`` — an attribute lookup on this very function object — which
    raises AttributeError at request time; Django expects a template name
    string.  This definition is also immediately shadowed by the second
    ``edit`` view below; consider deleting one of the two.
    """
    return render(request, 'edit.html')
def edit(request):
    """Handle the profile-edit form: GET shows it, a valid POST saves it."""
    if request.method == 'POST':
        # Bind the submitted data to the current user's record.
        user_change_form = CustomUserChangeForm(request.POST, instance = request.user)
        if user_change_form.is_valid():
            user_change_form.save()
            messages.success(request, '회원정보가 수정되었습니다.')
            # NOTE(review): 'accounting/templates/base.html' includes the
            # templates/ directory in the name — template loaders usually take
            # an app-relative path; confirm this resolves.
            return render(request, 'accounting/templates/base.html')
    else:
        # GET: show the form pre-filled with the current user's data.
        user_change_form = CustomUserChangeForm(instance = request.user)
    # BUG FIX: the template name here was the corrupted string
    # 'from django.contrib import messages.html' (an accidental paste) which
    # can never resolve.  'edit.html' matches the sibling edit view.
    # TODO(review): confirm the intended template file name.
    return render(request, 'edit.html', {'user_change_form':user_change_form})
def login_view(request):
    """Authenticate a user from the login form.

    On success, start a session and redirect to the home page; on any
    failure (empty fields or bad credentials), re-render the login page
    with a Korean error message.
    """
    error_msg = ""
    if request.method != "POST":
        # Plain GET: just show the empty login form.
        return render(request, "login.html", {"error_msg": error_msg})
    username = request.POST.get('username')
    password = request.POST.get('password')
    if username == "" or password == "":
        # An empty string was submitted for either field.
        error_msg = "아이디 또는 비밀번호를 입력해주세요."
    else:
        user = authenticate(request, username=username, password=password)
        if user is None:
            error_msg = "아이디 또는 비밀번호가 틀렸습니다."
        else:
            login(request, user)
            return redirect("accounting:home")
    return render(request, "login.html", {"error_msg": error_msg})
def logout_view(request):
    # Clear the current session, then return to the home page.
    logout(request)
    return redirect("accounting:home")
def signup_view(request):
    """Create a new CustomUser from the signup form.

    On success redirects to the login page; if the two passwords differ the
    form is re-rendered with an error message.
    NOTE(review): `age` and `income` are converted with int() without
    validation — non-numeric input raises ValueError (HTTP 500); duplicate
    usernames are likewise unhandled.  TODO: validate before creating.
    """
    error_msg = ""
    if request.method == "POST":
        password1 = request.POST["password1"]
        password2 = request.POST["password2"]
        if password1 == password2:
            username = request.POST["username"]
            nickname = request.POST["nickname"]
            age = int(request.POST['age'])
            job = request.POST['job']
            income = int(request.POST['income'])
            signup_date = timezone.now()  # record when the account was created
            user = CustomUser.objects.create_user(
                username = username,
                password = password1,
                nickname = nickname,
                age = age,
                job = job,
                income = income,
                signup_date = signup_date,
            )
            return redirect("account:login")
        else:
            error_msg = "비밀번호가 일치하지 않습니다."
    return render(request, "signup.html", {"error_msg" : error_msg})
| [
"62284729+ehddus980@users.noreply.github.com"
] | 62284729+ehddus980@users.noreply.github.com |
65ed5768bb3bdfc61f10d6cc6a59dfbb999c8d92 | 4a020c0a492d931f7da5c452c9569fba06703686 | /testing/web-platform/tests/webdriver/ecmascript/ecmascript_test.py | cf27c01c06a02063b1347f6673721aec1f88a454 | [
"LicenseRef-scancode-w3c-03-bsd-license",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | rbernon/wine-gecko | 353173511a790127ffa2ad39d630b8a0dcbbf5bf | 550ad9eac229b769992f421ce9492ca46edabaa0 | refs/heads/master | 2023-08-06T21:25:26.836672 | 2020-11-30T12:47:56 | 2021-09-30T08:14:19 | 411,965,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
class EcmasScriptTest(base_test.WebDriverBaseTest):
    # NOTE(review): class name is missing a 'c' ("EcmaScript"); kept as-is
    # since external tooling may reference it by name.

    def test_that_ecmascript_returns_document_title(self):
        """execute_script must return the page's `document.title` value."""
        self.driver.get(self.webserver.where_is("ecmascript/res/ecmascript_test.html"))
        # Trailing Python semicolons removed (legal but un-Pythonic); the
        # semicolon inside the JavaScript string is part of the script itself.
        result = self.driver.execute_script("return document.title;")
        self.assertEqual("ecmascript test", result)
if __name__ == "__main__":
unittest.main()
| [
"rbernon@codeweavers.com"
] | rbernon@codeweavers.com |
010582cd02fe383b3413bf2b655b36b3af22c368 | 7ba54b83de814cd34f0058e797cf3d6313057147 | /mmdetection/configs/xray/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_2x_nih_chestx_det.py | f3478d0c54101f9e68bb9062d6b3357c0452e8b7 | [] | no_license | TerryGriffin/COMP5300.AdvancedDeepLearning | 083344a939e99fe7e2119225e023ab8aebdda04e | 9521c3327ba6d8344711cd3e404e627af9ffc936 | refs/heads/master | 2023-02-01T10:06:00.355905 | 2020-12-16T02:05:12 | 2020-12-16T02:05:12 | 321,839,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | _base_ = './faster_rcnn_r50_fpn_2x_nih_chestx_det.py'
model = dict(
pretrained='open-mmlab://resnext101_32x4d',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| [
"tgriffin62@yahoo.com"
] | tgriffin62@yahoo.com |
f6b37a366b054c1e431aefa17fb9943058075c76 | 0b0f4c15abb143a4b24b3bb01b9a6184df6867bf | /hello_requests/yes_or_no.py | f8b0ac04aff02b0e85c9cd9e2d08c68386c67414 | [] | no_license | saida93522/hello_requests_python | efe15130cbb327a78d3ef1aa8d6e43fdaf339709 | 76dc533dea9e1b1413d04b3fc20c52987139b780 | refs/heads/master | 2023-05-02T20:43:30.906319 | 2020-10-16T20:00:32 | 2020-10-16T20:00:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | import requests
import logging
class APIError(Exception):
    """Raised when the yes/no API call fails or returns unexpected data."""
    pass
def yes_or_no():
    """Fetch a random answer from https://yesno.wtf and return its 'answer' field.

    Raises APIError when the HTTP request fails, when the body is not JSON,
    or when the JSON lacks the expected 'answer' key.  The underlying
    exception is attached as the explicit cause in every case.
    """
    try:
        response = requests.get('https://yesno.wtf/api/')
        response.raise_for_status()  # turn 4xx/5xx responses into exceptions
    except Exception as e:
        logging.exception(e)
        # `from e` added for consistency with the handlers below so the
        # original error is the explicit __cause__ of the APIError.
        raise APIError('Error connecting to API') from e

    try:
        data = response.json()
    except Exception as e:
        logging.exception(e)
        raise APIError('Data returned is not JSON') from e

    try:
        answer = data['answer']
    except Exception as e:
        logging.exception(e)
        raise APIError('JSON does not contain expected data') from e

    return answer
def main():
    # Print the API's answer, or the APIError message when the call fails.
    try:
        answer = yes_or_no()
        print(answer)
    except APIError as e:
        # yes_or_no() always raises APIError with exactly one argument,
        # so this single-element unpack is safe here.
        message, = e.args
        print(message)
if __name__ == '__main__':
    main()
"10088152+claraj@users.noreply.github.com"
] | 10088152+claraj@users.noreply.github.com |
4504613d10c713611621a028b61bb44666f0a9f3 | 716abd9e5ba4b72b72cc5f724a6cc0a6ad4390d1 | /6-Operators of Python/30-Identity-and-Membership-operators.py | 1c0385bd6405585f5684b04ece645772b30e7ea3 | [] | no_license | devopstasks/PythonScripting | ac45edd72dc134ec3539b962f02dfc866f365ecf | 48bc37733ae6b3be4e2d64909ffe0962b6908518 | refs/heads/master | 2023-03-29T11:18:01.329452 | 2021-04-07T03:25:20 | 2021-04-07T03:25:20 | 350,388,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | '''
===================================
Identity operators are used to find the type of: class/type/object.
There are 2 types of Identity operators
- is
- is not
===================================
x=6
type(x) -> <class 'int'>
y="hi"
type(y) -> <class 'str'>
type(x) is type(y) -> False
type(x) is not type(y) -> True
'''
'''
====================================
Membership operators are used to validate the membership of a value.
There are 2 types of Membership operators
- in
- not in
====================================
x=[2,3,5,9]
3 in x -> True
4 in x -> False
'''
| [
"rpadhan2015@gmail.com"
] | rpadhan2015@gmail.com |
b7346b389ff30c566e8bc6651ae60b10c67ebf2b | 0e3462bd127c2072b34ac4885c034bde58ac8063 | /c_sharp_code_rewritten_in_python/transformer_command_line_interface.py | 7e6cb49274f86f384976fe5a54132f56ddee81b0 | [] | no_license | SNeicer/py_regex_translator_core | 608b3b6e5d4aff1496a910d38276562f25013cb7 | ea45e1eb556285f20f702ae6bd697aebd6f0efe6 | refs/heads/master | 2023-02-12T04:49:03.063797 | 2021-01-08T14:06:15 | 2021-01-08T14:06:15 | 326,164,216 | 0 | 0 | null | 2021-01-02T11:26:50 | 2021-01-02T11:04:11 | Python | UTF-8 | Python | false | false | 331 | py | from c_sharp_code_rewritten_in_python import interfaces
class TransformerCLI:
    """Thin command-line front-end that delegates to an injected file transformer."""

    def __init__(self, transformer: interfaces.IFileTransformer):
        # The injected transformer performs the actual conversion work.
        self._transformer = transformer

    def run(self, *args):
        """Transform args[0] (source path) into args[1] (target path)."""
        source_path, target_path = args[0], args[1]
        self._transformer.transform(source_path, target_path)
| [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
8eab42ec4e999316d34dc5b305592808d36f835a | 9dc8c299ee7d4a225002127cc03b4253c8a721fd | /libs/simulator/livepush_acl_topic_simulator.py | 823dabe3f0baee30b5bbd03a10492cff39593daf | [] | no_license | namesuqi/strategy_corgi | 5df5d8c89bdf7a7c465c438048be20ef16120f4f | 557b8f8eabf034c2a57c25e6bc581858dd4f1b6e | refs/heads/master | 2020-03-07T04:00:18.313901 | 2018-03-29T07:50:50 | 2018-03-29T07:50:50 | 127,253,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | #!/usr/bin/python
# coding=utf-8
# livepush_acl report simulator
from libs.simulator.topic_simulator import *
from config import push_review_duration
from libs.module.live_push import *
# live_push report livepush_acl
class LivePushAclTopicSimulator(TopicSimulator):
    """Simulator that builds 'livepush_acl' report messages for live push."""
    def __init__(self, log):
        super(LivePushAclTopicSimulator, self).__init__()
        self.log = log  # logger supplied by the caller
        self.table_name = Live_Push  # presumably the backing table/model — confirm
        self.topic = TOPIC_PUSH_ACL  # topic name placed in each report payload
        self.review_duration = push_review_duration  # review window from config
    def create_topic_data(self, result, **kwargs):
        # Build one report message from a `result` row; timestamp is in
        # milliseconds since the epoch.
        return {"topic": self.topic,
                "timestamp": int(time.time() * 1000),
                "event": result.event,
                "livepush_ip": result.ip,
                "reason": result.reason
                }
| [
"suqi_name@163.com"
] | suqi_name@163.com |
a22c702811b62c295c71fa1fb4f4aff77ee8108e | 9188d0d7ce9fc5fadf4d2593741894e1448f9326 | /indico/vendor/django_mail/backends/locmem.py | 4e3f2ecda085f3995cb3fbe9698f0f8bd4f91571 | [
"MIT"
] | permissive | vaclavstepan/indico | b411410416acdfa50b0d374f89ec8208de00fb2f | 8ca1ac4d4a958f22f24580a790b3cb015570bdfb | refs/heads/master | 2023-07-21T04:42:03.031131 | 2021-09-01T09:54:17 | 2021-09-01T09:54:17 | 385,897,420 | 0 | 0 | MIT | 2021-07-16T13:07:32 | 2021-07-14T10:16:57 | null | UTF-8 | Python | false | false | 1,481 | py | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
# The code in here is taken almost verbatim from `django.core.mail.backends.locmem`,
# which is licensed under the three-clause BSD license and is originally
# available on the following URL:
# https://github.com/django/django/blob/stable/3.1.x/django/core/mail/backends/locmem.py
# Credits of the original code go to the Django Software Foundation
# and their contributors.
"""
Backend for test environment.
"""
from indico.vendor import django_mail
from .base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
    """
    An email backend for use during test sessions.

    The test connection stores email messages in a dummy outbox,
    rather than sending them out on the wire.

    The dummy outbox is accessible through the outbox instance attribute.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Create the module-level outbox lazily so every backend instance
        # appends to the same shared list.
        if not hasattr(django_mail, 'outbox'):
            django_mail.outbox = []

    def send_messages(self, messages):
        """Redirect messages to the dummy outbox; return how many were stored."""
        msg_count = 0
        for message in messages:  # .message() triggers header validation
            message.message()
            django_mail.outbox.append(message)
            msg_count += 1
        return msg_count
| [
"adrian.moennich@cern.ch"
] | adrian.moennich@cern.ch |
192f5be2ea74ff5dd2215cd23fc641a35c5f9e09 | 3715df2c833919376a3ee44de8fc64d1c2abe8ce | /AutomlCore/build/lib/algorithms/classification/naive_bayes_complement.py | f057f2f40891211ae4bcc2e4da46ea7a74226a34 | [] | no_license | mindis/dachshund | 6c07f8eb6b9f75f66c74ec3748dfa42a31cff4d5 | 2f1b3e5866e06424c700f3a106051fe69bcb18bc | refs/heads/master | 2022-12-09T05:17:00.812348 | 2020-09-03T05:25:09 | 2020-09-03T05:25:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | import numpy as np
from sklearn.naive_bayes import ComplementNB
from sklearn.metrics import accuracy_score
from hyperopt import hp
from utils import definitions
from .. import model, model_classification
class ComplementNBClassifier(model.Model, model_classification.ModelClassification):
    """Complement Naive Bayes wrapper that plugs into the AutoML model framework."""
    def __init__(self, _project_name):
        super().__init__(_project_name)
        self.model_name = 'NaiveBayesComplement'
        self.params_list = {}
    def getHyperParameterSpace(self):
        # Hyper-parameter search space consumed by hyperopt.
        # NOTE(review): 'norm' is a plain list while 'alpha' uses hp.uniform —
        # confirm the surrounding framework converts lists into hp.choice.
        return {
            'alpha': hp.uniform('alpha', 0, 100),
            'norm': [False, True],
        }
    def getModel(self, _params):
        # Instantiate an sklearn ComplementNB from a sampled parameter dict.
        return ComplementNB(
            alpha= _params['alpha'],
            norm= _params['norm'],
        )
    def trainModel(self, x, y, _params):
        # Fit on (x, y), then persist via saveModel() — presumably defined on
        # the base Model class.
        self.model = self.getModel(_params)
        self.model.fit(x, y)
        self.saveModel()
    def getPredictResult(self, x):
        # Hard class predictions.
        return self.model.predict(x)
    def getPredictProbaResult(self, x):
        # Per-class probability estimates.
        return self.model.predict_proba(x)
| [
"aoba0203@naver.com"
] | aoba0203@naver.com |
f4b07d056b6b0304d6a97622d9ff3ea596b95948 | 61e98b0302a43ab685be4c255b4ecf2979db55b6 | /sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/test/input/func_e0604.py | d077f31adbfa8f55fc8501b1293f76262c0e2dfc | [
"Apache-2.0",
"BSD-3-Clause",
"EPL-2.0",
"CDDL-1.0",
"WTFPL",
"GPL-2.0-only",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CDDL-1.1",
"Classpath-exception-2.0"
] | permissive | dzenyu/kafka | 5631c05a6de6e288baeb8955bdddf2ff60ec2a0e | d69a24bce8d108f43376271f89ecc3b81c7b6622 | refs/heads/master | 2021-07-16T12:31:09.623509 | 2021-06-28T18:22:16 | 2021-06-28T18:22:16 | 198,724,535 | 0 | 0 | Apache-2.0 | 2019-07-24T23:51:47 | 2019-07-24T23:51:46 | null | UTF-8 | Python | false | false | 206 | py | """Test for invalid objects in a module's __all__ variable.
"""
# pylint: disable=R0903,R0201,W0612
__revision__ = 0
def some_function():
"""Just a function."""
pass
__all__ = [some_function]
| [
"alex.barreto@databricks.com"
] | alex.barreto@databricks.com |
c7cee1ce7b74766902666791db0f8f0747fb6459 | aabe7008e0eb77617f1a76cddb98e4b17fd5ce27 | /examples/model_compress/pruning/v2/movement_pruning_glue.py | a8365d9834075161641b7946bb41b2c961cfe190 | [
"MIT"
] | permissive | penghouwen/nni | a09a374a81be46fe246c425275585d5fe79404af | 2e6a2fd2df0d5700cb028b25156bb535a3fc227a | refs/heads/master | 2021-12-21T14:02:32.228973 | 2021-12-13T16:54:39 | 2021-12-13T16:54:39 | 435,926,123 | 1 | 0 | MIT | 2021-12-07T15:09:36 | 2021-12-07T15:09:35 | null | UTF-8 | Python | false | false | 4,715 | py | import functools
from tqdm import tqdm
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
from datasets import load_metric, load_dataset
from transformers import (
BertForSequenceClassification,
BertTokenizerFast,
DataCollatorWithPadding,
set_seed
)
from nni.algorithms.compression.v2.pytorch.pruning import MovementPruner
# GLUE task name -> (first text column, optional second text column) in the
# raw dataset; single-sentence tasks use None for the second column.
task_to_keys = {
    "cola": ("sentence", None),
    "mnli": ("premise", "hypothesis"),
    "mrpc": ("sentence1", "sentence2"),
    "qnli": ("question", "sentence"),
    "qqp": ("question1", "question2"),
    "rte": ("sentence1", "sentence2"),
    "sst2": ("sentence", None),
    "stsb": ("sentence1", "sentence2"),
    "wnli": ("sentence1", "sentence2"),
}
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# number of mini-batches whose gradients are accumulated before one optimizer step
gradient_accumulation_steps = 16
# a fake criterion: HuggingFace model outputs already carry the loss, so this
# just unwraps it.  'target' is ignored; NOTE: 'input' shadows the builtin but
# is kept because the parameter names are part of the pruner-facing interface.
def criterion(input, target):
    """Return the loss precomputed inside the HuggingFace model output."""
    return input.loss
def trainer(model, optimizer, criterion, train_dataloader):
    """Train ``model`` for one pass over ``train_dataloader`` with gradient accumulation.

    Each mini-batch loss is scaled by 1/gradient_accumulation_steps and its
    gradient accumulated; a single optimizer step is taken per window (and at
    the end of the epoch).  Every 16000 batches the model is scored with the
    module-level ``evaluator`` on ``validate_dataloader`` (both defined in the
    ``__main__`` block) and the result printed.

    Bug fix: the previous version called ``optimizer.zero_grad()`` at the top
    of *every* iteration, which discarded the gradients being accumulated, so
    only the final mini-batch of each window influenced the update.  Gradients
    are now cleared only after ``optimizer.step()``.
    """
    model.train()
    optimizer.zero_grad()
    counter = 0
    for batch in tqdm(train_dataloader):
        counter += 1
        batch.to(device)
        outputs = model(**batch)
        # pruner may wrap the criterion, for example, loss = origin_loss + norm(weight), so call criterion to get loss here
        loss = criterion(outputs, None)
        # scale so the accumulated gradient equals the mean over the window
        loss = loss / gradient_accumulation_steps
        loss.backward()
        if counter % gradient_accumulation_steps == 0 or counter == len(train_dataloader):
            optimizer.step()
            optimizer.zero_grad()
        if counter % 16000 == 0:
            print('Step {}: {}'.format(counter // gradient_accumulation_steps, evaluator(model, metric, is_regression, validate_dataloader)))
def evaluator(model, metric, is_regression, eval_dataloader):
    """Run ``model`` over ``eval_dataloader`` and return the aggregated metric dict.

    Predictions are the logits argmax for classification or the squeezed
    logits for regression.  Inference is wrapped in ``torch.no_grad()`` so no
    autograd graph is built, cutting evaluation memory use (output values are
    unchanged).
    """
    model.eval()
    with torch.no_grad():
        for batch in tqdm(eval_dataloader):
            batch.to(device)
            outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze()
            metric.add_batch(
                predictions=predictions,
                references=batch["labels"],
            )
    return metric.compute()
if __name__ == '__main__':
    # Movement pruning on BERT-base for the GLUE MNLI task, followed by one
    # epoch of fine-tuning of the pruned model.
    task_name = 'mnli'
    is_regression = False
    # mnli has 3 classes; stsb would be regression (1 output); others binary
    num_labels = 1 if is_regression else (3 if task_name == 'mnli' else 2)
    train_batch_size = 8
    eval_batch_size = 8
    set_seed(1024)
    tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased')
    sentence1_key, sentence2_key = task_to_keys[task_name]
    # used to preprocess the raw data
    def preprocess_function(examples):
        # Tokenize the texts
        args = (
            (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
        )
        result = tokenizer(*args, padding=False, max_length=128, truncation=True)
        if "label" in examples:
            # In all cases, rename the column to labels because the model will expect that.
            result["labels"] = examples["label"]
        return result
    raw_datasets = load_dataset('glue', task_name, cache_dir='./data')
    processed_datasets = raw_datasets.map(preprocess_function, batched=True, remove_columns=raw_datasets["train"].column_names)
    train_dataset = processed_datasets['train']
    # mnli ships matched/mismatched validation splits; use the matched one
    validate_dataset = processed_datasets['validation_matched' if task_name == "mnli" else 'validation']
    data_collator = DataCollatorWithPadding(tokenizer)
    train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, batch_size=train_batch_size)
    validate_dataloader = DataLoader(validate_dataset, collate_fn=data_collator, batch_size=eval_batch_size)
    metric = load_metric("glue", task_name)
    model = BertForSequenceClassification.from_pretrained('bert-base-cased', num_labels=num_labels).to(device)
    print('Initial: {}'.format(evaluator(model, metric, is_regression, validate_dataloader)))
    # prune 90% of the Linear weights inside the BERT encoder
    config_list = [{'op_types': ['Linear'], 'op_partial_names': ['bert.encoder'], 'sparsity': 0.9}]
    p_trainer = functools.partial(trainer, train_dataloader=train_dataloader)
    optimizer = Adam(model.parameters(), lr=2e-5)
    pruner = MovementPruner(model, config_list, p_trainer, optimizer, criterion, training_epochs=10,
                            warm_up_step=3000, cool_down_beginning_step=27000)
    _, masks = pruner.compress()
    pruner.show_pruned_weights()
    print('Final: {}'.format(evaluator(model, metric, is_regression, validate_dataloader)))
    # fresh optimizer for the post-pruning fine-tuning epoch
    optimizer = Adam(model.parameters(), lr=2e-5)
    trainer(model, optimizer, criterion, train_dataloader)
    print('After 1 epoch finetuning: {}'.format(evaluator(model, metric, is_regression, validate_dataloader)))
| [
"noreply@github.com"
] | penghouwen.noreply@github.com |
2adf288db73ef957c82ad3b82c56653c52cf1dfb | 53e58c213232e02250e64f48b97403ca86cd02f9 | /16/mc/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysisQCD_HT200to300.py | ca9031e1be70ac3888d31407f97e120bf09f4de2 | [] | no_license | xdlyu/fullRunII_ntuple_102X | 32e79c3bbc704cfaa00c67ab5124d40627fdacaf | d420b83eb9626a8ff1c79af5d34779cb805d57d8 | refs/heads/master | 2020-12-23T15:39:35.938678 | 2020-05-01T14:41:38 | 2020-05-01T14:41:38 | 237,192,426 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,331 | py | from WMCore.Configuration import Configuration
# CRAB3 job configuration: run the EDBR ntuple analysis over the
# QCD_HT200to300 Summer16 MiniAODv3 MC sample and store output at CERN.
config = Configuration()
config.section_("General")
config.General.requestName = 'QCD_HT200to300'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
# Jet-energy-correction text files shipped with every job (AK4/AK8, CHS and Puppi, L1-L3).
config.JobType.inputFiles = ['Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFPuppi.txt']
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.sendExternalFolder = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/QCD_HT200to300_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3_ext1-v2/MINIAODSIM'
config.Data.inputDBS = 'global'
#config.Data.inputDBS = 'phys03'
# FileBased splitting: 5 input files per job, over the whole dataset (-1 = all units)
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob =5
config.Data.totalUnits = -1
config.Data.publication = False
# NOTE(review): 'name' and 'steam_dir' are only consumed by the commented-out
# outLFNDirBase line below; harmless otherwise.
name = 'WWW'
steam_dir = 'xulyu'
#config.Data.outLFNDirBase = '/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/' + steam_dir + '/' + name + '/'
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'QCD_HT200to300'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
| [
"XXX@cern.ch"
] | XXX@cern.ch |
1ca9421c30d507ffd0c20ef335be2cd7e57b5697 | 347a6aac6fc40edab03d75a53e89053aeeb8fd72 | /quizzes/Quiz7.py | f9e8bdc18bfc69f7deba22dcd76922245d4bc853 | [] | no_license | jwilke/cs373 | 0d5de4676c13e83c8b9dbcab66140be53cebeaf9 | 7923f3710eaa76d38d8261d6dc596f2bfaf12a8e | refs/heads/master | 2021-01-16T00:23:35.227487 | 2012-07-05T22:53:14 | 2012-07-05T22:53:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | #!/usr/bin/env python
"""
CS373: Quiz #7 (9 pts)
"""
""" ----------------------------------------------------------------------
1. In the paper, "A Bug and a Crash" about the Ariane 5, what was the
software bug?
(2 pts)
the conversion of a 64-bit number to a 16-bit number
"""
""" ----------------------------------------------------------------------
2. In the paper, "Mariner 1", what was the software bug?
(1 pt)
the ommission of a hyphen
"""
""" ----------------------------------------------------------------------
3. What is the output of the following program?
(2 pts)
True
False
"""
a = [2, 3, 4]
b = a
# list += is in-place (calls __iadd__ / extend), so b is still the same
# object as a and the identity check prints True
b += [5]
print a is b
a = (2, 3, 4)
b = a
# tuples are immutable: += rebinds b to a brand-new tuple, so the
# identity check prints False
b += (5,)
print a is b
""" ----------------------------------------------------------------------
4. What semantic difference is there between Java's conditional expression
and Python's? Why?
(4 pts)
Java's then and else clause must be of the same type
Java is typed and the compiler must be able to determine the type of the
entire conditional expression
"""
| [
"downing@cs.utexas.edu"
] | downing@cs.utexas.edu |
352b804747cc226f09e7a42316e6262c0a63a77b | 15581a76b36eab6062e71d4e5641cdfaf768b697 | /LeetCode_30days_challenge/2020/August/Design HashSet.py | 8ac9ddd111bec7159b93c31f2ee92c9ef039863d | [] | no_license | MarianDanaila/Competitive-Programming | dd61298cc02ca3556ebc3394e8d635b57f58b4d2 | 3c5a662e931a5aa1934fba74b249bce65a5d75e2 | refs/heads/master | 2023-05-25T20:03:18.468713 | 2023-05-16T21:45:08 | 2023-05-16T21:45:08 | 254,296,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | class MyHashSet:
    def __init__(self):
        # Separate-chaining hash set: a fixed number of buckets, each a list
        # of the keys that hash to it (key % numBuckets).
        self.numBuckets = 15000
        self.buckets = [[] for _ in range(self.numBuckets)]
    def hash_function(self, key):
        """Map *key* to its bucket index by simple modulo hashing."""
        return key % self.numBuckets
def add(self, key):
i = self.hash_function(key)
if not key in self.buckets[i]:
self.buckets[i].append(key)
def remove(self, key):
i = self.hash_function(key)
if key in self.buckets[i]:
self.buckets[i].remove(key)
def contains(self, key):
i = self.hash_function(key)
if key in self.buckets[i]:
return True
else:
return False
| [
"mariandanaila01@gmail.com"
] | mariandanaila01@gmail.com |
af8a2ebd0fe7f5f06e5aeccc636bf8704d77ef8a | ccf6945c819e25a3eae92103116b7d1063e21871 | /chess_piece_detection/app/other_models/alexnet.py | 6da26d5ea58921656765650c08f6ac477852e5d1 | [] | no_license | ace-racer/Extending-Board-Games-using-deep-learning | 67355eabec91ed8afb1150e4c2d6bb0068e74910 | 7171b14c80be35b0ee882322c318307779379e9a | refs/heads/master | 2020-04-10T05:32:18.705335 | 2019-04-22T11:42:47 | 2019-04-22T11:42:47 | 160,830,611 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,261 | py | # Import necessary components to build LeNet
# Reference: https://github.com/eweill/keras-deepcv/blob/master/models/classification/alexnet.py
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras.optimizers import SGD, RMSprop, Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
# Other imports
import numpy as np
import os
# custom imports
import appconfigs
import modelconfigs
import constants
import utils
def train_alexnet_model(model_configs, train_model=True, num_samples=None):
    """Build (and optionally train) an AlexNet-style CNN for chess-piece images.

    Args:
        model_configs: dict of hyper-parameter lists; element 0 of
            "batch_size", "epochs", "lr" and "model_weights_file_name" is used.
        train_model: when True, fit with checkpoint / early-stopping /
            TensorBoard callbacks; when False, return the compiled model only.
        num_samples: optional cap on the number of samples loaded per split.

    Returns:
        Tuple (history, model, X_test, y_test); history is None when
        train_model is False.
    """
    print("Alexnet model...")
    X_train, y_train = utils.get_required_data_with_labels_for_CNN(appconfigs.location_of_train_data, num_samples)
    X_test, y_test = utils.get_required_data_with_labels_for_CNN(appconfigs.location_of_test_data, num_samples)

    # Initialize model
    alexnet = Sequential()

    # Layer 1: 96 large (11x11) filters over 200x200 RGB input
    alexnet.add(Conv2D(96, (11, 11), input_shape=(200, 200, 3),
                       padding='same', kernel_regularizer=l2(0.)))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(MaxPooling2D(pool_size=(2, 2)))

    # Layer 2
    alexnet.add(Conv2D(256, (5, 5), padding='same'))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(MaxPooling2D(pool_size=(2, 2)))

    # Layer 3
    alexnet.add(ZeroPadding2D((1, 1)))
    alexnet.add(Conv2D(512, (3, 3), padding='same'))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(MaxPooling2D(pool_size=(2, 2)))

    # Layer 4
    alexnet.add(ZeroPadding2D((1, 1)))
    alexnet.add(Conv2D(1024, (3, 3), padding='same'))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))

    # Layer 5
    alexnet.add(ZeroPadding2D((1, 1)))
    alexnet.add(Conv2D(1024, (3, 3), padding='same'))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(MaxPooling2D(pool_size=(2, 2)))

    # Layer 6: first fully-connected block
    alexnet.add(Flatten())
    alexnet.add(Dense(3072))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(Dropout(0.5))

    # Layer 7
    alexnet.add(Dense(4096))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(Dropout(0.5))

    # Layer 8: classifier head
    alexnet.add(Dense(constants.num_output_classes))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('softmax'))

    batch_size = model_configs["batch_size"][0]
    # number of training epochs
    nb_epoch = model_configs["epochs"][0]

    # Optimizer creation and compilation were duplicated in both branches
    # below; they are identical, so do them once here.
    adam = Adam(lr=model_configs["lr"][0], beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    alexnet.compile(loss='sparse_categorical_crossentropy',
                    optimizer=adam,
                    metrics=['accuracy'])

    if not train_model:
        return None, alexnet, X_test, y_test

    # Keep the best weights by validation accuracy, stop after 10 stagnant
    # epochs, and log training to TensorBoard.
    filepath = os.path.join(appconfigs.model_folder_location, model_configs["model_weights_file_name"][0])
    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
                                 save_best_only=True,
                                 mode='max')
    earlystop = EarlyStopping(monitor='val_acc', min_delta=0.001, patience=10,
                              verbose=1, mode='max')
    tensorboard = TensorBoard(log_dir=appconfigs.tensorboard_logs_folder_location, histogram_freq=0, write_graph=True, write_images=True)
    callbacks_list = [checkpoint, earlystop, tensorboard]

    hist = alexnet.fit(X_train, y_train, shuffle=True, batch_size=batch_size,
                       epochs=nb_epoch, verbose=1,
                       validation_data=(X_test, y_test), callbacks=callbacks_list)
    return hist, alexnet, X_test, y_test
| [
"anuragchatterjee92@gmail.com"
] | anuragchatterjee92@gmail.com |
3415d6c90827899f16903882ab9e54d437fd5b09 | 34eb0a65adb4290d6224075a2700d432d5649de6 | /tachyon/common/restclient.py | 1d6c499cbfc98be41954791218186029b21669a2 | [
"BSD-3-Clause"
] | permissive | Vuader/tachyon_common | ae950e13ac14df590cc76b3c7f98c26434104217 | a675d13a251aeda16dba7a416354872ee41509e6 | refs/heads/master | 2021-01-19T09:51:55.904274 | 2017-02-15T11:50:00 | 2017-02-15T11:50:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,684 | py | # Tachyon OSS Framework
#
# Copyright (c) 2016-2017, see Authors.txt
# All rights reserved.
#
# LICENSE: (BSD3-Clause)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENTSHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import thread
import json
import tachyon.ui
try:
# python 3
from io import BytesIO
except ImportError:
# python 2
from StringIO import StringIO as BytesIO
try:
# python 3
from urllib.parse import urlencode
except ImportError:
# python 2
from urllib import urlencode
import nfw
log = logging.getLogger(__name__)
sessions = {}
class RestClient(nfw.RestClient):
    """Tachyon REST client with per-thread, per-endpoint session caching.

    Credentials and auth headers are cached in the module-level ``sessions``
    dict, keyed first by thread id then by endpoint URL, so re-creating a
    client for a known URL on the same thread reuses the cached state instead
    of authenticating again.

    NOTE(review): ``__init__``/``authenticate`` assign ``self.domain`` and
    ``self.token``, which shadow the ``domain()`` and ``token()`` methods on
    those instances.  This pre-existing quirk is kept to avoid changing the
    public interface.
    """
    def __init__(self, url, username=None, password=None, domain=None):
        global sessions
        # sessions are partitioned per thread so threads never share headers
        self.thread_id = thread.get_ident()
        if self.thread_id not in sessions:
            sessions[self.thread_id] = {}
        self.session = sessions[self.thread_id]
        self.url = url
        if url in self.session:
            # endpoint already seen on this thread: reuse cached credentials
            self.username = self.session[url]['username']
            self.password = self.session[url]['password']
            self.domain = self.session[url]['domain']
            self.tachyon_headers = self.session[url]['headers']
            super(RestClient, self).__init__()
        else:
            self.session[url] = {}
            self.session[url]['username'] = username
            self.session[url]['password'] = password
            self.session[url]['domain'] = domain
            self.session[url]['headers'] = {}
            self.username = username
            self.password = password
            self.domain = domain
            super(RestClient, self).__init__()
            self.tachyon_headers = self.session[url]['headers']
            if username is not None:
                # BUG FIX: authenticate() takes (username, password, domain);
                # the old call passed url as an extra first argument, raising
                # TypeError on every authenticated construction.
                self.authenticate(username, password, domain)
    def authenticate(self, username, password, domain):
        """POST credentials to <url>/login; cache and return the result."""
        url = self.url
        auth_url = "%s/login" % (url,)
        if 'token' in self.tachyon_headers:
            del self.tachyon_headers['token']
        self.tachyon_headers['X-Domain'] = domain
        data = {}
        data['username'] = username
        data['password'] = password
        data['expire'] = 1
        server_headers, result = self.execute("POST", auth_url,
                                              data, self.tachyon_headers)
        if 'token' in result:
            self.token = result['token']
            self.tachyon_headers['X-Auth-Token'] = self.token
        else:
            raise tachyon.ui.exceptions.Authentication("Could not connect/authenticate")
        self.session[url]['headers'] = self.tachyon_headers
        return result
    def token(self, token, domain, tenant):
        """Validate an existing token against <url>/login and cache it."""
        log.error("TOKEN %s" % (token,))
        url = self.url
        auth_url = "%s/login" % (url,)
        self.tachyon_headers['X-Tenant'] = tenant
        self.tachyon_headers['X-Domain'] = domain
        self.tachyon_headers['X-Auth-Token'] = token
        server_headers, result = self.execute("GET", auth_url,
                                              None, self.tachyon_headers)
        if 'token' in result:
            self.token = token
        else:
            raise tachyon.ui.exceptions.Authentication("Could not connect/authenticate")
        self.session[url]['headers'] = self.tachyon_headers
        return result
    def domain(self, domain):
        """Switch the X-Domain header on this endpoint's cached session."""
        self.tachyon_headers['X-Domain'] = domain
        # BUG FIX: 'url' was an undefined name here (NameError at call time)
        self.session[self.url]['headers'] = self.tachyon_headers
    def tenant(self, tenant):
        """Switch the X-Tenant header on this endpoint's cached session."""
        self.tachyon_headers['X-Tenant'] = tenant
        # BUG FIX: 'url' was an undefined name here (NameError at call time)
        self.session[self.url]['headers'] = self.tachyon_headers
    def execute(self, request, url, obj=None, headers=None):
        """JSON-encode obj, send the request, return [headers, decoded body]."""
        if obj is not None:
            data = json.dumps(obj)
        else:
            data = None
        if self.url not in url:
            # allow callers to pass a path relative to the endpoint root
            url = "%s/%s" % (self.url, url)
        if headers is None:
            headers = self.tachyon_headers
        else:
            headers.update(self.tachyon_headers)
        server_headers, response = super(RestClient, self).execute(request, url, data, headers)
        if response is not None:
            response = json.loads(response)
        return [server_headers, response]
| [
"christiaan.rademan@gmail.com"
] | christiaan.rademan@gmail.com |
12f85dfe3b746c02305c2dd7cc147b806129fb82 | 391d648132c1a05e7da575205eef89a7208a892a | /scripts/playground/phase.py | 0fe2427793d433ebdd4a2e65cbfcf3a84a814b9a | [] | no_license | michaelbateman/DemographicInference | c3ceaf69f8b554f3973473607d6b5201cca423f9 | b1e2529b1ce0710f82d2867d08588ae4a6c72bb2 | refs/heads/master | 2021-01-10T06:28:43.698581 | 2015-10-06T18:25:45 | 2015-10-06T18:25:45 | 43,770,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,500 | py | import subprocess
# Python 2 driver: split the 3L-chromosome VCF into 5 Mb windows, phase each
# window with pbwt, gzip the phased output, and delete the intermediate VCF.
in_file = 'ag1000g.phase1.AR2.3L.PASS.vcf.gz'
region_size = 5000000
# 50 Mb of chromosome 3L divided into 5 Mb windows -> 10 regions
num_regions = 50e6 / region_size
print num_regions
print int(num_regions)
# walk the windows from the right end of the chromosome toward the left
for k in range(0,int(num_regions)):
    i = int(num_regions) -1 - k
    left = i * region_size
    right = (i+1) * region_size
    window = str(left) + '-' + str(right)
    out_file = 'ag1000g.phase1.AR2.3L.PASS.' + window + '.vcf'
    # 1) extract this window into its own uncompressed VCF
    call_string = 'time ../bcftools/bcftools view -o ' + out_file + ' ' + in_file + ' -r 3L:' + window
    print call_string
    print 'Now creating the file: ', out_file
    print '.....'
    subprocess.call(call_string, shell=True)
    # 2) build the pbwt site files for the window
    call_string = '../pbwtCurrent/pbwt/pbwt -readVcfGT ' + out_file + ' -writeAll temp_file_name'
    print call_string
    print 'Now preparing site file...'
    subprocess.call(call_string, shell=True)
    # The 1530 just below is the number of haplotypes in 765 samples
    # Should change in different situation
    phased_name = 'ag1000g.phase1.AR2.3L.PASS.' + window + '.phased.vcf'
    # 3) phase the window and write the phased VCF
    call_string = '../pbwtCurrent/pbwt/pbwt -readVcfGT ' + out_file + ' -phase 1530 -readAll temp_file_name -writeVcf ' + phased_name
    print call_string
    print 'Now phasing...'
    subprocess.call(call_string, shell=True)
    # 4) compress the phased VCF and drop the unphased intermediate
    call_string = 'time gzip ' + phased_name
    print call_string
    subprocess.call(call_string, shell=True)
    call_string = 'rm ' + out_file
    print call_string
    subprocess.call(call_string, shell=True)
    print 'Progress: %d out of %d regions complete.' %(k+1, num_regions)
print call_string | [
"bbeeefcake@gmail.com"
] | bbeeefcake@gmail.com |
c3adcbeba8fc8166b6429a87de5ab17b4187ccfd | aabe7008e0eb77617f1a76cddb98e4b17fd5ce27 | /nni/experiment/rest.py | bdacc7c215ac759fdb551e7d4fa3d6e296e45fd1 | [
"MIT"
] | permissive | penghouwen/nni | a09a374a81be46fe246c425275585d5fe79404af | 2e6a2fd2df0d5700cb028b25156bb535a3fc227a | refs/heads/master | 2021-12-21T14:02:32.228973 | 2021-12-13T16:54:39 | 2021-12-13T16:54:39 | 435,926,123 | 1 | 0 | MIT | 2021-12-07T15:09:36 | 2021-12-07T15:09:35 | null | UTF-8 | Python | false | false | 1,156 | py | import logging
from typing import Any, Optional
import requests
_logger = logging.getLogger(__name__)
url_template = 'http://localhost:{}/api/v1/nni{}'
timeout = 20
def request(method: str, port: Optional[int], api: str, data: Any = None) -> Any:
    """Send one REST call to the local NNI manager and decode the JSON reply.

    Raises RuntimeError when no port is known (experiment not running), and
    propagates an HTTPError for non-2xx responses after logging them.  Only
    GET/POST replies with a non-empty body are decoded and returned.
    """
    if port is None:
        raise RuntimeError('Experiment is not running')
    full_url = url_template.format(port, api)
    kwargs = {'timeout': timeout}
    if data is not None:
        kwargs['json'] = data
    resp = requests.request(method, full_url, **kwargs)
    if not resp.ok:
        _logger.error('rest request %s %s failed: %s %s', method.upper(), full_url, resp.status_code, resp.text)
        resp.raise_for_status()
    if method.lower() in ('get', 'post') and len(resp.content) > 0:
        return resp.json()
def get(port: Optional[int], api: str) -> Any:
    """Send a GET to the NNI manager and return the decoded JSON body."""
    return request('get', port, api)
def post(port: Optional[int], api: str, data: Any) -> Any:
    """POST ``data`` as JSON to the NNI manager and return the decoded reply."""
    return request('post', port, api, data)
def put(port: Optional[int], api: str, data: Any) -> None:
    """PUT ``data`` as JSON to the NNI manager; the response body is ignored."""
    request('put', port, api, data)
def delete(port: Optional[int], api: str) -> None:
    """Send a DELETE to the NNI manager; the response body is ignored."""
    request('delete', port, api)
| [
"noreply@github.com"
] | penghouwen.noreply@github.com |
e6bd6f44f4b8d52a1fe03affd4b5296e02733784 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03435/s544670590.py | b7a33f056509c825aa6f270f9dacfc4421f64bb9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | c1= list(map(int, input().split()))
c2 = list(map(int, input().split()))
c3 = list(map(int, input().split()))
# The 3x3 grid c can be written c[i][j] = a_i + b_j iff the decomposition
# with b_j = c1[j] and a_i = c_i[0] - c1[0] reproduces every cell.
grid = [c1, c2, c3]
row_offsets = [row[0] - c1[0] for row in grid]
consistent = all(
    grid[i][j] == row_offsets[i] + c1[j]
    for i in range(3)
    for j in range(3)
)
if consistent:
    print('Yes')
else:
    print('No')
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2be58aa42be5d9593ef75e0651cfefb8cbdd0f51 | 3b66632458e2463db62a800f9a0cf9e13c71a47e | /examples/template_tfe_multiple_optimizers/edflow.py | d0c88a39f566f2597d3f743824684633f16c3834 | [
"MIT"
] | permissive | pesser/edflow | eddb6d9341b861670946c157363933e9add52288 | 317cb1b61bf810a68004788d08418a5352653264 | refs/heads/dev | 2022-12-09T05:19:35.850173 | 2020-07-21T16:29:15 | 2020-07-21T16:29:15 | 146,750,121 | 27 | 15 | MIT | 2022-12-07T20:55:50 | 2018-08-30T12:59:11 | Python | UTF-8 | Python | false | false | 7,162 | py | import functools
import tensorflow as tf
tf.enable_eager_execution()
import tensorflow.keras as tfk
import numpy as np
from edflow import TemplateIterator, get_logger
class FullLatentDistribution(object):
    """Gaussian with full covariance, parameterized via a Cholesky factor.

    ``parameters`` packs, per batch element, the mean (``dim`` values)
    followed by the ``dim*(dim+1)/2`` entries of a lower-triangular matrix L;
    the covariance is Sigma = L L^T.  The diagonal of L is stored in
    log-space so it is strictly positive after exponentiation, and rows are
    rescaled by 1/sqrt(row index + 1) to keep the initial scale moderate.
    """
    def __init__(self, parameters, dim, stochastic=True):
        self.parameters = parameters
        self.dim = dim
        # when stochastic is False, sample() deterministically returns the mean
        self.stochastic = stochastic
        ps = self.parameters.shape.as_list()
        if len(ps) != 2:
            # 4-D (NHWC, spatial dims of size 1) input: flatten to (batch, channels)
            # and remember to re-expand samples to the original rank
            self.expand_dims = True
            self.parameters = tf.reshape(self.parameters, (ps[0], ps[3]))
            ps = self.parameters.shape.as_list()
        else:
            self.expand_dims = False
        assert len(ps) == 2
        self.batch_size = ps[0]
        event_dim = self.dim
        n_L_parameters = (event_dim * (event_dim + 1)) // 2
        size_splits = [event_dim, n_L_parameters]
        self.mean, self.L = tf.split(self.parameters, size_splits, axis=1)
        # L is Cholesky parameterization
        self.L = tf.contrib.distributions.fill_triangular(self.L)
        # make sure diagonal entries are positive by parameterizing them
        # logarithmically
        diag_L = tf.linalg.diag_part(self.L)
        self.log_diag_L = diag_L  # keep for later computation of logdet
        diag_L = tf.exp(diag_L)
        # scale down then set diags
        row_weights = np.array([np.sqrt(i + 1) for i in range(event_dim)])
        row_weights = np.reshape(row_weights, [1, event_dim, 1])
        self.L = self.L / row_weights
        self.L = tf.linalg.set_diag(self.L, diag_L)
        self.Sigma = tf.matmul(self.L, self.L, transpose_b=True)  # L times L^t
        ms = self.mean.shape.as_list()
        self.event_axes = list(range(1, len(ms)))
        self.event_shape = ms[1:]
        assert len(self.event_shape) == 1, self.event_shape
    @staticmethod
    def n_parameters(dim):
        """Number of raw parameters needed for a `dim`-dimensional instance."""
        return dim + (dim * (dim + 1)) // 2
    def sample(self, noise_level=1.0):
        """Draw a reparameterized sample: mean + L @ (noise_level * eps)."""
        if not self.stochastic:
            out = self.mean
        else:
            eps = noise_level * tf.random_normal([self.batch_size, self.dim, 1])
            eps = tf.matmul(self.L, eps)
            eps = tf.squeeze(eps, axis=-1)
            out = self.mean + eps
        if self.expand_dims:
            # restore the two singleton spatial axes removed in __init__
            out = tf.expand_dims(out, axis=1)
            out = tf.expand_dims(out, axis=1)
        return out
    def kl(self, other=None):
        """Return the mean KL divergence to the standard normal N(0, I)."""
        if other is not None:
            # BUG FIX: 'NotImplemented' is a constant, not an exception type;
            # raising it fails with TypeError.  Use NotImplementedError.
            raise NotImplementedError("Only KL to standard normal is implemented.")
        delta = tf.square(self.mean)
        diag_covar = tf.reduce_sum(tf.square(self.L), axis=2)
        logdet = 2.0 * self.log_diag_L
        kl = 0.5 * tf.reduce_sum(
            diag_covar - 1.0 + delta - logdet, axis=self.event_axes
        )
        kl = tf.reduce_mean(kl)
        return kl
class Model(tfk.Model):
    """MNIST VAE: dense encoder emitting full-covariance Gaussian posterior
    parameters, and a dense decoder mapping latent samples back to images."""
    def __init__(self, config):
        """Build encoder/decoder from ``config`` ("z_dim", "lr", "batch_size")."""
        super().__init__()
        self.z_dim = config["z_dim"]
        # mean + Cholesky entries required by FullLatentDistribution
        self.n_z_params = FullLatentDistribution.n_parameters(self.z_dim)
        self.lr = config["lr"]
        # encoder: 784 -> 1000 -> 500 -> 300 -> posterior parameters
        self.encode = tfk.Sequential(
            [
                tfk.layers.Dense(
                    1000,
                    kernel_initializer="he_uniform",
                    bias_initializer="random_uniform",
                ),
                tfk.layers.LeakyReLU(0.1),
                tfk.layers.Dense(
                    500,
                    kernel_initializer="he_uniform",
                    bias_initializer="random_uniform",
                ),
                tfk.layers.LeakyReLU(0.1),
                tfk.layers.Dense(
                    300,
                    kernel_initializer="he_uniform",
                    bias_initializer="random_uniform",
                ),
                tfk.layers.LeakyReLU(0.1),
                tfk.layers.Dense(
                    self.n_z_params,
                    kernel_initializer="he_uniform",
                    bias_initializer="random_uniform",
                ),
            ]
        )
        # decoder: z -> 300 -> 500 -> 1000 -> 784, tanh output range
        self.decode = tfk.Sequential(
            [
                tfk.layers.Dense(300, kernel_initializer="he_uniform"),
                tfk.layers.LeakyReLU(0.1),
                tfk.layers.Dense(500, kernel_initializer="he_uniform"),
                tfk.layers.LeakyReLU(0.1),
                tfk.layers.Dense(1000, kernel_initializer="he_uniform"),
                tfk.layers.LeakyReLU(0.1),
                tfk.layers.Dense(784, kernel_initializer="he_uniform"),
                tfk.layers.Activation(tf.nn.tanh),
            ]
        )
        # build eagerly so weights exist before the first optimizer step
        input_shape = (config["batch_size"], 28 ** 2)
        self.build(input_shape)
        self.submodels = {"decoder": self.decode, "encoder": self.encode}
    def call(self, x):
        """Encode ``x``, sample the posterior, decode; return x/posterior/rec."""
        x = tf.reshape(x, (-1, 28 ** 2))
        posterior_params = self.encode(x)
        posterior_distr = FullLatentDistribution(posterior_params, self.z_dim)
        posterior_sample = posterior_distr.sample()
        rec = self.decode(posterior_sample)
        rec = tf.reshape(rec, (-1, 28, 28, 1))
        output = {"x": x, "posterior_distr": posterior_distr, "rec": rec}
        return output
class Iterator(TemplateIterator):
    """edflow iterator training the VAE with one Adam optimizer per submodel."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # loss and optimizers: one Adam per submodel (encoder / decoder)
        self.optimizers = {
            submodel_name: tf.compat.v1.train.AdamOptimizer(learning_rate=self.model.lr)
            for submodel_name, submodel in self.model.submodels.items()
        }
        # to save and restore model + optimizer state together
        self.tfcheckpoint = tf.train.Checkpoint(model=self.model, **self.optimizers)
    def save(self, checkpoint_path):
        """Write model and optimizer state to ``checkpoint_path``."""
        self.tfcheckpoint.write(checkpoint_path)
    def restore(self, checkpoint_path):
        """Restore model and optimizer state from ``checkpoint_path``."""
        self.tfcheckpoint.restore(checkpoint_path)
    def step_op(self, model, **kwargs):
        """Build the train/log/eval closures for one batch (kwargs["image"])."""
        losses = {}
        inputs = kwargs["image"]
        # compute losses under a persistent tape so each submodel's gradient
        # can be taken separately in train_op
        with tf.GradientTape(persistent=True) as tape:
            outputs = model(inputs)
            # per-image squared reconstruction error, averaged over the batch
            loss = tf.reduce_mean(
                tf.reduce_sum(tf.square(inputs - outputs["rec"]), axis=(1, 2, 3))
            )
            # BUG FIX: the KL regularizer must be the scalar KL divergence;
            # the previous code called .sample(), which returned a
            # (batch, z_dim) latent sample instead of kl().
            loss_kl = outputs["posterior_distr"].kl()
            # only the encoder is trained with the KL term
            losses["encoder"] = loss + loss_kl
            losses["decoder"] = loss
        def train_op():
            # apply each submodel's gradient with its own optimizer
            for loss_name, loss in losses.items():
                optimizer = self.optimizers[loss_name]
                submodel = self.model.submodels[loss_name]
                params = submodel.trainable_variables
                grads = tape.gradient(loss, params)
                optimizer.apply_gradients(zip(grads, params))
        image_logs = {"rec": np.array(outputs["rec"]), "x": np.array(inputs)}
        scalar_logs = {"loss_rec": loss, "loss_kl": loss_kl}
        def log_op():
            return {
                "images": image_logs,
                "scalars": scalar_logs,
            }
        def eval_op():
            eval_outputs = {}
            eval_outputs.update(image_logs)
            return eval_outputs
        return {"train_op": train_op, "log_op": log_op, "eval_op": eval_op}
| [
"supermario94123@gmail.com"
] | supermario94123@gmail.com |
1ce01c3e5eafef0398c727f2132d92cef69b14ab | 2b93a5f46980e475375e796de139ed46a53809a6 | /Functions/Calculator.py | 0095b099701cd2e5bbf751a81ce9b7acc2e6e00c | [] | no_license | ravi4all/PythonMay_11 | c9996cb0a2741a5a022c74129aa21c1f4b495aba | f0f3fb5e99a67e704df2a109a7af3d8d18010f9d | refs/heads/master | 2020-03-16T21:28:06.549963 | 2018-05-30T05:34:50 | 2018-05-30T05:34:50 | 133,000,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | def add(x,y):
z = x + y
print("Addition is",z)
def sub(x,y):
    """Print the difference x - y in the calculator's report format."""
    print("Subtraction is", x - y)
def div(x,y):
    """Print the quotient x / y (true division) in the report format."""
    quotient = x / y
    print("Division is", quotient)
def mul(x,y):
    """Print the product x * y in the report format."""
    print("Multiplication is", x * y)
# Menu-driven calculator: show the menu, read a choice and two operands,
# then dispatch to the matching handler through a lookup table.
print("""
1. Add
2. Sub
3. Div
4. Mul
""")
user_choice = input("Enter your choice : ")
num_1 = int(input("Enter first number : "))
num_2 = int(input("Enter second number " ))
# dispatch table mapping each menu choice to its handler function
todo = {
"1" : add,
"2" : sub,
"3" : div,
"4" : mul
}
# NOTE(review): todo.get() yields None for any choice outside "1"-"4", so the
# subsequent call would raise TypeError on invalid input — confirm intended.
func = todo.get(user_choice)
# print(func)
func(num_1, num_2) | [
"noreply@github.com"
] | ravi4all.noreply@github.com |
1b168c8660752d0007441aec85f837fc3f33b6f2 | ca831a9dc9d6dc0b2cedc4d998b26600439b5f10 | /python/numpy/q4_np_concatenate.py | d47d611fb6d9b69f1a21103ef45d97b89b76e8e9 | [
"MIT"
] | permissive | mxdzi/hackerrank | c2579f4351fba5af1dec21a49485e043421c2dd8 | c8da62ac39a0c24f535eded74c102a9c0ccd7708 | refs/heads/master | 2022-12-26T20:10:36.948961 | 2022-12-08T18:27:51 | 2022-12-08T18:27:51 | 225,469,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | import numpy
def main():
    """Read two integer matrices from stdin and print their row-wise concatenation."""
    rows_a, rows_b, _cols = map(int, input().split())
    first = numpy.array([input().split() for _ in range(rows_a)], int)
    second = numpy.array([input().split() for _ in range(rows_b)], int)
    print(numpy.concatenate((first, second)))
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
"michal@dziadowicz.it"
] | michal@dziadowicz.it |
7ba0744fc18c12981135e6b474843ce4bdb643e4 | 14bca3c05f5d8de455c16ec19ac7782653da97b2 | /lib/requests_oauthlib/oauth2_auth.py | d8712fd46c49685f885010eb48c0e21094a5eea6 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hovu96/splunk_as_a_service_app | 167f50012c8993879afbeb88a1f2ba962cdf12ea | 9da46cd4f45603c5c4f63ddce5b607fa25ca89de | refs/heads/master | 2020-06-19T08:35:21.103208 | 2020-06-16T19:07:00 | 2020-06-16T19:07:00 | 196,641,210 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,578 | py | from __future__ import unicode_literals
from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError
from oauthlib.oauth2 import is_secure_transport
from requests.auth import AuthBase
class OAuth2(AuthBase):
"""Adds proof of authorization (OAuth2 token) to the request."""
def __init__(self, client_id=None, client=None, token=None):
"""Construct a new OAuth 2 authorization object.
:param client_id: Client id obtained during registration
:param client: :class:`oauthlib.oauth2.Client` to be used. Default is
WebApplicationClient which is useful for any
hosted application but not mobile or desktop.
:param token: Token dictionary, must include access_token
and token_type.
"""
self._client = client or WebApplicationClient(client_id, token=token)
if token:
for k, v in token.items():
setattr(self._client, k, v)
def __call__(self, r):
"""Append an OAuth 2 token to the request.
Note that currently HTTPS is required for all requests. There may be
a token type that allows for plain HTTP in the future and then this
should be updated to allow plain HTTP on a white list basis.
"""
if not is_secure_transport(r.url):
raise InsecureTransportError()
r.url, r.headers, r.body = self._client.add_token(r.url,
http_method=r.method, body=r.body, headers=r.headers)
return r
| [
"robert.fujara@gmail.com"
] | robert.fujara@gmail.com |
4cc5fa1c5d42c3b5b3744dc1eb24a06ed4c8e10c | 95495baeb47fd40b9a7ecb372b79d3847aa7a139 | /test/test_siurl_list.py | 094c867212270e09508ac2b3c62b72a81fd28872 | [] | no_license | pt1988/fmc-api | b1d8ff110e12c13aa94d737f3fae9174578b019c | 075f229585fcf9bd9486600200ff9efea5371912 | refs/heads/main | 2023-01-07T09:22:07.685524 | 2020-10-30T03:21:24 | 2020-10-30T03:21:24 | 308,226,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | # coding: utf-8
"""
Cisco Firepower Management Center Open API Specification
**Specifies the REST URLs and methods supported in the Cisco Firepower Management Center API. Refer to the version specific [REST API Quick Start Guide](https://www.cisco.com/c/en/us/support/security/defense-center/products-programming-reference-guides-list.html) for additional information.** # noqa: E501
OpenAPI spec version: 1.0.0
Contact: tac@cisco.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.siurl_list import SIURLList # noqa: E501
from swagger_client.rest import ApiException
class TestSIURLList(unittest.TestCase):
"""SIURLList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSIURLList(self):
"""Test SIURLList"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.siurl_list.SIURLList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"pt1988@gmail.com"
] | pt1988@gmail.com |
4b11dd4cd2213194d38521b0f83f8f3b572200c8 | d79c152d072edd6631e22f886c8beaafe45aab04 | /nicolock/users/migrations/0004_user_user_type.py | 92f9060fd70d6a9a282d30ca0b5d03f2722b99e2 | [] | no_license | kabroncelli/Nicolock | 764364de8aa146721b2678c14be808a452d7a363 | 4c4343a9117b7eba8cf1daf7241de549b9a1be3b | refs/heads/master | 2020-03-11T11:02:43.074373 | 2018-04-18T17:38:33 | 2018-04-18T17:38:33 | 129,959,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-06 23:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_companyprofile'),
]
operations = [
migrations.AddField(
model_name='user',
name='user_type',
field=models.CharField(choices=[('homeowner', 'Homeowner'), ('contractor', 'Contractor')], default='contractor', max_length=12, verbose_name='user type'),
preserve_default=False,
),
]
| [
"brennen@lightningkite.com"
] | brennen@lightningkite.com |
51b4d394824411e8488ff400df0a553116936ee9 | 9d3b8d5f45e5407f3275542cf5792fd2510abfe4 | /Chapter8-Practice/test_8.3.3.py | be0013a975a93a720f793d9350d06c6bec324a9c | [] | no_license | Beautyi/PythonPractice | 375767583870d894801013b775c493bbd3c36ebc | 9104006998a109dcab0848d5540fb963b20f5b02 | refs/heads/master | 2020-04-23T09:58:50.065403 | 2019-04-08T02:55:52 | 2019-04-08T02:55:52 | 171,088,504 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | #返回字典
def build_person(first_name, last_name):
"""返回一个字典,包含一个人的信息"""
person = {'first': first_name, 'last': last_name}
return person
musician = build_person('jimi', 'hendrix')
print(musician)
def build_person(first_name, last_name, age=' '):
"""返回一个字典,包含一个人的信息"""
person = {'first': first_name, 'last': last_name, 'age': age}
return person
musician = build_person('jimi', 'hendrix', '27')
print(musician)
def build_person(first_name, last_name, age=' '):
"""返回一个字典,包含一个人的信息"""
person = {'first': first_name, 'last': last_name, 'age': age}
if age:
person['age'] = age
return person
musician = build_person('jimi', 'hendrix', age=27)
print(musician)
| [
"1210112866@qq.com"
] | 1210112866@qq.com |
77bd1762c4aaac19096157edc60a32d1f6d81374 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/uribv6/db.py | b29be36943d31f0ffadcc44b5729663096da2e21 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 4,200 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Db(Mo):
"""
"""
meta = ClassMeta("cobra.model.uribv6.Db")
meta.moClassName = "uribv6Db"
meta.rnFormat = "db-%(type)s"
meta.category = MoCategory.REGULAR
meta.label = "Database"
meta.writeAccessMask = 0x8008421042001
meta.readAccessMask = 0x8008421042001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.uribv6.Route")
meta.childNamesAndRnPrefix.append(("cobra.model.uribv6.Route", "rt-"))
meta.parentClasses.add("cobra.model.uribv6.Dom")
meta.superClasses.add("cobra.model.l3.Db")
meta.superClasses.add("cobra.model.nw.Db")
meta.superClasses.add("cobra.model.nw.Conn")
meta.superClasses.add("cobra.model.rib.Db")
meta.superClasses.add("cobra.model.nw.GEp")
meta.superClasses.add("cobra.model.nw.Item")
meta.rnPrefixes = [
('db-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 16436, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 128)]
meta.props.add("name", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "type", "type", 17496, PropCategory.REGULAR)
prop.label = "Type"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.defaultValue = 1
prop.defaultValueStr = "rt"
prop._addConstant("adj", "adjacency-database", 3)
prop._addConstant("nh", "nexthop-database", 2)
prop._addConstant("rt", "route-database", 1)
meta.props.add("type", prop)
meta.namingProps.append(getattr(meta.props, "type"))
def __init__(self, parentMoOrDn, type, markDirty=True, **creationProps):
namingVals = [type]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
3a5e3a9076882a87027c00689734bedef960925d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03425/s478084192.py | 7e20b760b92578dea372ba9ffdd4d4f5431cd5bc | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | from itertools import combinations
N = int(input())
A = [0 for _ in range(5)]
for _ in range(N):
a = input().strip()
if a[0]=="M":
A[0] += 1
elif a[0]=="A":
A[1] += 1
elif a[0]=="R":
A[2] += 1
elif a[0]=="C":
A[3] += 1
elif a[0]=="H":
A[4] += 1
cnt = 0
for x in combinations(range(5),3):
cnt += A[x[0]]*A[x[1]]*A[x[2]]
print(cnt) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8e9f3948ab5c1e5a196edddb2fe11e19304dd0e5 | 1cd0c5706f5afcabccf28b59c15d306b114dd82a | /siteapi/migrations/0004_auto_20170220_2215.py | b448ea88ca4bcdc7a5f1e878f3326138726cde40 | [] | no_license | jacobbridges/scribbli | fb1ed8633fc8ebcd7d989fbab2e051612bdc07d2 | eb21ca9f5ee4c7caba5a25b76c6cdfe81af5d995 | refs/heads/master | 2021-01-12T10:32:38.466332 | 2018-01-27T19:48:39 | 2018-01-27T19:48:39 | 81,711,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-20 22:15
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('siteapi', '0003_auto_20170220_2213'),
]
operations = [
migrations.AlterField(
model_name='alphainvitation',
name='date_expires',
field=models.DateTimeField(default=datetime.datetime(2017, 2, 21, 22, 15, 51, 651428), verbose_name='date expires'),
),
migrations.AlterField(
model_name='alphainvitation',
name='unik',
field=models.CharField(max_length=36),
),
]
| [
"him@jacobandkate143.com"
] | him@jacobandkate143.com |
beb923b2521bb0f9e00e5a892115a68855650a54 | a4ea525e226d6c401fdb87a6e9adfdc5d07e6020 | /src/azure-cli/azure/cli/command_modules/servicebus/aaz/latest/servicebus/namespace/private_link_resource/_show.py | eae4030227ceaa104f1f40abb9b01954ee7cdcc9 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | Azure/azure-cli | 13340eeca2e288e66e84d393fa1c8a93d46c8686 | a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca | refs/heads/dev | 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 | MIT | 2023-09-14T11:11:05 | 2016-02-04T00:21:51 | Python | UTF-8 | Python | false | false | 6,208 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"servicebus namespace private-link-resource show",
)
class Show(AAZCommand):
"""List lists of resources that supports Privatelinks.
"""
_aaz_info = {
"version": "2022-10-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.servicebus/namespaces/{}/privatelinkresources", "2022-10-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
return self.build_paging(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.namespace_name = AAZStrArg(
options=["--namespace-name"],
help="The namespace name",
required=True,
fmt=AAZStrArgFormat(
max_length=50,
min_length=6,
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.PrivateLinkResourcesGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
return result, next_link
class PrivateLinkResourcesGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/privateLinkResources",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"namespaceName", self.ctx.args.namespace_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-10-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.next_link = AAZStrType(
serialized_name="nextLink",
)
_schema_on_200.value = AAZListType()
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType()
properties = cls._schema_on_200.value.Element.properties
properties.group_id = AAZStrType(
serialized_name="groupId",
)
properties.required_members = AAZListType(
serialized_name="requiredMembers",
)
properties.required_zone_names = AAZListType(
serialized_name="requiredZoneNames",
)
required_members = cls._schema_on_200.value.Element.properties.required_members
required_members.Element = AAZStrType()
required_zone_names = cls._schema_on_200.value.Element.properties.required_zone_names
required_zone_names.Element = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
__all__ = ["Show"]
| [
"noreply@github.com"
] | Azure.noreply@github.com |
84c9288217995f2610547ebe92e52a2f6e69d003 | d5ee3688c0df793a765aa7fca25253ef450b82e9 | /src/scs_mfr/opc_conf.py | a821e3faf7068277766ea4005206cbf8c299d8b0 | [
"MIT"
] | permissive | seoss/scs_mfr | 0e85146c57dfefd605967e7dd54c666bfefddf74 | 997dd2b57160df30ef8750abed7efa87831e4c66 | refs/heads/master | 2023-01-20T23:58:16.547082 | 2020-11-27T09:40:20 | 2020-11-27T09:40:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,256 | py | #!/usr/bin/env python3
"""
Created on 13 Jul 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
DESCRIPTION
The opc_conf utility is used to specify whether an Alphasense optical particle counter (OPC) is present and if so,
which model is attached. An option is also available to override the host's default SPI bus and SPI chip select
lines for the OPC.
The specification also includes the number of seconds between readings by the OPC monitor sub-process. The maximum
time between readings is 10 seconds, the minimum five. A 10 second period provides the highest precision, but sampling
at this rate may be subject to clipping in extremely polluted environments.
The --restart-on-zeroes flag can be used to test the OPC in some situations, by overriding the default behaviour,
which is to restart the OPC if repeated zero readings are presented.
Flags are included to add or remove data interpretation exegetes, together with the source of T / rH readings.
Use of these is under development.
Sampling is performed by the scs_dev/particulates_sampler utility. If an opc_conf.json document is not present, the
scs_dev/particulates_sampler utility terminates.
Note that the scs_dev/particulates_sampler process must be restarted for changes to take effect.
The Alphasense OPC-N2, OPC-N3, OPC-R1, and Sensirion SPS30 models are supported.
Alternate exegetes (data interpretation models) can be added or removed - available interpretations can be listed with
the --help flag.
SYNOPSIS
opc_conf.py [-n NAME] [{ [-m MODEL] [-s SAMPLE_PERIOD] [-z { 0 | 1 }] [-p { 0 | 1 }]
[-b BUS] [-a ADDRESS] [-i INFERENCE_UDS] [-e EXEGETE] [-r EXEGETE] | -d }] [-v]
EXAMPLES
./opc_conf.py -m N2 -b 0 -a 1 -e ISLin/Urban/N2/v1
./opc_conf.py -m S30 -b 1
DOCUMENT EXAMPLE
{"model": "N3", "sample-period": 10, "restart-on-zeroes": true, "power-saving": false,
"inf": "/home/scs/SCS/pipes/lambda-model-pmx-s1.uds", "exg": []}
FILES
~/SCS/conf/opc_conf.json
SEE ALSO
scs_dev/particulates_sampler
scs_mfr/opc_cleaning_interval
REFERENCES
https://github.com/south-coast-science/scs_core/blob/develop/src/scs_core/particulate/exegesis/exegete_catalogue.py
BUGS
The specification allows for a power saving mode - which enables the OPC to shut down between readings - but
this is not currently implemented.
"""
import sys
from scs_core.data.json import JSONify
from scs_dfe.particulate.opc_conf import OPCConf
from scs_host.sys.host import Host
from scs_mfr.cmd.cmd_opc_conf import CmdOPCConf
# --------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
incompatibles = []
# ----------------------------------------------------------------------------------------------------------------
# cmd...
cmd = CmdOPCConf()
if not cmd.is_valid():
cmd.print_help(sys.stderr)
exit(2)
if cmd.verbose:
print("opc_conf: %s" % cmd, file=sys.stderr)
sys.stderr.flush()
# ----------------------------------------------------------------------------------------------------------------
# resources...
# OPCConf...
conf = OPCConf.load(Host, name=cmd.name)
# ----------------------------------------------------------------------------------------------------------------
# run...
if cmd.set():
if conf is None and not cmd.is_complete():
print("opc_conf: No configuration is stored - you must therefore set the required fields.",
file=sys.stderr)
cmd.print_help(sys.stderr)
exit(2)
model = cmd.model if cmd.model else conf.model
sample_period = cmd.sample_period if cmd.sample_period else conf.sample_period
restart_on_zeroes = cmd.restart_on_zeroes if cmd.restart_on_zeroes is not None else conf.restart_on_zeroes
power_saving = cmd.power_saving if cmd.power_saving is not None else conf.power_saving
if conf is None:
conf = OPCConf(None, 10, True, False, None, None, None, []) # permit None for bus and address settings
bus = conf.bus if cmd.bus is None else cmd.bus
address = conf.address if cmd.address is None else cmd.address
inference = conf.inference if cmd.inference is None else cmd.inference
conf = OPCConf(model, sample_period, restart_on_zeroes, power_saving,
bus, address, inference, conf.exegete_names)
if cmd.use_exegete:
conf.add_exegete(cmd.use_exegete)
if cmd.remove_exegete:
conf.discard_exegete(cmd.remove_exegete)
# compatibility check...
try:
incompatibles = conf.incompatible_exegetes()
except KeyError as ex:
print("opc_conf: The following exegete is not valid: %s." % ex, file=sys.stderr)
exit(1)
if incompatibles:
print("opc_conf: The following exegetes are not compatible with %s: %s." %
(conf.model, ', '.join(incompatibles)),
file=sys.stderr)
exit(1)
conf.save(Host)
elif cmd.delete:
conf.delete(Host, name=cmd.name)
conf = None
if conf:
print(JSONify.dumps(conf))
| [
"bruno.beloff@southcoastscience.com"
] | bruno.beloff@southcoastscience.com |
ad5ae115186a694489f6794a6279b0b75e037ee8 | 051c3ee44478265c4510530888335335ec9f7fdf | /ML_Applications/SVM/Mutants/code/SVM_rbf/DigitRecognitionApp_47.py | 2be1dd52e2ea4cb532d52028ef938535d52fe789 | [] | no_license | PinjiaHe/VerifyML | b581c016012c62d8439adfce0caef4f098b36d5e | 3bd7c49e45720c1cdfe0af4ac7dd35b201056e65 | refs/heads/master | 2020-03-25T19:40:39.996370 | 2018-01-30T08:58:58 | 2018-01-30T08:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,166 | py | """
Created on Fri May 26 15:20:01 2017
#Digit Recognition for V & V
#Following note added by RR
Note:
1. The actual digits data from the http://archive.ics.uci.edu/ml/datasets/Pen-Based+Recognition+of+Handwritten+Digits is different than the one referred in this sklearn example
2. For more info, refer this link http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html and the above one.
3. The digits data referred by this Sklearn example can be downloaded from the following link.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/data/digits.csv.gz
"""
import matplotlib.pyplot as plt
from sklearn import datasets, svm, metrics
import numpy as np
import _pickle as cPickle
digits = np.loadtxt('digits_Train.csv', delimiter=',')
digits_images_flat = digits[:,:(-1)]
digits_images = digits_images_flat.view()
digits_images.shape = ((-1), 8, 8)
digits_target = digits[:,(-1)].astype(np.int)
digits_test = np.loadtxt('digits_Test.csv', delimiter=',')
digits_test_images_flat = digits_test[:,:(-1)]
digits_test_images = digits_test_images_flat.view()
digits_test_images.shape = ((-1), 8, 8)
digits_test_target = digits_test[:,(-1)].astype(np.int)
images_and_labels = list(zip(digits_images, digits_target))
n_samples = len(digits_images)
classifier = svm.SVC(gamma=0.001)
classifier.fit(digits_images_flat, digits_target)
expected = digits_test_target
predicted = classifier.predict(digits_test_images_flat)
print('Classification report for classifier %s:\n%s\n' % (
classifier, metrics.classification_report(expected, predicted)))
print('Confusion matrix:\n%s' % metrics.confusion_matrix(expected, predicted))
print("accuracy:", metrics.accuracy_score(expected, predicted))
images_and_predictions = list(zip(digits_test_images, predicted))
np.savetxt('output.txt', classifier.decision_function(digits_test_images_flat))
outputData = {'data_array': metrics.confusion_matrix(expected, predicted)}
with open('output.pkl', 'wb') as outputFile:
cPickle.dump(outputData, outputFile)
with open('model.pkl', 'mutpy') as modelFile:
cPickle.dump(classifier, modelFile) | [
"anurag.bms@gmail.com"
] | anurag.bms@gmail.com |
e23bc12419592f4b9956c4150d64796a12d4900f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03329/s950503139.py | 06a2ed1899ce879a8061ac47bf453dca06be7b16 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | import sys
read = sys.stdin.read
readlines = sys.stdin.readlines
def main():
n = int(input())
nums = []
n6 = 6
while n6 <= n:
nums.append(n6)
n6 = n6 * 6
n9 = 9
while n9 <= n:
nums.append(n9)
n9 = n9 * 9
nums.sort(reverse=True)
dp = [i for i in range(2 * n + 1)]
for num in nums:
for j1 in range(n + 1):
dp[j1+num] = min(dp[j1+num], dp[j1] + 1)
print(dp[n])
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6b31c5782ba2db81a6a2b0b105aa3a0552dcb4ad | 0e4519d3a94157a419e56576875aec1da906f578 | /Python_200Q/051_/Q059.py | 110df8a421ca7cff330c0a2f0054d5279bd7f11d | [] | no_license | ivorymood/TIL | 0de3b92861e345375e87d01654d1fddf940621cd | 1f09e8b1f4df7c205c68eefd9ab02d17a85d140a | refs/heads/master | 2021-02-23T17:30:50.406370 | 2020-10-02T06:43:25 | 2020-10-02T06:43:25 | 245,388,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | import time
count = 1
try:
while True:
print(count)
count += 1
time.sleep(0.5)
# Ctrl + C가 입력되면 발생되는 오류
except KeyboardInterrupt:
print('사용자에 의해 프로그램이 중단되었습니다') | [
"ivorymood@gmail.com"
] | ivorymood@gmail.com |
6a2b5689eeaab249fbbcd16268bfeaf37add46d9 | a5a2abaf5c7a681ebea71b4034d7b12dbd750455 | /examens/migrations/0002_auto_20160210_0540.py | 4a24c8b6dac1ce2a87efbeacc069d098d652c98a | [
"BSD-3-Clause"
] | permissive | matinfo/dezede | e8be34a5b92f8e793a96396f7ec4ec880e7817ff | 829ba8c251a0301741460e6695438be52d04a2fc | refs/heads/master | 2020-03-15T11:25:56.786137 | 2018-04-23T13:47:20 | 2018-04-23T13:47:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,213 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
LEVELS_DATA = (
(1, (6107, 10442, 10531)),
(2, (2253, 12468, 12469)),
(3, (10603, 8167, 10447)),
(4, (8280, 15117)),
(5, (3412, 14)),
(6, (2256,)),
)
LEVELS_HELPS = {
1: """
<p>
L’exercice consiste à transcrire un ensemble de sources
de manière diplomatique.
Il comporte six étapes à la difficulté croissante.
Chaque étape est validée lorsque le texte saisi
correspond exactement au texte contenu dans la source.
</p>
<p>
Pour ce type de transcription, il est important de respecter
le texte de la source : graphie fautive, style (capitales,
petites capitales, etc.), abréviations, ponctuation.
Deux exceptions sont admises :
</p>
<ul>
<li>
l’accentuation doit être rétablie suivant l’usage moderne
(y compris sur les majuscules) ;
</li>
<li>
la justification ne doit pas être respectée :
vous devez aligner le texte à gauche.
</li>
</ul>
""",
2: """
<p>
Mêmes règles que pour la première étape.
On insiste cette fois-ci sur le respect des styles
(capitales, petites capitales, italique, gras, exposant).
</p>
""",
3: """
<p>
Dans une transcription diplomatique, l’usage est de respecter
les graphies fautives. Dans ce cas, le mot erroné doit être suivi
de la locution latine « sic » en italique et entre crochets carrés.
Par exemple : « Beethowen [<em>sic</em>] ».
</p>
""",
4: """<p>Combinaison des règles précédentes.</p>""",
5: """
<p>
Combinaison des règles précédentes sur une transcription plus longue.
</p>
<p>
Certaines fautes apparentes pour un lecteur d’aujourd’hui sont en fait
des usages d’orthographe de l’époque.
Par exemple, on écrivait indifféremment « accents » ou « accens »
pour le pluriel d’« accent ».
</p>
<p>Conservez l’orthographe des noms propres.</p>
""",
6: """
<p>
Utilisez les outils de tableau de l’éditeur de texte
pour obtenir un tableau sans bordure.
Ne pas inclure les points servant de guides dans le tableau.
</p>
""",
}
def add_levels(apps, schema_editor):
Level = apps.get_model('examens.Level')
LevelSource = apps.get_model('examens.LevelSource')
Source = apps.get_model('libretto.Source')
level_sources = []
for level_number, source_ids in LEVELS_DATA:
level = Level.objects.create(
number=level_number, help_message=LEVELS_HELPS[level_number])
for pk in source_ids:
try:
source = Source.objects.get(pk=pk)
except Source.DoesNotExist:
continue
level_sources.append(LevelSource(level=level, source=source))
LevelSource.objects.bulk_create(level_sources)
class Migration(migrations.Migration):
dependencies = [
('examens', '0001_initial'),
]
operations = [
migrations.RunPython(add_levels),
]
| [
"bordage.bertrand@gmail.com"
] | bordage.bertrand@gmail.com |
e78b99366d88cbdb16defac1ca2282fdf9ecf490 | 82f7c00aa14c95032fb6e6ff1029823404246b83 | /apps/statistics/rstats.py | a55468334938caefa6725db99cec04117e861e29 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | gillbates/NewsBlur | 621feaa090cdc2fe9dcfcae4af7de3f40b69ba00 | 0eb2ccf4ebe59ff27d6ed822cc406a427cf3bf6a | refs/heads/master | 2020-12-30T17:32:03.999893 | 2013-07-01T00:12:41 | 2013-07-01T00:12:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,566 | py | import redis
import datetime
import re
from collections import defaultdict
from django.conf import settings
class RStats:
STATS_TYPE = {
'page_load': 'PLT',
'feed_fetch': 'FFH',
}
@classmethod
def stats_type(cls, name):
return cls.STATS_TYPE[name]
@classmethod
def add(cls, name, duration=None):
r = redis.Redis(connection_pool=settings.REDIS_STATISTICS_POOL)
pipe = r.pipeline()
minute = round_time(round_to=60)
key = "%s:%s" % (cls.stats_type(name), minute.strftime('%s'))
pipe.incr("%s:s" % key)
if duration:
pipe.incrbyfloat("%s:a" % key, duration)
pipe.expireat("%s:a" % key, (minute + datetime.timedelta(days=2)).strftime("%s"))
pipe.expireat("%s:s" % key, (minute + datetime.timedelta(days=2)).strftime("%s"))
pipe.execute()
@classmethod
def clean_path(cls, path):
if not path:
return
if path.startswith('/reader/feed/'):
path = '/reader/feed/'
elif path.startswith('/social/stories'):
path = '/social/stories/'
elif path.startswith('/reader/river_stories'):
path = '/reader/river_stories/'
elif path.startswith('/social/river_stories'):
path = '/social/river_stories/'
elif path.startswith('/reader/page/'):
path = '/reader/page/'
elif path.startswith('/api/check_share_on_site'):
path = '/api/check_share_on_site/'
return path
@classmethod
def count(cls, name, hours=24):
r = redis.Redis(connection_pool=settings.REDIS_STATISTICS_POOL)
stats_type = cls.stats_type(name)
now = datetime.datetime.now()
pipe = r.pipeline()
for minutes_ago in range(60*hours):
dt_min_ago = now - datetime.timedelta(minutes=minutes_ago)
minute = round_time(dt=dt_min_ago, round_to=60)
key = "%s:%s" % (stats_type, minute.strftime('%s'))
pipe.get("%s:s" % key)
values = pipe.execute()
total = sum(int(v) for v in values if v)
return total
@classmethod
def sample(cls, sample=1000, pool=None):
if not pool:
pool = settings.REDIS_STORY_HASH_POOL
r = redis.Redis(connection_pool=pool)
keys = set()
errors = set()
prefixes = defaultdict(set)
prefixes_ttls = defaultdict(lambda: defaultdict(int))
prefix_re = re.compile(r"(\w+):(.*)")
p = r.pipeline()
[p.randomkey() for _ in range(sample)]
keys = set(p.execute())
p = r.pipeline()
[p.ttl(key) for key in keys]
ttls = p.execute()
for k, key in enumerate(keys):
match = prefix_re.match(key)
if not match:
errors.add(key)
continue
prefix, rest = match.groups()
prefixes[prefix].add(rest)
ttl = ttls[k]
if ttl < 60*60: # 1 hour
prefixes_ttls[prefix]['1h'] += 1
elif ttl < 60*60*12:
prefixes_ttls[prefix]['12h'] += 1
elif ttl < 60*60*24:
prefixes_ttls[prefix]['1d'] += 1
elif ttl < 60*60*168:
prefixes_ttls[prefix]['1w'] += 1
elif ttl < 60*60*336:
prefixes_ttls[prefix]['2w'] += 1
else:
prefixes_ttls[prefix]['2w+'] += 1
keys_count = len(keys)
print " ---> %s total keys" % keys_count
for prefix, rest in prefixes.items():
total_expiring = sum([k for k in dict(prefixes_ttls[prefix]).values()])
print " ---> %4s: (%.4s%%) %s keys (%s expiring: %s)" % (prefix, 100. * (len(rest) / float(keys_count)), len(rest), total_expiring, dict(prefixes_ttls[prefix]))
print " ---> %s errors: %s" % (len(errors), errors)
def round_time(dt=None, round_to=60):
"""Round a datetime object to any time laps in seconds
dt : datetime.datetime object, default now.
round_to : Closest number of seconds to round to, default 1 minute.
Author: Thierry Husson 2012 - Use it as you want but don't blame me.
"""
if dt == None : dt = datetime.datetime.now()
seconds = (dt - dt.min).seconds
rounding = (seconds+round_to/2) // round_to * round_to
return dt + datetime.timedelta(0,rounding-seconds,-dt.microsecond)
| [
"samuel@ofbrooklyn.com"
] | samuel@ofbrooklyn.com |
f6687c34620096ecab62771b37059ffc0b24c773 | 0e4f76fc3f9aae8b00a67806dffcfc6360e310d1 | /classes/ifcannotationfillareaoccurrence.py | 153502113434a6de9b77ae22bb820913bb7669da | [] | no_license | HassanEmam/PyoIFC | e11f3b330d3e758aaae47ee9e70b2ae5b5847432 | c2ec4e017ce366b07686491dbe22827aec2711c0 | refs/heads/master | 2020-07-31T14:27:53.305768 | 2019-09-24T15:41:28 | 2019-09-24T15:41:28 | 210,630,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | class IFCANNOTATIONFILLAREAOCCURRENCE(IFCANNOTATIONOCCURRENCE):
def __init__(self, FillStyleTarget, GlobalOrLocal):
self.FillStyleTarget = FillStyleTarget
self.GlobalOrLocal = GlobalOrLocal | [
"hassan.emam@hotmail.com"
] | hassan.emam@hotmail.com |
8bf9fe5983a368347046e3207f8b7633072051ca | 3546dd5dbcffc8509440c820faa7cf28080c5df7 | /python35/Lib/site-packages/numpy/fft/tests/test_fftpack.py | e46d78fadb1b219e6e2ee42a40dcf35b89a2e984 | [
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LGPL-2.1-only"
] | permissive | Matchoc/python_env | 55ad609c8270cc6148eda22d37f36709d73b3652 | 859d84d1717a265a4085ad29706b12c19c62d36f | refs/heads/master | 2022-02-13T11:05:51.825544 | 2020-06-05T02:42:08 | 2020-06-05T02:42:08 | 75,793,921 | 0 | 1 | Apache-2.0 | 2018-12-14T07:30:28 | 2016-12-07T03:06:13 | Python | UTF-8 | Python | false | false | 6,218 | py | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.random import random
from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
from numpy.testing import assert_array_equal
import threading
import sys
if sys.version_info[0] >= 3:
import queue
else:
import Queue as queue
def fft1(x):
L = len(x)
phase = -2j*np.pi*(np.arange(L)/float(L))
phase = np.arange(L).reshape(-1, 1) * phase
return np.sum(x*np.exp(phase), axis=1)
class TestFFTShift(TestCase):
def test_fft_n(self):
self.assertRaises(ValueError, np.fft.fft, [1, 2, 3], 0)
class TestFFT1D(TestCase):
def test_fft(self):
x = random(30) + 1j*random(30)
assert_array_almost_equal(fft1(x), np.fft.fft(x))
assert_array_almost_equal(fft1(x) / np.sqrt(30),
np.fft.fft(x, norm="ortho"))
def test_ifft(self):
x = random(30) + 1j*random(30)
assert_array_almost_equal(x, np.fft.ifft(np.fft.fft(x)))
assert_array_almost_equal(
x, np.fft.ifft(np.fft.fft(x, norm="ortho"), norm="ortho"))
def test_fft2(self):
x = random((30, 20)) + 1j*random((30, 20))
assert_array_almost_equal(np.fft.fft(np.fft.fft(x, axis=1), axis=0),
np.fft.fft2(x))
assert_array_almost_equal(np.fft.fft2(x) / np.sqrt(30 * 20),
np.fft.fft2(x, norm="ortho"))
def test_ifft2(self):
x = random((30, 20)) + 1j*random((30, 20))
assert_array_almost_equal(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),
np.fft.ifft2(x))
assert_array_almost_equal(np.fft.ifft2(x) * np.sqrt(30 * 20),
np.fft.ifft2(x, norm="ortho"))
def test_fftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
assert_array_almost_equal(
np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0),
np.fft.fftn(x))
assert_array_almost_equal(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),
np.fft.fftn(x, norm="ortho"))
def test_ifftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
assert_array_almost_equal(
np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),
np.fft.ifftn(x))
assert_array_almost_equal(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
np.fft.ifftn(x, norm="ortho"))
def test_rfft(self):
x = random(30)
assert_array_almost_equal(np.fft.fft(x)[:16], np.fft.rfft(x))
assert_array_almost_equal(np.fft.rfft(x) / np.sqrt(30),
np.fft.rfft(x, norm="ortho"))
def test_irfft(self):
x = random(30)
assert_array_almost_equal(x, np.fft.irfft(np.fft.rfft(x)))
assert_array_almost_equal(
x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), norm="ortho"))
def test_rfft2(self):
x = random((30, 20))
assert_array_almost_equal(np.fft.fft2(x)[:, :11], np.fft.rfft2(x))
assert_array_almost_equal(np.fft.rfft2(x) / np.sqrt(30 * 20),
np.fft.rfft2(x, norm="ortho"))
def test_irfft2(self):
x = random((30, 20))
assert_array_almost_equal(x, np.fft.irfft2(np.fft.rfft2(x)))
assert_array_almost_equal(
x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), norm="ortho"))
def test_rfftn(self):
x = random((30, 20, 10))
assert_array_almost_equal(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x))
assert_array_almost_equal(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),
np.fft.rfftn(x, norm="ortho"))
def test_irfftn(self):
x = random((30, 20, 10))
assert_array_almost_equal(x, np.fft.irfftn(np.fft.rfftn(x)))
assert_array_almost_equal(
x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), norm="ortho"))
def test_hfft(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
assert_array_almost_equal(np.fft.fft(x), np.fft.hfft(x_herm))
assert_array_almost_equal(np.fft.hfft(x_herm) / np.sqrt(30),
np.fft.hfft(x_herm, norm="ortho"))
def test_ihttf(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
assert_array_almost_equal(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)))
assert_array_almost_equal(
x_herm, np.fft.ihfft(np.fft.hfft(x_herm, norm="ortho"),
norm="ortho"))
class TestFFTThreadSafe(TestCase):
threads = 16
input_shape = (800, 200)
def _test_mtsame(self, func, *args):
def worker(args, q):
q.put(func(*args))
q = queue.Queue()
expected = func(*args)
# Spin off a bunch of threads to call the same function simultaneously
t = [threading.Thread(target=worker, args=(args, q))
for i in range(self.threads)]
[x.start() for x in t]
[x.join() for x in t]
# Make sure all threads returned the correct value
for i in range(self.threads):
assert_array_equal(q.get(timeout=5), expected,
'Function returned wrong value in multithreaded context')
def test_fft(self):
a = np.ones(self.input_shape) * 1+0j
self._test_mtsame(np.fft.fft, a)
def test_ifft(self):
a = np.ones(self.input_shape) * 1+0j
self._test_mtsame(np.fft.ifft, a)
def test_rfft(self):
a = np.ones(self.input_shape)
self._test_mtsame(np.fft.rfft, a)
def test_irfft(self):
a = np.ones(self.input_shape) * 1+0j
self._test_mtsame(np.fft.irfft, a)
if __name__ == "__main__":
run_module_suite()
| [
"matchoc@hotmail.com"
] | matchoc@hotmail.com |
00ffa9623b835b0bf4f8cab421978522270fe77f | 3a9f2b3d79cf214704829427ee280f4b49dca70a | /saigon/rat/RuckusAutoTest/scripts/zd/ats_ZD_QingDao_ZD_CLI.py | 9a483fb34fa64b69afd278f3ba300ea8d274eb7a | [] | no_license | jichunwei/MyGitHub-1 | ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791 | f826fc89a030c6c4e08052d2d43af0b1b4b410e3 | refs/heads/master | 2021-01-21T10:19:22.900905 | 2016-08-20T03:34:52 | 2016-08-20T03:34:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,930 | py | import sys
import libZD_TestSuite as testsuite
from RuckusAutoTest.common import lib_KwList as kwlist
def tcid(tcid):
return "TCID:32.%02d" % tcid
def defineTestConfiguration(target_station):
test_cfgs = []
test_name = 'ZD_CLI_Testing'
common_name = 'wlaninfo -V: show VAP info'
test_cfgs.append(({'testcase':'wlaninfo_v'}, test_name, common_name, tcid(1)))
common_name = 'wlaninfo -S: show station info'
test_cfgs.append(({'testcase':'wlaninfo_s', 'target_station':target_station}, test_name, common_name, tcid(2)))
common_name = 'wlaninfo -T: show timer'
test_cfgs.append(({'testcase':'wlaninfo_t'}, test_name, common_name, tcid(3)))
common_name = 'wlaninfo -C: show configured APs'
test_cfgs.append(({'testcase':'wlaninfo_c'}, test_name, common_name, tcid(4)))
common_name = 'wlaninfo -R: show Rogue devices'
test_cfgs.append(({'testcase':'wlaninfo_r'}, test_name, common_name, tcid(5)))
common_name = 'wlaninfo -W: show WLAN info'
test_cfgs.append(({'testcase':'wlaninfo_w'}, test_name, common_name, tcid(6)))
common_name = 'wlaninfo -U: show users info'
test_cfgs.append(({'testcase':'wlaninfo_u'}, test_name, common_name, tcid(8)))
common_name = 'wlaninfo -M: show Mesh entries'
test_cfgs.append(({'testcase':'wlaninfo_m'}, test_name, common_name, tcid(9)))
common_name = 'apmgrinfo -a: Display APs info'
test_cfgs.append(({'testcase':'apmgrinfo_a'}, test_name, common_name, tcid(11)))
common_name = 'apmgrinfo -p: ping APMgr'
test_cfgs.append(({'testcase':'apmgrinfo_p'}, test_name, common_name, tcid(13)))
common_name = 'ping: ping to a destination'
test_cfgs.append(({'testcase':'ping'}, test_name, common_name, tcid(15)))
common_name = 'stp: enable/disable STP'
test_cfgs.append(({'testcase':'stp'}, test_name, common_name, tcid(16)))
common_name = 'upnp: enable/disable UPNP'
test_cfgs.append(({'testcase':'upnp'}, test_name, common_name, tcid(17)))
common_name = 'wlaninfo -A: show all active APs'
test_cfgs.append(({'testcase':'wlaninfo_a'}, test_name, common_name, tcid(23)))
common_name = 'wlaninfo --system: show system parameters'
test_cfgs.append(({'testcase':'wlaninfo_system'}, test_name, common_name, tcid(24)))
common_name = 'wlaninfo --dos: show all DOS entries'
test_cfgs.append(({'testcase':'wlaninfo_dos'}, test_name, common_name, tcid(25)))
common_name = 'wlaninfo --web-auth: show all authorized clients'
test_cfgs.append(({'testcase':'wlaninfo_web_auth', 'target_station':target_station}, test_name, common_name, tcid(26)))
common_name = 'wlaninfo --all-dpsk: show all dynamic PSK'
test_cfgs.append(({'testcase':'wlaninfo_dpsk', 'target_station':target_station}, test_name, common_name, tcid(28)))
common_name = 'wlaninfo --dcert: show all dynamic certificate'
test_cfgs.append(({'testcase':'wlaninfo_dcert', 'target_station':target_station}, test_name, common_name, tcid(29)))
common_name = 'wlaninfo --acl: show all L2 ACL'
test_cfgs.append(({'testcase':'wlaninfo_acl'}, test_name, common_name, tcid(30)))
common_name = 'wlaninfo --role: show all role'
test_cfgs.append(({'testcase':'wlaninfo_role'}, test_name, common_name, tcid(31)))
common_name = 'wlaninfo --auth: show all Authentication servers'
test_cfgs.append(({'testcase':'wlaninfo_auth'}, test_name, common_name, tcid(32)))
common_name = 'wlaninfo --pmk-cache: show all PMK cache'
test_cfgs.append(({'testcase':'wlaninfo_pmk', 'target_station':target_station}, test_name, common_name, tcid(33)))
common_name = 'wlaninfo --mesh-ap: show Mesh APs'
test_cfgs.append(({'testcase':'wlaninfo_mesh_ap'}, test_name, common_name, tcid(34)))
common_name = 'wlaninfo --mesh-topology: show Mesh Topology'
test_cfgs.append(({'testcase':'wlaninfo_mesh_topology'}, test_name, common_name, tcid(35)))
common_name = 'wlaninfo --mesh-history: show Mesh History'
test_cfgs.append(({'testcase':'wlaninfo_mesh_history'}, test_name, common_name, tcid(36)))
common_name = 'wlaninfo --all-wlangroup: show all WLAN group'
test_cfgs.append(({'testcase':'wlaninfo_wlangroup'}, test_name, common_name, tcid(37)))
common_name = 'wlaninfo -all-apgroup: show all AP groups'
test_cfgs.append(({'testcase':'wlaninfo_apgroup'}, test_name, common_name, tcid(38)))
common_name = 'wlaninfo --all-disc-ap: show all disconnected APs'
test_cfgs.append(({'testcase':'wlaninfo_disc_ap'}, test_name, common_name, tcid(39)))
common_name = 'show ap: show all active APs'
test_cfgs.append(({'testcase': 'show_ap'}, test_name, common_name, tcid(41)))
common_name = 'show ap: show all stations'
test_cfgs.append(({'testcase': 'show_station', 'target_station': target_station},
test_name, common_name, tcid(42)))
return test_cfgs
def make_test_suite(**kwargs):
#tbi = getTestbed(**kwargs)
#tb_cfg = testsuite.getTestbedConfig(tbi)
tb = testsuite.getTestbed2(**kwargs)
tbcfg = testsuite.getTestbedConfig(tb)
ap_sym_dict = tbcfg['ap_sym_dict']
sta_ip_list = tbcfg['sta_ip_list']
target_station = testsuite.getTargetStation(sta_ip_list)
ts_name = 'ZD CLI'
ts = testsuite.get_testsuite(ts_name, 'ZD CLI')
test_cfgs = defineTestConfiguration(target_station)
test_order = 1
test_added = 0
for test_params, test_name, common_name, tcid in test_cfgs:
cname = "%s - %s" % (tcid, common_name)
if testsuite.addTestCase(ts, test_name, cname, test_params, test_order) > 0:
test_added += 1
test_order += 1
print "Add test case with test_name: %s\n\tcommon_name: %s" % (test_name, cname)
print "\n-- Summary: added %d test cases into test suite '%s'" % (test_added, ts.name)
if __name__ == "__main__":
_dict = kwlist.as_dict( sys.argv[1:] )
make_test_suite(**_dict)
| [
"tan@xx.com"
] | tan@xx.com |
7a7055c98db6e33109323e2cb20c2d9878f88586 | 28a2d58672a937c337b7310ed4c7d50219b176ce | /tests/test_search.py | ae98278b716f5a7645bbea496c41dd5e640c1bd3 | [
"MIT"
] | permissive | AnuradhaNaik/nyc-council-councilmatic | 9b7fc0fc255500222d96fc534a3638fd32d6ede7 | 94974de317e34dcb05165a7c23717960c400d942 | refs/heads/master | 2020-03-26T12:37:51.662574 | 2018-04-04T18:26:43 | 2018-04-04T18:26:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,352 | py | from unittest.mock import MagicMock
import pytest
from haystack.query import SearchQuerySet
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator
# Different combinations of possible parameters
sorters = ['title', 'date', 'relevance', None]
ascenders = ['true', None]
queries = ['test', None]
@pytest.mark.django_db
@pytest.mark.parametrize('sort_by', sorters)
@pytest.mark.parametrize('ascending', ascenders)
@pytest.mark.parametrize('query', queries)
def test_search_params(sort_by, ascending, query, mocker):
# Use different query strings depending on the params
if sort_by or ascending or query:
query_string = '?'
if sort_by:
query_string += 'sort_by={sort_by}'.format(sort_by=sort_by)
if ascending:
query_string += '&ascending={ascending}'.format(ascending=ascending)
if query:
query_string += '&q={query}'.format(query=query)
else:
query_string = ''
# Mock the SearchQuerySet.order_by method so we can track how it's used
sqs = MagicMock(spec=SearchQuerySet)
empty_qs = sqs
order_func = sqs.facet().facet().facet().facet().facet().highlight().order_by
order_func.return_value = empty_qs
mocker.patch('nyc.views.SearchQuerySet', return_value=sqs)
# Also mock out the `extra_context` method of the search view, which
# will try to check to make sure Solr is running otherwise
mocker.patch('nyc.views.NYCCouncilmaticFacetedSearchView.extra_context', return_value={})
# The Paginator object gets mad if Solr doesn't return any actual results,
# so let's mock it out too
pag = MagicMock(spec=Paginator)
pag.validate_number.return_value = 0
mocker.patch('haystack.views.Paginator', return_value=pag)
client = Client()
search = client.get(reverse('search') + query_string)
assert search.status_code == 200
if sort_by and sort_by != 'relevance':
# Make sure ordering was applied
assert order_func.call_count == 1
# Look for the emphasized button on the page signalling that this
# ordering key has been selected
button= '<strong>{sort_by}</strong>'.format(sort_by=sort_by.title())
assert button in search.content.decode('utf-8')
elif query or sort_by == 'relevance':
# When a query exists with no sort_by value, we default
# to ordering by `relevance` (hence `SearchQuerySet.order_by` will
# not get called)
order_func.assert_not_called()
else:
# When no query or sort_by values exist, we default to `date` ordering
assert order_func.call_count == 1
assert order_func.called_with('-last_action_date')
# Check that the ascending keyword got handled
if sort_by and sort_by != 'relevance': # Relevance doesn't display anything for ascending
if ascending:
assert 'fa-sort-amount-asc' in search.content.decode('utf-8')
else:
if sort_by == 'date':
# Descending is the default for Date
assert 'fa-sort-amount-desc' in search.content.decode('utf-8')
elif sort_by == 'title':
# Ascending is the default for Title
assert 'fa-sort-amount-asc' in search.content.decode('utf-8')
| [
"jean@jeancochrane.com"
] | jean@jeancochrane.com |
fa3c05b75bdac93c2ad02f1e4b234dbc2dc39fb2 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/9144ae424953dcd5cbab180fb68ad51108249353-<srccoms_extract>-bug.py | 70228bce66c7542f656b0ac20014a89b3610376c | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,891 | py | def srccoms_extract(srcfile, status_all, wlist):
'\n Given a source file ``srcfile``, this function will\n extract its API(doc comments) and run sample codes in the\n API.\n\n Args:\n srcfile(file): the source file\n status_all(dict): record all the sample code execution states.\n wlist(list): white list\n\n Returns:\n\n string: the length of __all__ list in srcfile versus the exact number of\n analysed API to make sure no API is missed in this srcfile and it\n is useful for statistic practices.\n '
srcc = srcfile.read()
srcfile.seek(0, 0)
srcls = srcfile.readlines()
allidx = srcc.find('__all__')
if (allidx != (- 1)):
alllist = []
if (srcfile.name.find('ops.py') != (- 1)):
for ai in range(0, len(srcls)):
if srcls[ai].startswith('__all__'):
lb = srcls[ai].find('[')
rb = srcls[ai].find(']')
if (lb == (- 1)):
continue
allele = srcls[ai][(lb + 1):rb].replace("'", '').replace(' ', '').replace('"', '')
alllist.append(allele)
if ('' in alllist):
alllist.remove('')
else:
alllist_b = (allidx + len('__all__'))
allstr = srcc[((alllist_b + srcc[alllist_b:].find('[')) + 1):(alllist_b + srcc[alllist_b:].find(']'))]
allstr = allstr.replace('\n', '').replace(' ', '').replace("'", '').replace('"', '')
alllist = allstr.split(',')
if ('' in alllist):
alllist.remove('')
api_alllist_count = len(alllist)
api_count = 0
handled = []
if (srcfile.name.find('ops.py') != (- 1)):
for i in range(0, len(srcls)):
if (srcls[i].find('__doc__') != (- 1)):
opname = srcls[i][:(srcls[i].find('__doc__') - 1)]
if (opname in wlist):
status_all[((srcfile.name + '/') + opname)] = [(- 2)]
continue
comstart = i
for j in range(i, len(srcls)):
if (srcls[j].find('"""') != (- 1)):
comstart = i
opcom = ''
for j in range((comstart + 1), len(srcls)):
opcom += srcls[j]
if (srcls[j].find('"""') != (- 1)):
break
status = sampcd_extract_and_run(opcom, opname, 'def', opname)
api_count += 1
status_all[((srcfile.name + '/') + opname)] = status
handled.append(opname)
for i in range(0, len(srcls)):
if srcls[i].startswith('def '):
f_header = srcls[i].replace(' ', '')
fn = f_header[len('def'):f_header.find('(')]
if (fn in handled):
continue
if (fn in alllist):
api_count += 1
if ((fn in wlist) or (((fn + '@') + srcfile.name) in wlist)):
status_all[((srcfile.name + '/') + fn)] = [(- 2)]
continue
fcombody = single_defcom_extract(i, srcls)
if (fcombody == ''):
print_header('def', fn)
print('WARNING: no comments in function ', fn, ', but it deserves.')
status_all[((srcfile.name + '/') + fn)] = [(- 1)]
print(status_all[((srcfile.name + '/') + fn)])
continue
else:
status = sampcd_extract_and_run(fcombody, fn, 'def', fn)
status_all[((srcfile.name + '/') + fn)] = status
if srcls[i].startswith('class '):
c_header = srcls[i].replace(' ', '')
cn = c_header[len('class'):c_header.find('(')]
if (cn in handled):
continue
if (cn in alllist):
api_count += 1
if ((cn in wlist) or (((cn + '@') + srcfile.name) in wlist)):
status_all[((srcfile.name + '/') + cn)] = [(- 2)]
continue
classcom = single_defcom_extract(i, srcls, True)
if (classcom != ''):
status = sampcd_extract_and_run(classcom, cn, 'class', cn)
status_all[((srcfile.name + '/') + cn)] = status
else:
print('WARNING: no comments in class itself ', cn, ', but it deserves.\n')
status_all[((srcfile.name + '/') + cn)] = [(- 1)]
print(status_all[((srcfile.name + '/') + cn)])
for x in range((i + 1), len(srcls)):
if (srcls[x].startswith('def ') or srcls[x].startswith('class ')):
break
else:
srcls[x] = srcls[x].replace('\t', ' ')
if srcls[x].startswith(' def '):
thisl = srcls[x]
indent = (len(thisl) - len(thisl.lstrip()))
mn = thisl[(indent + len('def ')):thisl.find('(')]
name = ((cn + '.') + mn)
if mn.startswith('_'):
continue
if ((name in wlist) or (((name + '@') + srcfile.name) in wlist)):
status_all[((srcfile.name + '/') + name)] = [(- 2)]
continue
thismethod = [thisl[indent:]]
for y in range((x + 1), len(srcls)):
srcls[y] = srcls[y].replace('\t', ' ')
if (srcls[y].startswith('def ') or srcls[y].startswith('class ')):
break
elif srcls[y].startswith(' def '):
break
else:
thismethod.append(srcls[y][indent:])
thismtdcom = single_defcom_extract(0, thismethod)
if (thismtdcom != ''):
status = sampcd_extract_and_run(thismtdcom, name, 'method', name)
status_all[((srcfile.name + '/') + name)] = status
return [((srcfile.name + ' all list length: ') + str(api_alllist_count)), ('analysed api count: ' + str(api_count))] | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
13253bbcb5a61616a6cb69054f8269129190e2ae | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/337/usersdata/280/100565/submittedfiles/diagonaldominante.py | f05a7e40031566c52ae540227f222395e0e02f05 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | # -*- coding: utf-8 -*-
import numpy as np
n=int(input("Digite n: "))
matriz=np.empty([n,n])
diag=np.empty([1,n])
cont=0
for i in range(0,n,1):
for j in range(0,n,1):
matriz[i][j]=int(input("Insira um valor: "))
for i in range(0,n,1):
diag[0][i]=matriz[i][i]
print(matriz)
print(diag)
for i in range(0,n,1):
if diag[0][i] > sum(matriz[i]):
cont=cont+1
if cont==n:
print("SIM")
else:
print("NAO")
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
8311b5e7ac60b4884e1f00810a0af2d768c7ca97 | 7a20895c1a5ebe80bfffd6072d2efaa27892b4df | /base_challenge.py | e99d3d082accabd4ebf9723af2b30944ee52694e | [] | no_license | rhlobo/hackerhank_challenges | 609cf35cff04ef8cf34322418d86896d4057d8b0 | 18d75200587d9b259a84bcbbff6cfcdd1c91d6a8 | refs/heads/master | 2018-12-29T14:13:44.606310 | 2014-11-27T16:13:00 | 2014-11-27T16:13:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | #!/usr/bin/env python
import tester
tester.configure()
'''
https://www.hackerrank.com/challenges/XXX
'''
import XXX
def solve(x):
pass
T = int(raw_input())
for _ in xrange(T):
args = [int(x) for x in raw_input().split()]
print solve(*args)
| [
"rhlobo+github@gmail.com"
] | rhlobo+github@gmail.com |
afa240d770a9b1b6bed14b5b46610250143b3006 | 28779d02314089e0a70a91cc1db875cd5024a395 | /tfx/orchestration/kubeflow/v2/kubeflow_v2_dag_runner.py | d5a9835313af31580c8b92a0124df1209ea97512 | [
"Apache-2.0"
] | permissive | voquangtuong/tfx | 4a2b41f6fd1238c8ae59f2080948d451b7668aba | ce2fd6fbde9845cd837c47089c3d6db2f87007b9 | refs/heads/master | 2023-04-10T21:25:06.404137 | 2021-04-20T03:13:12 | 2021-04-20T03:14:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,252 | py | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""V2 Kubeflow DAG Runner."""
import datetime
import json
import os
from typing import Any, Dict, List, Optional, Text
from tfx import version
from tfx.dsl.io import fileio
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.kubeflow.v2 import pipeline_builder
from tfx.orchestration.kubeflow.v2.proto import pipeline_pb2
from tfx.utils import deprecation_utils
from tfx.utils import telemetry_utils
from tfx.utils import version_utils
from google.protobuf import json_format
_KUBEFLOW_TFX_CMD = (
'python', '-m',
'tfx.orchestration.kubeflow.v2.container.kubeflow_v2_run_executor')
# Current schema version for the API proto.
_SCHEMA_VERSION = '2.0.0'
# Default TFX container image/commands to use in KubeflowV2DagRunner.
_KUBEFLOW_TFX_IMAGE = 'gcr.io/tfx-oss-public/tfx:{}'.format(
version_utils.get_image_version())
def _get_current_time():
"""Gets the current timestamp."""
return datetime.datetime.now()
class KubeflowV2DagRunnerConfig(pipeline_config.PipelineConfig):
"""Runtime configuration specific to execution on Kubeflow pipelines."""
def __init__(self,
project_id: Text,
display_name: Optional[Text] = None,
default_image: Optional[Text] = None,
default_commands: Optional[List[Text]] = None,
**kwargs):
"""Constructs a Kubeflow V2 runner config.
Args:
project_id: GCP project ID to be used.
display_name: Optional human-readable pipeline name. Defaults to the
pipeline name passed into `KubeflowV2DagRunner.run()`.
default_image: The default TFX image to be used if not overriden by per
component specification.
default_commands: Optionally specifies the commands of the provided
container image. When not provided, the default `ENTRYPOINT` specified
in the docker image is used. Note: the commands here refers to the K8S
container command, which maps to Docker entrypoint field. If one
supplies command but no args are provided for the container, the
container will be invoked with the provided command, ignoring the
`ENTRYPOINT` and `CMD` defined in the Dockerfile. One can find more
details regarding the difference between K8S and Docker conventions at
https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes
**kwargs: Additional args passed to base PipelineConfig.
"""
super(KubeflowV2DagRunnerConfig, self).__init__(**kwargs)
self.project_id = project_id
self.display_name = display_name
self.default_image = default_image or _KUBEFLOW_TFX_IMAGE
if default_commands is None:
self.default_commands = _KUBEFLOW_TFX_CMD
else:
self.default_commands = default_commands
class KubeflowV2DagRunner(tfx_runner.TfxRunner):
"""Kubeflow V2 pipeline runner.
Builds a pipeline job spec in json format based on TFX pipeline DSL object.
"""
def __init__(self,
config: KubeflowV2DagRunnerConfig,
output_dir: Optional[Text] = None,
output_filename: Optional[Text] = None):
"""Constructs an KubeflowV2DagRunner for compiling pipelines.
Args:
config: An KubeflowV2DagRunnerConfig object to specify runtime
configuration when running the pipeline in Kubeflow.
output_dir: An optional output directory into which to output the pipeline
definition files. Defaults to the current working directory.
output_filename: An optional output file name for the pipeline definition
file. The file output format will be a JSON-serialized PipelineJob pb
message. Defaults to 'pipeline.json'.
"""
if not isinstance(config, KubeflowV2DagRunnerConfig):
raise TypeError('config must be type of KubeflowV2DagRunnerConfig.')
super(KubeflowV2DagRunner, self).__init__()
self._config = config
self._output_dir = output_dir or os.getcwd()
self._output_filename = output_filename or 'pipeline.json'
def run(self,
pipeline: tfx_pipeline.Pipeline,
parameter_values: Optional[Dict[Text, Any]] = None,
write_out: Optional[bool] = True) -> Dict[Text, Any]:
"""Compiles a pipeline DSL object into pipeline file.
Args:
pipeline: TFX pipeline object.
parameter_values: mapping from runtime parameter names to its values.
write_out: set to True to actually write out the file to the place
designated by output_dir and output_filename. Otherwise return the
JSON-serialized pipeline job spec.
Returns:
Returns the JSON pipeline job spec.
Raises:
RuntimeError: if trying to write out to a place occupied by an existing
file.
"""
# TODO(b/166343606): Support user-provided labels.
# TODO(b/169095387): Deprecate .run() method in favor of the unified API
# client.
display_name = (
self._config.display_name or pipeline.pipeline_info.pipeline_name)
pipeline_spec = pipeline_builder.PipelineBuilder(
tfx_pipeline=pipeline,
default_image=self._config.default_image,
default_commands=self._config.default_commands).build()
pipeline_spec.sdk_version = 'tfx-{}'.format(version.__version__)
pipeline_spec.schema_version = _SCHEMA_VERSION
runtime_config = pipeline_builder.RuntimeConfigBuilder(
pipeline_info=pipeline.pipeline_info,
parameter_values=parameter_values).build()
with telemetry_utils.scoped_labels(
{telemetry_utils.LABEL_TFX_RUNNER: 'kubeflow_v2'}):
result = pipeline_pb2.PipelineJob(
display_name=display_name or pipeline.pipeline_info.pipeline_name,
labels=telemetry_utils.get_labels_dict(),
runtime_config=runtime_config)
result.pipeline_spec.update(json_format.MessageToDict(pipeline_spec))
pipeline_json_dict = json_format.MessageToDict(result)
if write_out:
if fileio.exists(self._output_dir) and not fileio.isdir(self._output_dir):
raise RuntimeError('Output path: %s is pointed to a file.' %
self._output_dir)
if not fileio.exists(self._output_dir):
fileio.makedirs(self._output_dir)
with fileio.open(
os.path.join(self._output_dir, self._output_filename), 'wb') as f:
f.write(json.dumps(pipeline_json_dict, sort_keys=True))
return pipeline_json_dict
compile = deprecation_utils.deprecated_alias(
deprecated_name='compile', name='run', func_or_class=run)
| [
"tensorflow-extended-nonhuman@googlegroups.com"
] | tensorflow-extended-nonhuman@googlegroups.com |
e071c6d32cf23a91804981e1c9fbd363874efec7 | d996edcd595c565c5725a16286ce8d338af67246 | /src/rl/qlearning/sarsa_agent.py | 6204f1b366a0ea9e6d541dbc86fc6eab2e9efaf8 | [] | no_license | preddy5/dltemplate | fbbfce7660c451495e255cf8d8437e4b4e207f9c | 77b04b767cbd4914e0a3d3609c645e475aabcc43 | refs/heads/master | 2020-04-28T19:37:04.893001 | 2019-03-13T13:35:04 | 2019-03-13T13:35:04 | 175,517,056 | 1 | 1 | null | 2019-03-13T23:59:40 | 2019-03-13T23:59:39 | null | UTF-8 | Python | false | false | 1,071 | py | import numpy as np
from rl.qlearning.qlearning_agent import QLearningAgent
class EVSarsaAgent(QLearningAgent):
    """Q-learning agent whose state-value estimate follows Expected Value SARSA."""

    def get_value(self, state):
        """Return V_pi(state) under the agent's epsilon-greedy policy.

        V_pi(s) = sum over actions a of pi(a | s) * Q(s, a), where the greedy
        action has probability (1 - eps + eps/n) and every other action eps/n.

        :param state: state whose value is being estimated
        :return: expected Q-value under the policy (0.0 when no action is legal)
        """
        eps = self.epsilon
        actions = self.get_legal_actions(state)
        n = len(actions)
        if n == 0:
            return 0.

        qs = [self.get_q_value(state, a) for a in actions]
        greedy = np.argmax(qs)  # first maximiser, matching argmax tie-breaking
        value = 0.
        for k, q in enumerate(qs):
            # The greedy action carries the extra (1 - eps) probability mass.
            weight = (1 - eps + eps / n) if k == greedy else (eps / n)
            value += weight * q
        return value
| [
"markmo@me.com"
] | markmo@me.com |
aaefa16d38f2d7ea1dc21425da2ace84fc7f1b87 | 058498e815b20950cc97033c2e4e55c732c3f909 | /tempest/lib/api_schema/response/compute/v2_1/floating_ips.py | 0c665905fe34dde28020cc2c8839f64852e8e61f | [
"Apache-2.0"
] | permissive | cisco-openstack/tempest | 49c56de4ee2422791fe5cd832083d7b6558c7d0d | 0bc47dbdd05b5d12d048c09800515c2bd03a16ce | refs/heads/proposed | 2021-01-22T00:11:00.113774 | 2020-06-26T09:32:55 | 2020-06-26T09:32:55 | 24,151,261 | 2 | 5 | Apache-2.0 | 2020-08-07T06:13:20 | 2014-09-17T15:46:17 | Python | UTF-8 | Python | false | false | 4,677 | py | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
common_floating_ip_info = {
'type': 'object',
'properties': {
# NOTE: Now the type of 'id' is integer, but
# here allows 'string' also because we will be
# able to change it to 'uuid' in the future.
'id': {'type': ['integer', 'string']},
'pool': {'type': ['string', 'null']},
'instance_id': {'type': ['string', 'null']},
'ip': parameter_types.ip_address,
'fixed_ip': parameter_types.ip_address
},
'additionalProperties': False,
'required': ['id', 'pool', 'instance_id',
'ip', 'fixed_ip'],
}
list_floating_ips = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ips': {
'type': 'array',
'items': common_floating_ip_info
},
},
'additionalProperties': False,
'required': ['floating_ips'],
}
}
create_get_floating_ip = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ip': common_floating_ip_info
},
'additionalProperties': False,
'required': ['floating_ip'],
}
}
list_floating_ip_pools = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ip_pools': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'name': {'type': 'string'}
},
'additionalProperties': False,
'required': ['name'],
}
}
},
'additionalProperties': False,
'required': ['floating_ip_pools'],
}
}
add_remove_floating_ip = {
'status_code': [202]
}
create_floating_ips_bulk = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ips_bulk_create': {
'type': 'object',
'properties': {
'interface': {'type': ['string', 'null']},
'ip_range': {'type': 'string'},
'pool': {'type': ['string', 'null']},
},
'additionalProperties': False,
'required': ['interface', 'ip_range', 'pool'],
}
},
'additionalProperties': False,
'required': ['floating_ips_bulk_create'],
}
}
delete_floating_ips_bulk = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ips_bulk_delete': {'type': 'string'}
},
'additionalProperties': False,
'required': ['floating_ips_bulk_delete'],
}
}
list_floating_ips_bulk = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'floating_ip_info': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'address': parameter_types.ip_address,
'instance_uuid': {'type': ['string', 'null']},
'interface': {'type': ['string', 'null']},
'pool': {'type': ['string', 'null']},
'project_id': {'type': ['string', 'null']},
'fixed_ip': parameter_types.ip_address
},
'additionalProperties': False,
# NOTE: fixed_ip is introduced after JUNO release,
# So it is not defined as 'required'.
'required': ['address', 'instance_uuid', 'interface',
'pool', 'project_id'],
}
}
},
'additionalProperties': False,
'required': ['floating_ip_info'],
}
}
| [
"mtreinish@kortar.org"
] | mtreinish@kortar.org |
a69a45a82ed766a21c7056d57f6569f2d180450f | fe03eab6477db3f7f3667eefe369dd89d457a58f | /Run2Demonstrator/milliqanScripts/timeCalibration/plotTripleCoincidence.py | f94e68768254ded82d8cb2888577b3914042c347 | [] | no_license | milliQan-sw/milliqanOffline | c64a56173797cd00b9243e4ca7196515081834e6 | 4df6cb4a16ea88feea82b99f9f8c356dbce863cc | refs/heads/master | 2023-08-23T13:39:17.513848 | 2023-08-17T20:16:20 | 2023-08-17T20:16:20 | 123,949,343 | 3 | 7 | null | 2023-09-13T06:59:02 | 2018-03-05T16:47:13 | Python | UTF-8 | Python | false | false | 3,871 | py | #!/usr/local/bin/python
import ROOT as r
import pickle
import os,sys
import pandas as pd
r.gROOT.SetBatch(True)
r.gStyle.SetOptFit(0)
# inputFile = r.TFile('recalibTree.root')
inputFile = r.TFile('/Users/mcitron/milliqanScripts/realTripleCoinc.root')
# inputFile = r.TFile('AllTripleCoincidenceNominalHVNov7_v2.root')
tempCanvas = r.TCanvas()
tempCanvas.cd()
oldtree = inputFile.Get('t')
bins = '(40,{0},{1})'.format(-30,20)
oldtree.Draw("MinIf$(time_module_calibrated,(((chan==8||chan==9)&&nPE>=200)||((chan==11||chan==10)&&nPE>=75)))-MinIf$(time_module_calibrated,(layer==1&&nPE>=200))>>noBeam"+bins,"MinIf$(time_module_calibrated,((chan==8||chan==9)&&nPE>=200)||((chan==11||chan==10)&&nPE>=75))>0&&MinIf$(time_module_calibrated,layer==1&&nPE>=200)>0&&!beam","")
histNoBeam = tempCanvas.GetListOfPrimitives()[0]
oldtree.Draw("MinIf$(time_module_calibrated,((chan==8||chan==9)&&nPE>=200)||((chan==11||chan==10)&&nPE>=75))-MinIf$(time_module_calibrated,(layer==1&&nPE>=200))>>beam"+bins,"MinIf$(time_module_calibrated,((chan==8||chan==9)&&nPE>=200)||((chan==11||chan==10)&&nPE>=75))>0&&MinIf$(time_module_calibrated,layer==1&&nPE>=200)>0&&beam","")
histBeam = tempCanvas.GetListOfPrimitives()[0]
histBeamBackgroundSubtract = histBeam.Clone('backgroundSubtract')
histNoBeamToSubtract = histNoBeam.Clone()
histNoBeamToSubtract.Scale(1.97)
histBeamBackgroundSubtract.Add(histNoBeamToSubtract,-1)
gNoBeam = r.TF1('gNoBeam','gaus',-30,20)
gBeam = r.TF1('gBeam','gaus',5,15)
histNoBeam.Fit(gNoBeam,"R")
print gNoBeam.GetProb()
histNoBeam.Draw()
tempCanvas.SaveAs("noBeam.pdf")
tempCanvas.Clear()
histBeamBackgroundSubtract.Fit(gBeam,"R")
print gBeam.GetProb()
histBeamBackgroundSubtract.Draw()
tempCanvas.SaveAs("backgroundSubtract.pdf")
tempCanvas.Clear()
doubleG = r.TF1("doubleG","gaus(0)+gaus(3)",-30,20)
doubleG.SetParameter(0,gNoBeam.GetParameter(0))
doubleG.SetParameter(1,gNoBeam.GetParameter(1))
doubleG.SetParameter(2,gNoBeam.GetParameter(2))
doubleG.SetParameter(3,gBeam.GetParameter(0))
doubleG.SetParameter(4,gBeam.GetParameter(1))
doubleG.SetParameter(5,gBeam.GetParameter(2))
histBeam.Fit(doubleG,"R")
gNoBeam.SetParameter(0,doubleG.GetParameter(0))
gNoBeam.SetParameter(1,doubleG.GetParameter(1))
gNoBeam.SetParameter(2,doubleG.GetParameter(2))
print doubleG.GetProb()
tempCanvas.Clear()
histBeam.Draw()
gNoBeam.SetLineColor(r.kBlue)
gNoBeam.Draw("same")
tempCanvas.SaveAs("mess.pdf")
# newFile = r.TFile('skimmed.root','recreate')
# jaeData = pd.read_csv('dataJae.txt',sep=' ')
# newtree = oldtree.CloneTree(0);
# threetimes = []
# for entry in oldtree:
# minTimeLayer1 = 9999
# minTimeLayer2 = 9999
# minTimeLayer3 = 9999
# for iT in range(len(entry.time_module_calibrated)):
# if entry.nPE[iT] >= 100:
# if entry.layer[iT] == 1:
# if entry.time_module_calibrated[iT] < minTimeLayer1:
# minTimeLayer1 = entry.time_module_calibrated[iT]
# elif entry.layer[iT] == 2:
# if entry.time_module_calibrated[iT] < minTimeLayer2:
# minTimeLayer2 = entry.time_module_calibrated[iT]
# elif entry.layer[iT] == 3:
# if entry.time_module_calibrated[iT] < minTimeLayer3:
# minTimeLayer3 = entry.time_module_calibrated[iT]
# if all(x != 9999 for x in [minTimeLayer1,minTimeLayer2,minTimeLayer3]):
# threetimes.append([minTimeLayer1,minTimeLayer2,minTimeLayer3])
# # if len(jaeData.loc[(jaeData["run"]==entry.run) & (jaeData["file"] == entry.file) & (jaeData["event"] == entry.event)]) != 0:
# # newtree.Fill()
# # if (entry.run in jaeData["run"]):
# # if (entry.file in jaeData[jaeData["run"] == entry.run]"file"]):
# # if (entry.event in jaeData["event"]):
# # newtree.Fill()
# pickle.dump(threetimes,open("threetimes.pkl","w"))
| [
"mcitron@cern.ch"
] | mcitron@cern.ch |
89a5ee9e158505e8dec2b66c3914cb369c48eb39 | 1e7ce1c56f3030aa6df1e928bab559f50c59bad5 | /homepage/tests.py | c7d12c23566f23ea318652893201412b06dcf0c7 | [] | no_license | AIRob/WxRobot | f7fe37331c399a9d7fb467c7e913f10cc981f8eb | b27a48edb44694d4faa349d68d9b753fe4063276 | refs/heads/master | 2020-06-05T04:53:11.310909 | 2019-05-17T06:46:30 | 2019-05-17T06:46:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py |
# Merge conflict resolved: both branches carried the identical default
# Django test stub, so a single copy is kept and the conflict markers
# (<<<<<<< / ======= / >>>>>>>), which made the module unimportable, are removed.
from django.test import TestCase

# Create your tests here.
| [
"1194681498@qq.com"
] | 1194681498@qq.com |
50351090dc204e3bf9d7ee545c28949f6f6d83bc | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_97/551.py | cf4c418fdeef20399f363ec5314f670826471da8 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | # -*- coding: utf-8 -*-
import sys
class Recycle:
    """Solver for the "Recycled Numbers" problem: count distinct pairs
    (n, m) with low <= n < m <= high where m is a digit rotation of n."""

    def solve(self, low, high):
        """Count recycled pairs inside [low, high] (inclusive).

        Each ordered hit (number -> valid rotation inside the range) is
        tallied, and the total is halved at the end because every unordered
        pair is discovered once from each of its two endpoints.
        """
        hits = 0
        for num in range(low, high + 1):
            digits = str(num)
            # Try every non-trivial rotation point.
            for cut in range(1, len(digits)):
                rotated = digits[cut:] + digits[:cut]
                if rotated[0] == '0':
                    # A rotation with a leading zero is not a valid number.
                    continue
                partner = int(rotated)
                if partner != num and low <= partner <= high:
                    hits += 1
        return hits / 2
# Driver: argv[1] holds "T" followed by T lines of "low high"; the answers go
# to argv[2] in the Code Jam "Case #k: n" format.
recycle = Recycle()
with open(sys.argv[1]) as fin:
    lines = fin.read().split('\n')
with open(sys.argv[2], 'w') as fout:
    for idx in range(int(lines[0])):
        params = [int(param) for param in lines[idx + 1].split(' ')]
        low = params.pop(0)
        high = params.pop(0)
        ans = recycle.solve(low, high)
        # %d truncates the trailing .0 that solve()'s float division produces.
        fout.write("Case #%d: %d\n" % (idx + 1, ans))
# BUG FIX: the original ended with "f2.close" (missing parentheses), so the
# output file was never explicitly closed; the with-blocks above guarantee
# both files are closed and flushed even on error.
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
8e3302d62e49391b32e25a39d6ddb24d0aa64cf2 | 4650ddcf27ddf908c1f38702fe4491df92232905 | /v1/my_site/models.py | ce740596944e1772fdef7c5b105e5c6e515dc0a1 | [] | no_license | volitilov/webface | 0e765232544496fcb1527175cc51e6287ed2797e | 7141c47fe9e86337183faea42be92663ef24afd9 | refs/heads/master | 2020-12-30T15:55:15.023542 | 2017-06-04T18:30:36 | 2017-06-04T18:30:36 | 91,187,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,935 | py | from django.db import models
from django.utils import timezone
from django.conf import settings
class Category(models.Model):
class Meta:
verbose_name = 'Категория'
verbose_name_plural = 'Категории'
title = models.CharField(max_length=30, verbose_name='Название', unique=True)
def __str__(self):
return self.title
class Mesage(models.Model):
class Meta():
verbose_name = 'Сообщение'
verbose_name_plural = 'Сообщения'
name = models.CharField(max_length=20, verbose_name='Имя:')
email = models.EmailField(verbose_name='Почт. ящик:')
website = models.URLField(verbose_name='Сайт:')
text = models.TextField(verbose_name='Сообщение:')
def __str__(self):
return self.name
class PortfolioItem(models.Model):
class Meta():
verbose_name = 'Работа'
verbose_name_plural = 'Работы'
category = models.ManyToManyField(Category, verbose_name='Категория(и):')
title = models.CharField(max_length=200, verbose_name='Название:')
description = models.TextField(verbose_name='Описание:')
img_small = models.ImageField(upload_to='portfolio/%d-%m-%Y', verbose_name='Мал. изображение:')
img_big = models.ImageField(upload_to='portfolio/%d-%m-%Y', verbose_name='Бол. изображение:')
project_url = models.URLField(max_length=200, verbose_name='Ссылка проекта:')
download = models.URLField(max_length=300, verbose_name='Ссылка скачки:')
created_date = models.DateTimeField(default=timezone.now, verbose_name='Дата создания:')
published_date = models.DateTimeField(blank=True, null=True, verbose_name='Дата публикации:')
likes = models.IntegerField(default=0, verbose_name='Кол. лайков:')
previews = models.IntegerField(default=0, verbose_name='Кол. просмотров:')
def __str__(self):
return self.title
| [
"volitilov@gmail.com"
] | volitilov@gmail.com |
f930b669bdff8766d9fe09ddcf376ca7cb482bdc | c68aea1de91b46ae684792123c61e84c44ea0266 | /code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/distutils/tests/test_config.py | a3844974f2ad2b3535ec7a830b7ad4b928887f54 | [
"Apache-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-python-cwi",
"Python-2.0"
] | permissive | Winfredemalx54/algorithm-challenger-1 | 12e23bed89ca889701db1b17ac540ce62ce86d8e | 761c2c39e041fb155f853385998d5c6318a39913 | refs/heads/master | 2022-11-22T15:03:01.548605 | 2020-07-11T12:26:31 | 2020-07-11T12:26:31 | 297,955,141 | 3 | 0 | Apache-2.0 | 2020-09-23T11:58:19 | 2020-09-23T11:58:18 | null | UTF-8 | Python | false | false | 3,870 | py | """Tests for distutils.pypirc.pypirc."""
import sys
import os
import unittest
import tempfile
from distutils.core import PyPIRCCommand
from distutils.core import Distribution
from distutils.log import set_threshold
from distutils.log import WARN
from distutils.tests import support
from test.support import run_unittest
PYPIRC = """\
[distutils]
index-servers =
server1
server2
server3
[server1]
username:me
password:secret
[server2]
username:meagain
password: secret
realm:acme
repository:http://another.pypi/
[server3]
username:cbiggles
password:yh^%#rest-of-my-password
"""
PYPIRC_OLD = """\
[server-login]
username:tarek
password:secret
"""
WANTED = """\
[distutils]
index-servers =
pypi
[pypi]
username:tarek
password:xxx
"""
class BasePyPIRCCommandTestCase(support.TempdirManager,
                                support.LoggingSilencer,
                                support.EnvironGuard,
                                unittest.TestCase):
    # Shared fixture: points HOME at a fresh temp dir so the command's
    # .pypirc handling operates on a throwaway file (self.rc), and builds a
    # minimal concrete PyPIRCCommand subclass for the tests to drive.
    def setUp(self):
        """Patches the environment."""
        super(BasePyPIRCCommandTestCase, self).setUp()
        self.tmp_dir = self.mkdtemp()
        os.environ['HOME'] = self.tmp_dir
        self.rc = os.path.join(self.tmp_dir, '.pypirc')
        self.dist = Distribution()
        # Minimal concrete command: the two required option hooks are no-ops.
        class command(PyPIRCCommand):
            def __init__(self, dist):
                PyPIRCCommand.__init__(self, dist)
            def initialize_options(self):
                pass
            finalize_options = initialize_options
        self._cmd = command
        # Raise the distutils log threshold to WARN for the test's duration;
        # restored in tearDown.
        self.old_threshold = set_threshold(WARN)
    def tearDown(self):
        """Removes the patch."""
        set_threshold(self.old_threshold)
        super(BasePyPIRCCommandTestCase, self).tearDown()
class PyPIRCCommandTestCase(BasePyPIRCCommandTestCase):
    """Behavioural tests for PyPIRCCommand's .pypirc reading and writing."""
    def test_server_registration(self):
        # This test makes sure PyPIRCCommand knows how to:
        # 1. handle several sections in .pypirc
        # 2. handle the old format
        # new format
        self.write_file(self.rc, PYPIRC)
        cmd = self._cmd(self.dist)
        config = cmd._read_pypirc()
        config = list(sorted(config.items()))
        waited = [('password', 'secret'), ('realm', 'pypi'),
                  ('repository', 'https://upload.pypi.org/legacy/'),
                  ('server', 'server1'), ('username', 'me')]
        self.assertEqual(config, waited)
        # old format
        self.write_file(self.rc, PYPIRC_OLD)
        config = cmd._read_pypirc()
        config = list(sorted(config.items()))
        waited = [('password', 'secret'), ('realm', 'pypi'),
                  ('repository', 'https://upload.pypi.org/legacy/'),
                  ('server', 'server-login'), ('username', 'tarek')]
        self.assertEqual(config, waited)
    def test_server_empty_registration(self):
        # With no .pypirc present, storing credentials must create the file
        # with exactly the expected single-server content (WANTED).
        cmd = self._cmd(self.dist)
        rc = cmd._get_rc_file()
        self.assertFalse(os.path.exists(rc))
        cmd._store_pypirc('tarek', 'xxx')
        self.assertTrue(os.path.exists(rc))
        f = open(rc)
        try:
            content = f.read()
            self.assertEqual(content, WANTED)
        finally:
            f.close()
    def test_config_interpolation(self):
        # using the % character in .pypirc should not raise an error (#20120)
        self.write_file(self.rc, PYPIRC)
        cmd = self._cmd(self.dist)
        cmd.repository = 'server3'
        config = cmd._read_pypirc()
        config = list(sorted(config.items()))
        waited = [('password', 'yh^%#rest-of-my-password'), ('realm', 'pypi'),
                  ('repository', 'https://upload.pypi.org/legacy/'),
                  ('server', 'server3'), ('username', 'cbiggles')]
        self.assertEqual(config, waited)
def test_suite():
    """Return all pypirc tests as a suite for the distutils test runner."""
    # unittest.makeSuite() is deprecated (removed in Python 3.13);
    # TestLoader.loadTestsFromTestCase() is the long-standing,
    # behaviour-identical replacement.
    return unittest.TestLoader().loadTestsFromTestCase(PyPIRCCommandTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| [
"bater.makhabel@gmail.com"
] | bater.makhabel@gmail.com |
f834375011f70f9856b73cfc2d76bbf75e4eda72 | 0b144606d44067d6160f1143a759fe7be1a67a93 | /spacy/lang/fa/lex_attrs.py | 99b8e27878bc52ac2dddbf7e67b24da9f088e1f1 | [
"MIT"
] | permissive | huaxz1986/spaCy | a64cba5eb0390068b48a8d4a49c6e8a8b0c22063 | c3c064ace40f4f310d2d220b54b4dc0325a1e3ba | refs/heads/master | 2023-06-09T19:20:46.409792 | 2023-06-02T12:29:52 | 2023-06-02T12:29:52 | 145,535,112 | 2 | 2 | MIT | 2021-07-09T08:11:47 | 2018-08-21T08:50:18 | Python | UTF-8 | Python | false | false | 1,386 | py | from ...attrs import LIKE_NUM
MIM = "م"
ZWNJ_O_MIM = "ام"
YE_NUN = "ین"
_num_words = set(
"""
صفر
یک
دو
سه
چهار
پنج
شش
شیش
هفت
هشت
نه
ده
یازده
دوازده
سیزده
چهارده
پانزده
پونزده
شانزده
شونزده
هفده
هجده
هیجده
نوزده
بیست
سی
چهل
پنجاه
شصت
هفتاد
هشتاد
نود
صد
یکصد
یکصد
دویست
سیصد
چهارصد
پانصد
پونصد
ششصد
شیشصد
هفتصد
هفصد
هشتصد
نهصد
هزار
میلیون
میلیارد
بیلیون
بیلیارد
تریلیون
تریلیارد
کوادریلیون
کادریلیارد
کوینتیلیون
""".split()
)
_ordinal_words = set(
"""
اول
سوم
سیام""".split()
)
_ordinal_words.update({num + MIM for num in _num_words})
_ordinal_words.update({num + ZWNJ_O_MIM for num in _num_words})
_ordinal_words.update({num + YE_NUN for num in _ordinal_words})
def like_num(text):
    """Check whether *text* resembles a number (digits or a Persian
    cardinal/ordinal word)."""
    # Strip thousands/decimal separators — Latin and Arabic variants — and "/".
    for separator in (",", ".", "،", "٫", "/"):
        text = text.replace(separator, "")
    return (
        text.isdigit()
        or text in _num_words
        or text in _ordinal_words
    )
LEX_ATTRS = {LIKE_NUM: like_num}
| [
"noreply@github.com"
] | huaxz1986.noreply@github.com |
c33efb9b294913ee7c0aaf0b4a477a1f9f6da1a0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04031/s911055242.py | 2a1826d064d99651357766b4f3af57ce3138f525 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | N=int(input())
*A,=map(int,input().split())
mx=max(A)
mn=min(A)
ans=float('inf')
for i in range(mn,mx+1):
ans=min(ans, sum([(k-i)**2 for k in A]))
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1f4558a5b374477c18f6a0f04940c2e603d2344e | 0c1e0e5bda62ef78ad08bfea4806677bacaa89fd | /handlers/handlers/pattern.py | c52e35f756a1173211061def2c25ccd9fc5c51f1 | [
"BSD-3-Clause"
] | permissive | adammck/rapidsms-contrib-apps-dev | fb9d47e94e562c03053229fc8226002ba29994cf | b6ef8c9c3726ca0b706b984b93342e24c62fd430 | refs/heads/master | 2020-12-24T21:36:25.118552 | 2010-06-16T17:59:37 | 2010-06-16T17:59:37 | 722,199 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import re
from .base import BaseHandler
class PatternHandler(BaseHandler):
    """Handler that dispatches on a class-level regex ``pattern``.

    A subclass defines ``pattern``; ``dispatch`` then instantiates the
    subclass and calls its ``handle`` with the captured groups whenever the
    incoming message text matches.
    """

    @classmethod
    def _pattern(cls):
        # Subclasses that never defined ``pattern`` simply don't match.
        if not hasattr(cls, "pattern"):
            return None
        return re.compile(cls.pattern, re.IGNORECASE)

    @classmethod
    def dispatch(cls, router, msg):
        compiled = cls._pattern()
        if compiled is None:
            return False
        hit = compiled.match(msg.text)
        if hit is None:
            return False
        # Instantiate the handler and hand it the captured groups.
        cls(router, msg).handle(*hit.groups())
        return True
| [
"adam.mckaig@gmail.com"
] | adam.mckaig@gmail.com |
9e153351e31534753951fd99189961e27ffe9bac | 53181572c4b22df4b569a9901bcd5347a3459499 | /ceit_191221/py200509_zhou/module_app.py | 7dd4e30336f697cc8816410ec7f0136415944b69 | [] | no_license | edu-athensoft/ceit4101python_student | 80ef067b77421fce76d04f778d5c6de8b12f676c | 33cfa438c062d45e8d246b853e93d3c14b92ff2d | refs/heads/master | 2020-07-30T01:04:21.084384 | 2020-07-27T02:21:57 | 2020-07-27T02:21:57 | 210,027,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | """
"""
import math
import ceit_191221.py200509_zhou.module_arithmetic as arithm
import ceit_191221.py200509_zhou.module_logic as logic
# Smoke-test the stdlib math module first.
print(math.sqrt(36))
print(math.e)
# Exercise the course's arithmetic module: add/sub plus its PI and e constants.
print("=== test arithmetic module ===")
print(arithm.add(1,2))
print(arithm.sub(1,2))
print(arithm.PI)
print(arithm.e)
# Exercise the course's boolean-logic wrappers on simple truth values.
print("=== test logic module ===")
print(logic.logic_and(True, False))
print(logic.logic_or(5>3, 3>4))
print(logic.logic_not(1!=1))
| [
"inf.athensoft@hotmail.com"
] | inf.athensoft@hotmail.com |
cfcb1f1d9d54c9b98db16f1e70bb0452ad49661b | 61747f324eaa757f3365fd7bf5ddd53ea0db47d1 | /casepro/contacts/migrations/0012_field_is_visible.py | 2d3bc7e266cf8daf0d1229fddf9e9e442fa74db0 | [
"BSD-3-Clause"
] | permissive | BlueRidgeLabs/casepro | f8b0eefa8f961dd2fdb5da26a48b619ebc1f8c12 | 8ef509326f3dfa80bb44beae00b60cc6c4ac7a24 | refs/heads/master | 2022-01-24T09:01:18.881548 | 2017-12-05T18:46:05 | 2017-12-05T18:49:42 | 113,502,588 | 0 | 0 | null | 2017-12-07T21:57:37 | 2017-12-07T21:57:37 | null | UTF-8 | Python | false | false | 471 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contacts', '0011_migrate_suspend_groups'),
]
operations = [
migrations.AddField(
model_name='field',
name='is_visible',
field=models.BooleanField(default=False, help_text='Whether this field is visible to partner users'),
),
]
| [
"rowanseymour@gmail.com"
] | rowanseymour@gmail.com |
a00d084afd5b74ce9fe9afdb606d0f19d08dae62 | ae56a9df49059c785e10e34b55ea827616950940 | /progress/models/iteration.py | a0aa1b0e5b1fbf251d3b81df0f97f7e397d834cd | [] | no_license | robinharms/Progress | 04a7ccc11f44378209b3492fad34e79ad7dc9e9a | ff31ed3a4a67e831636668d326d7218e0ff0e4b8 | refs/heads/master | 2021-01-15T13:11:27.267833 | 2011-11-13T15:49:06 | 2011-11-13T15:49:06 | 2,527,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | from betahaus.pyracont import BaseFolder
from betahaus.pyracont.decorators import content_factory
from zope.interface import implements
from progress import ProgressMF as _
from progress.models.interfaces import IIteration
@content_factory('Iteration')
class Iteration(BaseFolder):
    """Iteration content type; addable only inside a Project folder."""
    implements(IIteration)
    # BUG FIX: ('Project') without a trailing comma is just the string
    # 'Project', not a one-element tuple.  The plural name indicates a
    # collection of container type names; with a bare string, a membership
    # test like `ctype in allowed_contexts` degrades to a substring check.
    allowed_contexts = ('Project',)
    content_type = 'Iteration'
    display_name = _(u"Iteration")
    schemas = {'add': 'iteration', 'edit': 'iteration'}
| [
"robin@betahaus.net"
] | robin@betahaus.net |
2c8d4504af1d3a0bc4d263d58de39cbe265e4b23 | b8856b7ec201fad5621593f93b7a8d9844276234 | /models/warmtune_model.py | be3ba4b483ec8605ebee81059f2cb3e11f422ceb | [] | no_license | wangjianbing1998/RLLL | cbb2e7724e9a140115e70886b39423a7002e1715 | 413c83effe8dd60de756362b854fcab978fe530c | refs/heads/master | 2023-06-03T09:29:18.165851 | 2021-06-21T08:35:59 | 2021-06-21T08:35:59 | 336,836,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,540 | py | from losses import create_loss
from models.base_model import BaseModel
from networks import create_net
class WarmtuneModel(BaseModel):
    """Warm-tune variant of BaseModel: a single-step schedule whose setup()
    enables the shared FC layers and the per-task head while disabling the
    shared CNN layers and other layers (flag semantics defined in BaseModel —
    presumably trainability; confirm there)."""
    @staticmethod
    def modify_commandline_options(parser):
        """Add new dataset-specific options, and rewrite default values for existing options.
        num_classes is the number of classes per task
        for example, num_classes = [10,10,10], means the number of classes on taks1 is 10, and then so on.
        Parameters:
            parser -- original option parser
        Returns:
            the modified parser.
        """
        parser.add_argument('--net_name', type=str, default="alexnet", choices=["alexnet", "imagenet"],
                            help='network select from alexnet|imagenet', )
        parser.add_argument('--loss_name', type=str, default="total", choices=["total"],
                            help='loss select from total', )
        parser.add_argument('--taskdataset_name', type=str, default="total", choices=["total"],
                            help='loss from total', )
        return parser
    @staticmethod
    def default_value(opt):
        # Hook for models that need to rewrite option defaults;
        # warm-tune leaves the options unchanged.
        return opt
    def __init__(self, opt):
        BaseModel.__init__(self, opt)  # call the initialization method of BaseModel
        # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
        self.visual_names = []
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
        # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
        # define networks; you can use opt.isTrain to specify different behaviors for training and test.
        self.net_names = ["main"]
        self.net_main = create_net(opt)
        self.loss_criterion = create_loss(opt)
        self.init_optimizers(opt)
        # The loss components are named by the criterion object itself.
        self.loss_names = getattr(self.loss_criterion, "loss_names")
        """
        unfreeze shared_cnn_layers, shared_fc_layers and other_layers,
        calculate other loss
        not backward
        """
        self.plus_other_loss = False
        self.need_backward = False
        # Warm-tune is a single-step schedule; setup() rejects any other step.
        self.max_step = 1
    def setup(self, task_index=0, step=1):
        # Layer-group flags consumed elsewhere (BaseModel by the look of it):
        # only the shared FC stack and the task-specific head are enabled.
        if step == 1:
            BaseModel.setup(self, task_index)  # call the initialization method of BaseModel
            self.shared_fc_layers = True
            self.shared_cnn_layers = False
            self.other_layers = False
            self.task_layer = True
        else:
            raise ValueError(f'warmtune Expected 1<=step<={self.max_step}, but got {step}')
| [
"2553627958@qq.com"
] | 2553627958@qq.com |
e30064cad618b7334b32d7eca60aab02f3fda11c | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/4c03abfcd6c043968b6580de192f54d9.py | 960f45654af9d4c711d83ebf96542fe63d2a5e97 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 472 | py | # -*- coding: utf-8 -*-
def hey(userString):
    """Return Bob's reply to *userString*.

    Rules, checked in order: silence (None, empty, or whitespace-only) gets
    "Fine. Be that way!"; shouting gets "Whoa, chill out!"; a question gets
    "Sure."; anything else gets "Whatever."
    """
    if not userString or userString.isspace():
        return "Fine. Be that way!"
    if userString.isupper():
        # Shouting wins even when the shout is also a question.
        return "Whoa, chill out!"
    return "Sure." if userString.endswith('?') else "Whatever."
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
1c7f31793ca39ebee732df11958237c8047a2673 | 97e534b26a76bf0d954e166841179979748bcfa2 | /objects/migrations/0053_claim.py | d7a2ba564d665e051111a4877f2f93d1ea61ffae | [] | no_license | mehdi1361/http_server | 3a8bd73ce44307ee2b7761d1211671ca8cb0f3ba | d8a962c55165ef0237bfb26d27d9cfa11a415a5d | refs/heads/develop | 2022-12-11T00:44:11.089407 | 2019-01-20T12:02:48 | 2019-01-20T12:02:48 | 166,656,299 | 0 | 0 | null | 2022-12-07T23:53:22 | 2019-01-20T12:02:05 | HTML | UTF-8 | Python | false | false | 1,519 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-27 07:15
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration: create the ``Claim`` model (table
    ``claims``) with coin/gem amounts, a JSON ``params`` payload, a used
    flag, and a cascading FK to ``objects.LeagueUser``."""
    dependencies = [
        ('objects', '0052_auto_20180626_1018'),
    ]
    operations = [
        migrations.CreateModel(
            name='Claim',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(auto_now_add=True, verbose_name='created date')),
                # NOTE(review): verbose_name 'created date' on updated_date looks
                # copy-pasted; it mirrors the model definition — verify there.
                ('updated_date', models.DateTimeField(auto_now=True, verbose_name='created date')),
                ('params', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True, verbose_name='params')),
                ('coin', models.PositiveIntegerField(default=10, verbose_name='coin')),
                ('gem', models.PositiveIntegerField(default=10, verbose_name='gem')),
                ('is_used', models.BooleanField(default=False, verbose_name='used')),
                ('league_player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='claims', to='objects.LeagueUser', verbose_name='league_user')),
            ],
            options={
                'db_table': 'claims',
                'verbose_name': 'claim',
                'verbose_name_plural': 'claims',
            },
        ),
    ]
| [
"mhd.mosavi@gmail.com"
] | mhd.mosavi@gmail.com |
443c3d059b335a119f3da8698934ed9e86c8a915 | 0c68c989a8473801743c4f2d2087c223b2fd47fa | /2020_hashcode/practice/pizza.py | 97016dd9bdb9f3d6de66d64c983e089b25e4c9ef | [] | no_license | tabletenniser/Google_code_jam | 4a105e8e9bc27269430a1c90f4c7a59a4d4bc7a1 | 1c300e3b953e2e744a2c3bb963b5d2f46f9caee1 | refs/heads/master | 2021-06-12T22:10:23.675590 | 2021-03-27T22:25:06 | 2021-03-27T22:25:06 | 179,803,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,754 | py | def get_opt_solution(max_slices, pizza_slices):
types_of_pizzas = len(pizza_slices)
prev_dp_table = [0 for _ in range(max_slices)]
prev_solution_table = [list() for _ in range(max_slices)]
for i in range(types_of_pizzas):
dp_table = [0 for _ in range(max_slices)]
solution_table = [list() for _ in range(max_slices)]
print(i)
for j in range(max_slices):
if pizza_slices[i] > j:
dp_table[j] = prev_dp_table[j]
solution_table[j] = prev_solution_table[j]
else:
dp_table_value_select = prev_dp_table[j-pizza_slices[i]] + pizza_slices[i]
if prev_dp_table[j] > dp_table_value_select:
dp_table[j] = prev_dp_table[j]
solution_table[j] = prev_solution_table[j]
else:
dp_table[j] = dp_table_value_select
solution_table[j] = prev_solution_table[j-pizza_slices[i]] + [i]
# print(dp_table)
# print(solution_table)
prev_dp_table = dp_table
prev_solution_table = solution_table
return solution_table[-1]
def get_approx_solution(max_slices, pizza_slices):
    """Greedy fallback: walk the types from the highest index down and take
    each one that still fits within the slice budget.

    Assumes pizza_slices is sorted ascending (as in the Hash Code input), so
    the walk considers the largest counts first — TODO confirm with callers.

    Returns the chosen type indices, sorted ascending.
    """
    slice_total = 0
    chosen = []
    for idx in range(len(pizza_slices) - 1, -1, -1):
        count = pizza_slices[idx]
        # BUG FIX: the original tested "< max_slices", which wrongly rejected
        # selections landing exactly on the budget; "<=" accepts an exact fit.
        if slice_total + count <= max_slices:
            slice_total += count
            chosen.append(idx)
    chosen.sort()
    return chosen
return cur_solution
# Input (stdin): first line "max_slices pizza_types", second line the slice
# count of each pizza type.
max_slices, pizza_types = [int(s) for s in input().split(" ")]
pizza_slices = [int(s) for s in input().split(" ")]
# Solve exactly with the DP and print the Hash Code answer format:
# the number of chosen types, then their indices on one line.
opt_sol=get_opt_solution(max_slices, pizza_slices)
print(len(opt_sol))
print(' '.join(map(str, opt_sol)))
| [
"tabletenniser@gmail.com"
] | tabletenniser@gmail.com |
1a47559dd1499bafa8598161835aa02a0e49cf0b | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/program/util/ProgramDiffFilter.pyi | d2835f42e1b5c7c90b7f8965f54c4cbdb0b67b07 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,593 | pyi | from typing import List
import ghidra.program.util
import java.lang
class ProgramDiffFilter(object):
"""
The ProgramDiffFilter is used when determining or working with
differences between two programs.
It indicates the types of program differences we are interested in.
Each difference type can be set to true, indicating interest in
differences of that type between two programs. False indicates no interest
in this type of program difference.
Valid filter types are:
BYTE_DIFFS, CODE_UNIT_DIFFS,
PLATE_COMMENT_DIFFS, PRE_COMMENT_DIFFS, EOL_COMMENT_DIFFS,
REPEATABLE_COMMENT_DIFFS, POST_COMMENT_DIFFS,
REFERENCE_DIFFS,
USER_DEFINED_DIFFS, BOOKMARK_DIFFS,
SYMBOL_DIFFS,
EQUATE_DIFFS, FUNCTION_DIFFS, PROGRAM_CONTEXT_DIFFS.
Predefined filter type combinations are:
COMMENT_DIFFS and ALL_DIFFS.
"""
ALL_DIFFS: int = 32767
BOOKMARK_DIFFS: int = 4096
BYTE_DIFFS: int = 2
CODE_UNIT_DIFFS: int = 4
COMMENT_DIFFS: int = 248
EOL_COMMENT_DIFFS: int = 8
EQUATE_DIFFS: int = 512
FUNCTION_DIFFS: int = 2048
FUNCTION_TAG_DIFFS: int = 16384
PLATE_COMMENT_DIFFS: int = 64
POST_COMMENT_DIFFS: int = 32
PRE_COMMENT_DIFFS: int = 16
PROGRAM_CONTEXT_DIFFS: int = 1
REFERENCE_DIFFS: int = 256
REPEATABLE_COMMENT_DIFFS: int = 128
SYMBOL_DIFFS: int = 1024
USER_DEFINED_DIFFS: int = 8192
@overload
def __init__(self):
"""
Creates new ProgramDiffFilter with none of the diff types selected.
"""
...
@overload
def __init__(self, type: int):
"""
Creates new ProgramDiffFilter with the specified diff types selected.
@param type one or more of the diff types "OR"ed together.
<BR>i.e. CODE_UNIT_DIFFS | SYMBOL_DIFFS
"""
...
@overload
def __init__(self, filter: ghidra.program.util.ProgramDiffFilter):
"""
Creates new ProgramDiffFilter equivalent to the specified ProgramDiffFilter.
@param filter the diff filter this one should equal.
"""
...
def addToFilter(self, filter: ghidra.program.util.ProgramDiffFilter) -> None:
"""
set this filter to look for types of differences in addition to those
types where it is already looking for differences.
The filter that is passed as a parameter indicates the additional types
of differences.
@param filter filter indicating the additional types of differences
to look for between the programs.
"""
...
def clearAll(self) -> None:
"""
Sets all the defined types of differences to false.
Filter indicates no interest in any difference types.
"""
...
def equals(self, obj: object) -> bool:
"""
Determines whether or not this filter is equal to the object that
is passed in.
@param obj the object to compare this one with.
@return true if the filter matches this one.
"""
...
def getClass(self) -> java.lang.Class: ...
def getFilter(self, type: int) -> bool:
"""
getFilter determines whether or not the specified type of filter is set.
@param type the set bits indicate the type of differences we want to
check as being set in the filter.
<BR>For example, one or more of the diff types "OR"ed together.
<BR>i.e. CODE_UNIT_DIFFS | SYMBOL_DIFFS
@return true if filtering for the specified type of differences.
"""
...
@staticmethod
def getPrimaryTypes() -> List[int]:
"""
Gets all the valid individual types of differences for this filter.
These are also referred to as primary difference types.
@return an array containing all the currently defined difference types
"""
...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def selectAll(self) -> None:
"""
Sets all the defined types of differences to true.
Filter indicates interest in all difference types.
"""
...
def setFilter(self, type: int, filter: bool) -> None:
"""
setFilter specifies whether or not the indicated type of difference will be
included by the filter (true) or not included (false).
@param type the set bits indicate the type of differences we want to
look for in the programs.
<BR>For example, one or more of the diff types "OR"ed together.
<BR>i.e. CODE_UNIT_DIFFS | SYMBOL_DIFFS
@param filter true if you want to determine differences of the specified type.
"""
...
def toString(self) -> unicode:
"""
Returns a string representation of the current settings for this filter.
"""
...
@staticmethod
def typeToName(type: int) -> unicode:
"""
<CODE>typeToName()</CODE> returns the name of the difference type.
Only predefined types, as specified in <CODE>ProgramDiffFilter</CODE>,
will return a name. Otherwise, an empty string is returned.
@param type the type of difference whose name is wanted.
@return the name of the predefined difference type. Otherwise, the empty string.
"""
...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
de767a446ec4679f03d7c8f8350383eb74a5ea8e | 0e478f3d8b6c323c093455428c9094c45de13bac | /src/OTLMOW/OEFModel/Classes/AWeg.py | fdc3b0800ca5134a20c84931626c320d54719df4 | [
"MIT"
] | permissive | davidvlaminck/OTLMOW | c6eae90b2cab8a741271002cde454427ca8b75ba | 48f8c357c475da1d2a1bc7820556843d4b37838d | refs/heads/main | 2023-01-12T05:08:40.442734 | 2023-01-10T15:26:39 | 2023-01-10T15:26:39 | 432,681,113 | 3 | 1 | MIT | 2022-06-20T20:36:00 | 2021-11-28T10:28:24 | Python | UTF-8 | Python | false | false | 325 | py | # coding=utf-8
from OTLMOW.OEFModel.EMObject import EMObject
# Generated with OEFClassCreator. To modify: extend, do not edit
class AWeg(EMObject):
    """Legacy installation type for a motorway road class ('A-weg')."""

    # Canonical legacy-data URI identifying this installation type.
    typeURI = 'https://lgc.data.wegenenverkeer.be/ns/installatie#AWeg'
    # Human-readable (Dutch) label.
    label = 'Autosnelweg'

    def __init__(self):
        # No attributes of its own; defer entirely to the EMObject base.
        super().__init__()
"david.vlaminck@mow.vlaanderen.be"
] | david.vlaminck@mow.vlaanderen.be |
8e9d4e496e365e8cebd9c1fcdd4d5915c6a06d19 | b8e29b6e957b0a55571f7cffc4357666a43fb56e | /mxnet/insightface/insightface/src/data/glint2lst.py | 60f7cbea9faa5f680ca985c8d7ed8ea79d00d8a4 | [
"MIT",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] | permissive | aliyun/alibabacloud-aiacc-demo | b9bbe565021757ecaea0e7d7209632cbdb5cc8ab | 2e49deeb38d12d4af4c5e50bb15d731c4bbf4cf1 | refs/heads/master | 2023-05-14T08:09:33.067050 | 2023-05-04T08:19:51 | 2023-05-04T08:19:51 | 228,604,743 | 38 | 14 | Apache-2.0 | 2022-06-22T02:41:01 | 2019-12-17T11:46:44 | Python | UTF-8 | Python | false | false | 1,158 | py |
import sys
import os
import numpy as np
input_dir = sys.argv[1]
targets = sys.argv[2]
targets = targets.strip().split(',')
lmap = {}
for ds in targets:
#image_dir = os.path.join(input_dir, ds)
lmk_file = os.path.join(input_dir, "%s_lmk"%(ds))
if not os.path.exists(lmk_file):
lmk_file = os.path.join(input_dir, "%s_lmk.txt"%(ds))
if not os.path.exists(lmk_file):
continue
#print(ds)
idx = 0
for line in open(lmk_file, 'r'):
idx+=1
vec = line.strip().split(' ')
assert len(vec)==12 or len(vec)==11
image_file = os.path.join(input_dir, vec[0])
assert image_file.endswith('.jpg')
vlabel = -1 #test mode
if len(vec)==12:
label = int(vec[1])
if label in lmap:
vlabel = lmap[label]
else:
vlabel = len(lmap)
lmap[label] = vlabel
lmk = np.array([float(x) for x in vec[2:]], dtype=np.float32)
else:
lmk = np.array([float(x) for x in vec[1:]], dtype=np.float32)
lmk = lmk.reshape( (5,2) ).T
lmk_str = "\t".join( [str(x) for x in lmk.flatten()] )
print("0\t%s\t%d\t0\t0\t0\t0\t%s"%(image_file, vlabel, lmk_str))
#if idx>10:
# break
| [
"ziqi.yzq@alibaba-inc.com"
] | ziqi.yzq@alibaba-inc.com |
e85086b27f26e7ad3ddf0dcd9b1299ef6799cbcc | 169d809f45dedcaa3c7b1b49912d8b025abe18d9 | /factors.py | 6f66a11e8dd9fcecc3dbd419474103c0e3974875 | [] | no_license | bermec/challenges | 8a82d1d38d1ed1a0fc3f258443bc0054efc977a6 | 9fb092f20f12b4eaa808e758f00f482a49346c88 | refs/heads/master | 2021-10-08T05:05:56.803332 | 2018-12-08T00:20:20 | 2018-12-08T00:20:20 | 109,448,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | def factors(n):
lst = []
for x in range(1, n):
if n % x == 0:
lst.append(x)
return lst
def reduce_fraction(n, n2):
    """Reduce the fraction n/n2 to lowest terms.

    Args:
        n (int): numerator.
        n2 (int): denominator.

    Returns:
        tuple of int: (numerator, denominator) divided by their GCD.
    """
    from math import gcd  # local import keeps the module surface unchanged

    # The original scanned the shared *proper* divisors of both numbers and
    # divided by the last one found.  That misreduced n == n2 (e.g. (4, 4)
    # became (2, 2) because a number is not its own proper divisor) and
    # raised NameError for (1, 1) where no divisor was ever found.
    # Dividing once by the true GCD fixes both cases.
    g = gcd(n, n2)
    return (n // g, n2 // g)
if __name__ == '__main__':
    # Demo: print the proper divisors of 999 (= 10**3 - 1).
    ans = factors(10 **3 - 1)
    print(ans)
| [
"rog@pynguins.com"
] | rog@pynguins.com |
ce7ccdfe707353b7a81653921ba80ee0e0973723 | ea4e262f3dc18a089895fef08bedefc60b66e373 | /supervised_learning/0x0D-RNNs/5-bi_forward.py | ce1c6a1dd901ad3532e8ae1b7a72f5765d7c2bf5 | [] | no_license | d1sd41n/holbertonschool-machine_learning | 777899d4914e315883ba0c887d891c0c8ab01c8a | 5f86dee95f4d1c32014d0d74a368f342ff3ce6f7 | refs/heads/main | 2023-07-17T09:22:36.257702 | 2021-08-27T03:44:24 | 2021-08-27T03:44:24 | 317,399,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | #!/usr/bin/env python3
"""[summary]
Returns:
[type]: [description]
"""
import numpy as np
class BidirectionalCell:
    """Cell of a bidirectional RNN.

    Holds forward/backward hidden-state weights and output weights drawn
    from a standard normal distribution, with all biases at zero.
    """

    def __init__(self, i, h, o):
        """Initialize weights and biases.

        Args:
            i: dimensionality of the input data.
            h: dimensionality of the hidden states.
            o: dimensionality of the outputs.
        """
        # Creation order matters for seeded reproducibility; keep it fixed.
        self.Whf = np.random.normal(size=(i + h, h))    # forward-direction weights
        self.Whb = np.random.normal(size=(i + h, h))    # backward-direction weights
        self.Wy = np.random.normal(size=(i + h + o, o)) # output weights
        self.bhf = np.zeros((1, h))
        self.bhb = np.zeros((1, h))
        self.by = np.zeros((1, o))

    def forward(self, h_prev, x_t):
        """Compute the forward-direction hidden state for one time step.

        Args:
            h_prev: array of shape (m, h), previous forward hidden state.
            x_t: array of shape (m, i), input for this time step.

        Returns:
            array of shape (m, h): the next forward hidden state.
        """
        stacked = np.concatenate((h_prev, x_t), axis=1)
        return np.tanh(stacked @ self.Whf + self.bhf)
| [
"1498@holbertonschool.com"
] | 1498@holbertonschool.com |
44680252265c4e7606a0c2ce3a626f198f9c5331 | fdcab1845d5f8b98e126d0e99c92fefdd426c850 | /experiments/pollen_met.py | f2c970387f18d9db8fba1f92490d566a51d312e1 | [
"MIT"
] | permissive | jjsanderson/DataBot | 197d9157be152ce679fd97f58557be7373b3f7f9 | 22d86b9bad2cd12bef735065a68619d114bfd6b1 | refs/heads/main | 2022-05-14T16:30:53.195429 | 2021-09-22T14:53:18 | 2021-09-22T14:53:18 | 187,226,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """Test retrieving pollen data from Met Office forecasts.
Replacing Bendadryl-sponsored service queried by pypollen module, as
that's proved unreliable in practice (just stops providing responses),
with no error.
"""
# Register with Met Office DataPoint service:
# https://www.metoffice.gov.uk/datapoint
from clientsecrets import metkey
# Oh, nuts: pollen count isn't included in the site data. Ugh.
| [
"lists@quernstone.com"
] | lists@quernstone.com |
45fcc0bee0c3a2ecebf1f0434ab531597d3bf8fd | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_Class2981.py | fc7c0615444d12cc4ebb6913a25a09662303e053 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,244 | py | # qubit number=4
# total number=39
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two bit strings position-by-position.

    Note: the result is emitted in REVERSED order (last position first),
    matching the original implementation's ``res[::-1]`` join.
    """
    return ''.join(str(int(s[i]) ^ int(t[i])) for i in reversed(range(len(s))))
def bitwise_dot(s: str, t: str) -> str:
    """Dot product (mod 2) of two bit strings, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the bit-flip oracle O_f over n control qubits and 1 target.

    For every n-bit input x with f(x) == "1", flips the target qubit via a
    multi-controlled Toffoli; X gates temporarily map the '0' bits of x onto
    the controls so the Toffoli fires exactly for that input pattern.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Surround the MCT with X gates on the '0' positions so the
            # all-ones control condition matches this specific bit pattern.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the full n-qubit test circuit around the oracle for f.

    The gate sequence (and its "# number=NN" markers) is auto-generated;
    order is significant and must not be rearranged.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=31
    prog.cz(input_qubit[0],input_qubit[3]) # number=32
    prog.h(input_qubit[3]) # number=33
    prog.h(input_qubit[3]) # number=30
    prog.x(input_qubit[3]) # number=11
    prog.h(input_qubit[3]) # number=13
    prog.cz(input_qubit[0],input_qubit[3]) # number=14
    prog.h(input_qubit[1]) # number=18
    prog.cz(input_qubit[3],input_qubit[1]) # number=19
    prog.cx(input_qubit[3],input_qubit[0]) # number=36
    prog.z(input_qubit[3]) # number=37
    prog.cx(input_qubit[3],input_qubit[0]) # number=38
    prog.x(input_qubit[3]) # number=35
    prog.h(input_qubit[1]) # number=20
    prog.rx(-3.141592653589793,input_qubit[3]) # number=26
    prog.h(input_qubit[3]) # number=15
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[2]) # number=17
    prog.h(input_qubit[3]) # number=4
    prog.h(input_qubit[0]) # number=5
    # Oracle acts on the first n-1 qubits as controls, last as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9
    prog.h(input_qubit[0]) # number=27
    prog.cz(input_qubit[1],input_qubit[0]) # number=28
    prog.h(input_qubit[0]) # number=29
    prog.cx(input_qubit[1],input_qubit[0]) # number=22
    prog.cx(input_qubit[2],input_qubit[1]) # number=34
    prog.x(input_qubit[1]) # number=23
    prog.x(input_qubit[1]) # number=24
    # circuit end
    return prog
if __name__ == '__main__':
    # Bernstein-Vazirani-style function: f(x) = (a . x) XOR b with a=111, b=0.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Exact statevector simulation (no shots are actually sampled).
    backend = BasicAer.get_backend('statevector_simulator')
    sample_shot =8000  # NOTE: unused with the statevector backend
    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    # Convert amplitudes to a {bitstring: probability} map, rounded to 3 dp.
    info = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    # Transpile against a mocked device and dump results + circuit to CSV.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    writefile = open("../data/startQiskit_Class2981.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
61508d630274d9f4fcd2da8ef98105c85249f470 | 60611d5c40a73c0b7f420ef825b0721d0f4f5cb5 | /autodata/spiders/edmunds_feature_spider.py | ff43dc4128dec959cc229fcd7ba4dfdd83e06b4d | [] | no_license | Miksztowi/AutomotiveData | 315047e5b32e04cba72387bf3a1c341140dceba7 | fd413ad3b5672cf0ccde809033122f4a0adca60e | refs/heads/master | 2021-01-15T11:57:37.896995 | 2017-08-25T09:27:50 | 2017-08-25T09:27:50 | 99,636,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,355 | py | # -*- coding:utf-8 -*-
import scrapy
import MySQLdb
import json
from autodata.items import EdmundsItem
import logging
import autodata.settings as settings
class EdmundsFeatureSpider(scrapy.Spider):
    """Scrapy spider that fetches per-style feature data from the Edmunds
    groundwork API for every car style still missing a name in MySQL."""

    logger = logging.getLogger(__name__)
    name = 'edmunds_feature_spider'
    custom_settings = {
        'CONCURRENT_REQUESTS': 88,
        'DOWNLOAD_DELAY': 0,
        'LOG_FILE': 'edmunds_feature.log'
    }

    # EdmundsItem field -> key inside the API's "features" mapping.  Each
    # feature group is stored as a JSON string on the item.
    FEATURE_FIELDS = {
        'safety': 'Safety',
        'comfort_convenience': 'Comfort & Convenience',
        'performance': 'Performance',
        'technology': 'Technology',
        'fuel': 'Fuel',
        'engine': 'Engine',
        'measurements': 'Measurements',
        'frontseats': 'Frontseats',
        'rearseats': 'Rearseats',
        'drive_train': 'Drive Train',
        'power_feature': 'Power Feature',
        'instrumentation': 'Instrumentation',
        'suspension': 'Suspension',
        'in_car_entertainment': 'In Car Entertainment',
        'warranty': 'Warranty',
        'telematics': 'Telematics',
        'tires_and_wheels': 'Tires and Wheels',
        'interior_options': 'Interior Options',
        'exterior_options': 'Exterior Options',
        'packages': 'Packages',
    }

    def __init__(self):
        super().__init__()
        # One shared connection/cursor for the spider's lifetime; closed in
        # spider_closed.
        self.connect = MySQLdb.connect(
            user=settings.DB_USER,
            password=settings.DB_PASSWORD,
            host=settings.DB_HOST,
            db=settings.DB
        )
        self.cursor = self.connect.cursor()

    def start_requests(self):
        """Yield one API request per style id that has no name yet."""
        self.cursor.execute('SELECT id FROM edmunds_cars WHERE name IS NULL')
        style_ids = [x[0] for x in self.cursor.fetchall()]
        base_url = 'https://www.edmunds.com/api/groundwork/feature/styles?styleIds={}'
        for style_id in style_ids:
            url = base_url.format(style_id)
            yield scrapy.Request(
                url=url,
                meta={'style_id': style_id}
            )

    def parse(self, response):
        """Parse one API response into an EdmundsItem.

        When the response carries no "styles" entry, an empty item is
        returned unchanged (same behavior as before).
        """
        res_json = json.loads(response.text)
        item = EdmundsItem()
        if res_json.get('styles'):
            styles = res_json.pop('styles')[0]  # API returns a list of styles
            features = styles['features']
            item['name'] = styles['name']
            item['id'] = styles['id']
            item['baseMsrp'] = styles['baseMsrp']
            item['msrpWithTypicalOptions'] = styles['msrpWithTypicalOptions']
            item['mpg'] = styles['mpg']
            item['totalSeating'] = styles['totalSeating']
            item['colors'] = json.dumps(styles['colors'])
            for field, key in self.FEATURE_FIELDS.items():
                item[field] = json.dumps(features[key])
        return item

    def spider_closed(self, spider):
        """Release the DB cursor and connection when the spider stops."""
        self.cursor.close()
        # Close the connection exactly once: the original called close()
        # twice, and MySQLdb raises on closing an already-closed connection.
        self.connect.close()
        spider.logger.info('Spider closed: %s', spider.name)
| [
"binwengan@gmail.com"
] | binwengan@gmail.com |
15b03267a304db2a215fee50bb9987974ec5f0ce | 2f67ac3081bbb6ac190dd2b7ea981a6b510a42c7 | /projeto/produto/models.py | 53042281b04b8d6d2cd5d2471c753bc1fece286d | [] | no_license | jm0216/estoque | 2fec817336327a5088cd8ff43b786787475fa86f | 7a03bd61cade1e1056d2894a78afb37c4969715c | refs/heads/master | 2020-08-03T22:26:47.146230 | 2019-09-30T03:26:49 | 2019-09-30T03:26:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | from django.db import models
from django.urls import reverse_lazy
class Produto(models.Model):
    """Product master record with price and current stock levels."""

    # True when the product is of foreign origin (imported).
    importado = models.BooleanField(default=False)
    # Brazilian NCM fiscal classification code (8 digits, stored as text).
    ncm = models.CharField('NCM', max_length=8)
    produto = models.CharField(max_length=100, unique=True)
    preco = models.DecimalField('preço', max_digits=7, decimal_places=2)
    estoque = models.IntegerField('estoque atual')
    estoque_minimo = models.PositiveIntegerField('estoque mínimo', default=0)
    data = models.DateField(null=True, blank=True)

    class Meta:
        # Default listing order: alphabetical by product name.
        ordering = ('produto',)

    def __str__(self):
        return self.produto

    def get_absolute_url(self):
        # Canonical detail URL, resolved lazily through the app's URL names.
        return reverse_lazy('produto:produto_detail', kwargs={'pk': self.pk})

    def to_dict_json(self):
        """Serialize the minimal fields used by the stock AJAX endpoints."""
        return {
            'pk': self.pk,
            'produto': self.produto,
            'estoque': self.estoque,
        }
| [
"rg3915@yahoo.com.br"
] | rg3915@yahoo.com.br |
8424fb37043b1238fef3ebb6c071cfff717d5db1 | 75d8667735782cd1d0eb4877e52c89da5cd92dde | /nova/tests/unit/__init__.py | 545088bbe157e6cddd5b973e0d539e80500ae27d | [
"Apache-2.0"
] | permissive | bopopescu/nova-token | ffecfd3ec561936b7d9d7e691bc57383cde05436 | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | refs/heads/master | 2022-11-22T09:53:31.073483 | 2016-05-14T02:47:01 | 2016-05-15T22:02:55 | 282,105,621 | 0 | 0 | Apache-2.0 | 2020-07-24T02:42:19 | 2020-07-24T02:42:18 | null | UTF-8 | Python | false | false | 2,131 | py | begin_unit
comment|'# Copyright 2010 United States Government as represented by the'
nl|'\n'
comment|'# Administrator of the National Aeronautics and Space Administration.'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
string|'"""\n:mod:`nova.tests.unit` -- Nova Unittests\n=====================================================\n\n.. automodule:: nova.tests.unit\n :platform: Unix\n"""'
newline|'\n'
nl|'\n'
name|'import'
name|'eventlet'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
name|'import'
name|'objects'
newline|'\n'
nl|'\n'
name|'eventlet'
op|'.'
name|'monkey_patch'
op|'('
name|'os'
op|'='
name|'False'
op|')'
newline|'\n'
nl|'\n'
comment|'# NOTE(alaski): Make sure this is done after eventlet monkey patching otherwise'
nl|'\n'
comment|'# the threading.local() store used in oslo_messaging will be initialized to'
nl|'\n'
comment|'# threadlocal storage rather than greenthread local. This will cause context'
nl|'\n'
comment|'# sets and deletes in that storage to clobber each other.'
nl|'\n'
comment|'# NOTE(comstud): Make sure we have all of the objects loaded. We do this'
nl|'\n'
comment|'# at module import time, because we may be using mock decorators in our'
nl|'\n'
comment|'# tests that run at import time.'
nl|'\n'
name|'objects'
op|'.'
name|'register_all'
op|'('
op|')'
newline|'\n'
endmarker|''
end_unit
| [
"dmg@uvic.ca"
] | dmg@uvic.ca |
031519f8b4369daea30a2504dbd50ad7e005a5e3 | 0db0d08edd59df7cd634b66e2786bc1a0eb52048 | /httprunner/built_in.py | 32bbb851741b0732ed712492e06b36eeaa448cbb | [
"MIT"
] | permissive | liuyihuicaicloud/HttpRunner | 8bbbb25c51bb04439e8350a8d4187368e0138d85 | 0e63747c395e27b5d5952446ca9a86dd5ce15b95 | refs/heads/master | 2020-03-18T22:57:41.799491 | 2018-03-27T09:28:22 | 2018-03-27T09:28:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,864 | py | """
Built-in dependent functions used in YAML/JSON testcases.
"""
import json
import datetime
import random
import re
import string
import time
from httprunner.exception import ParamsError
from httprunner.utils import string_type
def gen_random_string(str_len):
    """Return a random alphanumeric string of length ``str_len``."""
    alphabet = string.ascii_letters + string.digits
    chars = [random.choice(alphabet) for _ in range(str_len)]
    return ''.join(chars)
def get_timestamp(str_len=13):
    """Return the current epoch-time digits truncated to ``str_len`` chars.

    ``str_len`` must be an int strictly between 0 and 17; anything else
    raises ParamsError.
    """
    if not isinstance(str_len, int) or not 0 < str_len < 17:
        raise ParamsError("timestamp length can only between 0 and 16.")
    digits = str(time.time()).replace(".", "")
    return digits[:str_len]
def get_current_date(fmt="%Y-%m-%d"):
    """Return the current date formatted with ``fmt`` (default ISO date)."""
    now = datetime.datetime.now()
    return now.strftime(fmt)
""" built-in comparators
"""
def equals(check_value, expect_value):
    """Assert check_value == expect_value."""
    assert check_value == expect_value


def less_than(check_value, expect_value):
    """Assert check_value < expect_value."""
    assert check_value < expect_value


def less_than_or_equals(check_value, expect_value):
    """Assert check_value <= expect_value."""
    assert check_value <= expect_value


def greater_than(check_value, expect_value):
    """Assert check_value > expect_value."""
    assert check_value > expect_value


def greater_than_or_equals(check_value, expect_value):
    """Assert check_value >= expect_value."""
    assert check_value >= expect_value


def not_equals(check_value, expect_value):
    """Assert check_value != expect_value."""
    assert check_value != expect_value


def string_equals(check_value, expect_value):
    """Assert the two values are equal after str() conversion."""
    assert str(check_value) == str(expect_value)


def length_equals(check_value, expect_value):
    """Assert len(check_value) equals the expected int."""
    assert isinstance(expect_value, int)
    assert len(check_value) == expect_value


def length_greater_than(check_value, expect_value):
    """Assert len(check_value) is greater than the expected int."""
    assert isinstance(expect_value, int)
    assert len(check_value) > expect_value


def length_greater_than_or_equals(check_value, expect_value):
    """Assert len(check_value) is >= the expected int."""
    assert isinstance(expect_value, int)
    assert len(check_value) >= expect_value


def length_less_than(check_value, expect_value):
    """Assert len(check_value) is less than the expected int."""
    assert isinstance(expect_value, int)
    assert len(check_value) < expect_value


def length_less_than_or_equals(check_value, expect_value):
    """Assert len(check_value) is <= the expected int."""
    assert isinstance(expect_value, int)
    assert len(check_value) <= expect_value


def contains(check_value, expect_value):
    """Assert expect_value is contained in check_value (a container/str)."""
    assert isinstance(check_value, (list, tuple, dict, string_type))
    assert expect_value in check_value


def contained_by(check_value, expect_value):
    """Assert check_value is contained in expect_value (a container/str)."""
    assert isinstance(expect_value, (list, tuple, dict, string_type))
    assert check_value in expect_value


def type_match(check_value, expect_value):
    """Assert check_value is an instance of the expected type.

    expect_value may be a type object or the name of a builtin type.
    """
    def get_type(name):
        # Resolve a builtin type by name; pass real type objects through.
        if isinstance(name, type):
            return name
        elif isinstance(name, str):
            try:
                return __builtins__[name]
            except KeyError:
                raise ValueError(name)
        else:
            raise ValueError(name)
    assert isinstance(check_value, get_type(expect_value))


def regex_match(check_value, expect_value):
    """Assert the regex expect_value matches the start of check_value."""
    assert isinstance(expect_value, string_type)
    assert isinstance(check_value, string_type)
    assert re.match(expect_value, check_value)


def startswith(check_value, expect_value):
    """Assert str(check_value) starts with str(expect_value)."""
    assert str(check_value).startswith(str(expect_value))


def endswith(check_value, expect_value):
    """Assert str(check_value) ends with str(expect_value)."""
    assert str(check_value).endswith(str(expect_value))
""" built-in hooks
"""
def get_charset_from_content_type(content_type):
    """Extract the charset parameter from a Content-Type header value.

    e.g.
        "application/json; charset=UTF-8"            -> "utf-8"
        "application/x-www-form-urlencoded"          -> None

    Returns:
        The lowercase charset name, or None when no charset is present.
    """
    content_type = content_type.lower()
    if "charset=" not in content_type:
        return None
    index = content_type.index("charset=") + len("charset=")
    charset = content_type[index:]
    # A media type may carry further ";"-separated parameters after the
    # charset; the original returned those too, corrupting the encoding
    # name (e.g. "utf-8; boundary=x").  Strip them and any quotes/space.
    return charset.split(";", 1)[0].strip().strip('"')
def setup_hook_prepare_kwargs(method, url, kwargs):
    """Request setup hook: normalize POST bodies before sending.

    For POST requests with an application/json content-type, the value in
    kwargs["data"] is serialized to a JSON string; if the content-type also
    names a charset, the body is encoded with it.  ``kwargs`` is mutated in
    place; ``url`` is unused but required by the hook signature.
    """
    if method == "POST":
        content_type = kwargs.get("headers", {}).get("content-type")
        if content_type and "data" in kwargs:
            # if request content-type is application/json, request data should be dumped
            if content_type.startswith("application/json"):
                kwargs["data"] = json.dumps(kwargs["data"])
            # if charset is specified in content-type, request data should be encoded with charset encoding
            charset = get_charset_from_content_type(content_type)
            if charset:
                kwargs["data"] = kwargs["data"].encode(charset)
def setup_hook_httpntlmauth(method, url, kwargs):
    """Request setup hook: convert a "httpntlmauth" entry into requests auth.

    Pops {"username": ..., "password": ...} out of kwargs["httpntlmauth"]
    and installs a HttpNtlmAuth instance under kwargs["auth"].  Requires
    the optional requests_ntlm package (imported lazily on first use).
    """
    if "httpntlmauth" in kwargs:
        from requests_ntlm import HttpNtlmAuth
        auth_account = kwargs.pop("httpntlmauth")
        kwargs["auth"] = HttpNtlmAuth(
            auth_account["username"], auth_account["password"])
def teardown_hook_sleep_1_secs(resp_obj):
    """Teardown hook: sleep one second after the request completes.

    ``resp_obj`` is accepted to satisfy the hook signature but is unused.
    """
    time.sleep(1)
| [
"httprunner"
] | httprunner |
4f4d7086f94610330443c76fdfe29be17caf5e0f | e690fa7a2e622f3c187b0349b37bca8fc40b2801 | /problem.py | e22603401e65ec441aafabf357efa839ca6c5a7f | [] | no_license | davidpendergast/proof-that-p-equals-np | 058503e8357b92de76c0438b634bbe37655816f6 | f9f4a59c2215ecffc2c466ee23de10bdc6e6b066 | refs/heads/master | 2020-07-03T05:11:38.270370 | 2016-11-19T08:04:10 | 2016-11-19T08:04:10 | 74,195,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,314 | py | ALL_PROBLEMS = []
def add_to_list(prob):
ALL_PROBLEMS.append(prob)
return prob
class Problem:
    """A named search problem: fixed inputs plus a polynomial-time verifier."""

    def __init__(self, name, problem_input, verifier):
        """Create a problem definition.

        Args:
            name (str): Human-readable name of the problem.
            problem_input (list of int): Inputs that will be fed to Programs.
            verifier (callable): list of int -> bool; polynomial-time check
                of the validity of a program's output.
        """
        self.name = name
        self._input = problem_input
        self._verifier = verifier

    def is_correct(self, program_output):
        """Return True when ``program_output`` satisfies the verifier."""
        return self._verifier(program_output)

    def input(self):
        """Return a shallow copy of the problem's input list."""
        return list(self._input)
"""
Seeks a program that outputs at least one 7.
bf solution: +[+.]
"""
SEVEN_PROBLEM = add_to_list(Problem("Output a 7",[], lambda x : 7 in x))
"""
Seeks a program that outputs a single 3.
bf solution: +++.
"""
SINGLE_THREE_PROBLEM = add_to_list(Problem("Output a Single 3", [], lambda x: x == [3]))
"""
Seeks a program that outputs the given inputs.
bf solution: ,[.,]
"""
_oti_input = [36, 60, 24, 5]
OUTPUT_THE_INPUTS = add_to_list(Problem("Identity Function", _oti_input, lambda x: x == _oti_input))
"""
Swap two inputs.
bf solution: ,>,.<.
P = 251855
"""
SWAP_TWO_INPUTS = add_to_list(Problem("Swap Two Inputs", [32, -4], lambda x: len(x) == 2 and x[0] == -4 and x[1] == 32))
"""
Find the sum of two inputs.
bf solution: ,>,[<+>-]<.
P = big
"""
_v1 = 25
_v2 = 11
SUM_TWO_INPUTS = add_to_list(Problem("Sum Two Inputs", [_v1, _v2], lambda x: len(x) == 1 and x[0] == _v1 + _v2))
"""
Classic NP-Complete zero sum subset problem.
bf poly-time solution: ????
"""
_zss_input_1 = [3, -3]
_zss_input_2 = [3, -2, -1]
_zss_input_3 = [1, 3, -5, 2, -4]
_input = _zss_input_1
def _check_zss(problem_input, output):
return all(i in problem_input for i in output) and sum(output) == 0 and len(set(output)) == len(problem_input)
ZERO_SUM_SUBSET_1 = add_to_list(Problem("Zero Sum Subset 1", _zss_input_1, lambda x: _check_zss(_zss_input_1, x)))
ZERO_SUM_SUBSET_2 = add_to_list(Problem("Zero Sum Subset 2", _zss_input_2, lambda x: _check_zss(_zss_input_2, x)))
ZERO_SUM_SUBSET_3 = add_to_list(Problem("Zero Sum Subset 3", _zss_input_3, lambda x: _check_zss(_zss_input_3, x)))
| [
"dlp75@case.edu"
] | dlp75@case.edu |
c24eb7d9bd2351befb0b5c1422aba4adf9be007a | 0386aecc51ca70621598d892d33847f5ee1ff393 | /server/app.py | 5d82acd21bb11d15f5e823f85252bbc05f1cff0a | [] | no_license | scmmishra/noteblock-beta | f82f0e3347ad10262f8512ceea59e184b78549fb | 33b4ab54cc04ba8462ed26af65b1436571cc52a5 | refs/heads/master | 2020-04-29T18:24:43.195975 | 2019-03-21T05:37:27 | 2019-03-21T05:37:27 | 176,323,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,789 | py | from datetime import datetime
from flask import Flask, jsonify, request
import uuid
from flask_cors import CORS
# configuration
DEBUG = True
# instantiate the app
app = Flask(__name__)
app.config.from_object(__name__)
CORS(app)
NOTES = [{
"title": "Mumbai Hackathon",
"id": uuid.uuid4().hex,
"author": "Shivam Mishra",
"created": datetime.now(),
"note": {
"type": "doc",
"content": [
{
"type": "heading",
"attrs": {
"level": 1
},
"content": [
{
"type": "text",
"text": "This is Mumbai Hackathon"
}
]
},
{
"type": "paragraph",
"content": [
{
"type": "text",
"text": "Mumbai Hackathon is Annual Open Source Hackathon organized by the ERPNext Foundation and Don Bosco Institute of Technology. Every year, we welcome students, developers and designers from across the country to create incredible open source projects at Mumbai's largest Open Source Hackathon."
}
]
},
{
"type": "paragraph",
"content": [
{
"type": "text",
"text": "Here's the "
},
{
"type": "text",
"marks": [
{
"type": "link",
"attrs": {
"href": "https://github.com/MumbaiHackathon"
}
}
],
"text": "repositories"
},
{
"type": "text",
"text": " of the amazing projects built at Mumbai Hackathon."
}
]
},
{
"type": "heading",
"attrs": {
"level": 2
},
"content": [
{
"type": "text",
"text": "Details for Mumbai Hackathon 2019"
}
]
},
{
"type": "bullet_list",
"content": [
{
"type": "list_item",
"content": [
{
"type": "paragraph",
"content": [
{
"type": "text",
"text": "Date: 16th & 17th March 2019"
}
]
}
]
},
{
"type": "list_item",
"content": [
{
"type": "paragraph",
"content": [
{
"type": "text",
"text": "Timings:"
}
]
},
{
"type": "bullet_list",
"content": [
{
"type": "list_item",
"content": [
{
"type": "paragraph",
"content": [
{
"type": "text",
"text": "9 AM - 6 PM on 16th March."
}
]
}
]
},
{
"type": "list_item",
"content": [
{
"type": "paragraph",
"content": [
{
"type": "text",
"text": "9 AM - 12 PM on 17th March."
}
]
}
]
}
]
}
]
},
{
"type": "list_item",
"content": [
{
"type": "paragraph",
"content": [
{
"type": "text",
"text": "Prize Amount: Rs. 50,000"
}
]
}
]
}
]
}
]
}
},
{
"title": "Some More Note",
"id": uuid.uuid4().hex,
"author": "Shivam Mishra",
"created": datetime.now(),
"note": {
"type": "doc",
"content": [
{
"type": "paragraph",
"content": [
{
"type": "text",
"text": "This is some text."
}
]
}
]
}}]
@app.route("/api/notes", methods=['GET', 'POST'])
def notes():
response_object = {'status': 'success'}
response_object['notes'] = NOTES
return jsonify(response_object)
@app.route("/api/note/<note_id>", methods=['GET', 'POST'])
def single_note(note_id):
response_object = {'status': 'success'}
note = list(filter(lambda d: d['id'] == note_id, NOTES))
response_object['note'] = note
return jsonify(response_object)
@app.route("/api/ping", methods=['GET', 'POST'])
def ping():
response_object = {'status': 'success'}
response_object['message'] = "pong"
return jsonify(response_object)
@app.route("/")
def index():
return "Hello"
if __name__ == "__main__":
app.run() | [
"scm.mymail@gmail.com"
] | scm.mymail@gmail.com |
73f02b54e142ca8847ebe4278f41f5c50c7b370a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03242/s879564820.py | 77ea389167a1e41f6fb3e2ee175b767be724c41a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | s = list(str(input()))
ans = ""
for i in range(3):
if s[i] == "1":
ans = ans + "9"
else:
ans = ans + "1"
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a8e6e4c4bbe79dc52bb7a9f7155614c35f8c84aa | 86162656dc87928c35ac73f8ab1b3475304557c7 | /arelle/examples/plugin/validateSchemaLxml.py | 16ba4a7e7cbc2edb499088729a072b7617abe3bb | [
"Apache-2.0"
] | permissive | lmsanch/Arelle | 7558037f191cce70d71695e3192420af48c6984f | 45a8f922695832990653ec230e3fd3ffe8d3e4fa | refs/heads/master | 2021-01-18T16:48:24.104496 | 2013-04-13T07:26:51 | 2013-04-13T07:26:51 | 9,717,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,109 | py | '''
Save DTS is an example of a plug-in to both GUI menu and command line/web service
that will save the files of a DTS into a zip file.
(c) Copyright 2012 Mark V Systems Limited, All rights reserved.
'''
import threading
from lxml import etree
def validateSchemaWithLxml(modelXbrl, cntlr=None):
    """Schema-validate the documents of *modelXbrl* with lxml's XMLSchema.

    A synthetic wrapper schema is built that xsd:imports every schema
    discovered in the DTS (dependencies first), plus any extra namespaces
    named in the instances' xsi:schemaLocation attributes. Each instance
    and linkbase document is then parsed and validated against it; errors
    are reported on the modelXbrl log as "lxmlSchema:<type>".

    :param modelXbrl: the loaded ModelXbrl whose documents are validated
    :param cntlr: optional GUI controller; when given, the log pane is
        selected after validation finishes
    """
    class schemaResolver(etree.Resolver):
        """Resolve the fake ``file:///__N`` schemaLocation URLs used in the
        wrapper schema back to real (web-cache-mapped) file paths."""
        def resolve(self, url, id, context):
            if url.startswith("file:///__"):
                # the integer after "file:///__" indexes importedFilepaths
                url = importedFilepaths[int(url[10:])]
            filepath = modelXbrl.modelManager.cntlr.webCache.getfilename(url)
            return self.resolve_filename(filepath, context)
    entryDocument = modelXbrl.modelDocument
    # test of schema validation using lxml (trial experiment, commented out for production use)
    from arelle import ModelDocument
    imports = []                # xsd:import elements of the wrapper schema
    importedNamespaces = set()  # namespaces imported so far (avoid duplicates)
    importedFilepaths = []      # real path for each fake file:///__N URL
    '''
    for mdlSchemaDoc in entryDocument.referencesDocument.keys():
        if (mdlSchemaDoc.type == ModelDocument.Type.SCHEMA and
            mdlSchemaDoc.targetNamespace not in importedNamespaces):
            # actual file won't pass through properly, fake with table reference
            imports.append('<xsd:import namespace="{0}" schemaLocation="file:///__{1}"/>'.format(
                mdlSchemaDoc.targetNamespace, len(importedFilepaths)))
            importedNamespaces.add(mdlSchemaDoc.targetNamespace)
            importedFilepaths.append(mdlSchemaDoc.filepath)
    '''
    def importReferences(referencingDocument):
        # Depth-first over DTS references so that a schema's dependencies
        # are imported before the schema itself.
        for mdlSchemaDoc in referencingDocument.referencesDocument.keys():
            if (mdlSchemaDoc.type == ModelDocument.Type.SCHEMA and
                    mdlSchemaDoc.targetNamespace not in importedNamespaces):
                importedNamespaces.add(mdlSchemaDoc.targetNamespace)
                importReferences(mdlSchemaDoc)  # do dependencies first
                # actual file won't pass through properly, fake with table reference
                imports.append('<xsd:import namespace="{0}" schemaLocation="file:///__{1}"/>'.format(
                    mdlSchemaDoc.targetNamespace, len(importedFilepaths)))
                importedFilepaths.append(mdlSchemaDoc.filepath)
    importReferences(entryDocument)
    # add schemas used in xml validation but not DTS discovered
    for mdlDoc in modelXbrl.urlDocs.values():
        if mdlDoc.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.LINKBASE):
            schemaLocation = mdlDoc.xmlRootElement.get("{http://www.w3.org/2001/XMLSchema-instance}schemaLocation")
            if schemaLocation:
                # xsi:schemaLocation alternates namespace / location tokens
                ns = None
                for entry in schemaLocation.split():
                    if ns is None:
                        ns = entry
                    else:
                        if ns not in importedNamespaces:
                            imports.append('<xsd:import namespace="{0}" schemaLocation="file:///__{1}"/>'.format(
                                ns, len(importedFilepaths)))
                            importedNamespaces.add(ns)
                            importedFilepaths.append(entry)
                        ns = None
    schemaXml = '<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">\n{0}</xsd:schema>\n'.format(
        '\n'.join(imports))
    # trace schema files referenced (debug aid only; must not abort
    # validation on hosts without a c:\temp directory, e.g. non-Windows)
    try:
        with open("c:\\temp\\test.xml", "w") as fh:
            fh.write(schemaXml)
    except OSError:
        pass
    modelXbrl.modelManager.showStatus(_("lxml validator loading xml schema"))
    import time
    startedAt = time.time()
    parser = etree.XMLParser()
    parser.resolvers.add(schemaResolver())
    # parse the wrapper schema with the resolver so the fake import URLs work
    schemaDoc = etree.fromstring(schemaXml, parser=parser, base_url=entryDocument.filepath+"-dummy-import.xsd")
    schema = etree.XMLSchema(schemaDoc)
    from arelle.Locale import format_string
    modelXbrl.info("info:lxmlSchemaValidator", format_string(modelXbrl.modelManager.locale,
                                                             _("schema loaded in %.2f secs"),
                                                             time.time() - startedAt))
    modelXbrl.modelManager.showStatus(_("lxml schema validating"))
    # check instance documents and linkbases (sort for inst doc before linkbases, and in file name order)
    for mdlDoc in sorted(modelXbrl.urlDocs.values(), key=lambda mdlDoc: (-mdlDoc.type, mdlDoc.filepath)):
        if mdlDoc.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.LINKBASE):
            startedAt = time.time()
            docXmlTree = etree.parse(mdlDoc.filepath)
            # NOTE(review): this message is logged between parse and
            # validate, so the reported time is the parse time — confirm
            # whether validation time was intended instead.
            modelXbrl.info("info:lxmlSchemaValidator", format_string(modelXbrl.modelManager.locale,
                                                                     _("schema validated in %.3f secs"),
                                                                     time.time() - startedAt),
                           modelDocument=mdlDoc)
            if not schema.validate(docXmlTree):
                for error in schema.error_log:
                    modelXbrl.error("lxmlSchema:{0}".format(error.type_name.lower()),
                                    error.message,
                                    modelDocument=mdlDoc,
                                    sourceLine=error.line)
    modelXbrl.modelManager.showStatus(_("lxml validation done"), clearAfter=3000)
    if cntlr is not None:
        # if using GUI controller, not cmd line or web service, select the errors window when done
        cntlr.uiThreadQueue.put((cntlr.logSelect, []))
def validateSchemaWithLxmlMenuEntender(cntlr, validationmenu):
    """Plug-in hook: insert the lxml schema-validation entry as the second
    item of the GUI tools->validation menu."""
    run_validation = lambda: validateSchemaWithLxmlMenuCommand(cntlr)
    validationmenu.insert_command(
        1,
        label="Validate schema with lxml",
        underline=0,
        command=run_validation,
    )
def validateSchemaWithLxmlMenuCommand(cntlr):
    """GUI menu handler: run lxml schema validation of the currently loaded
    DTS on a background daemon thread (requires a taxonomy to be loaded)."""
    modelManager = cntlr.modelManager
    if modelManager is None or modelManager.modelXbrl is None:
        cntlr.addToLog(_("No taxonomy loaded."))
        return
    # Bind the current DTS and controller now; validation then runs off the
    # UI thread and focuses the GUI messages pane when done.
    worker = threading.Thread(
        target=lambda dts=modelManager.modelXbrl, c=cntlr: validateSchemaWithLxml(dts, c))
    worker.daemon = True
    worker.start()
def validateSchemaWithLxmlCommandLineOptionExtender(parser):
    """Plug-in hook: register the --validateSchemaWithLxml command line
    option (stored on options as ``validateSchemaLxml``)."""
    option_kwargs = dict(
        action="store_true",
        dest="validateSchemaLxml",
        help=_("Validate the schema with lxml (experimental)"),
    )
    parser.add_option("--validateSchemaWithLxml", **option_kwargs)
def validateSchemaWithLxmlCommandLineXbrlRun(cntlr, options, modelXbrl):
    """Plug-in hook run after each XBRL load on the command line / web
    service: when --validateSchemaWithLxml was given, schema-validate the
    just-loaded model with lxml.

    :param cntlr: command line controller (used for logging)
    :param options: parsed command line options
    :param modelXbrl: the ModelXbrl that was just loaded by the runner
    """
    if options.validateSchemaLxml:
        # Validate the modelXbrl handed to the hook; the previous version
        # ignored this argument and reached through cntlr.modelManager,
        # which names the same loaded model in normal runs.
        if modelXbrl is None:
            cntlr.addToLog(_("No taxonomy loaded."))
            return
        validateSchemaWithLxml(modelXbrl)
'''
Do not use _( ) in pluginInfo itself (it is applied later, after loading)
'''
# Plug-in registration metadata read by Arelle's PluginManager; the last
# three entries are the mount points that hook this module into the GUI
# validation menu, the command line option parser, and post-load runs.
__pluginInfo__ = {
    'name': 'Validate Schema with Lxml',
    'version': '0.9',
    'description': "This plug-in provides schema validation using lxml. As of 2012-05 "
                   " lxml does not properly schema validate XBRL schemas, which is why"
                   " it is provided in a plug-in instead of the main build. "
                   "For the GUI, this feature is inserted to the tools->validation menu 2nd position. "
                   "This is an experimental feature, not suitable for XBRL production use until lxml"
                   " schema validation becomes reliable for XBRL schemas.",
    'license': 'Apache-2',
    'author': 'Mark V Systems Limited',
    'copyright': '(c) Copyright 2012 Mark V Systems Limited, All rights reserved.',
    # classes of mount points (required)
    'CntlrWinMain.Menu.Validation': validateSchemaWithLxmlMenuEntender,
    'CntlrCmdLine.Options': validateSchemaWithLxmlCommandLineOptionExtender,
    'CntlrCmdLine.Xbrl.Run': validateSchemaWithLxmlCommandLineXbrlRun,
}
| [
"fischer@markv.com"
] | fischer@markv.com |
a40ffcdf4abd0ed80e5920c73394506cb415f90e | f3b233e5053e28fa95c549017bd75a30456eb50c | /bace_input/L4D/4D-3K_wat_20Abox/set_2.py | 63587ba7792a0b793966efb059f21c70d4a63902 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | import os
# Setup script for one set of TI (thermodynamic integration) windows:
# for each lambda value, copy the production-input and PBS templates into
# the per-window directory and substitute the XXX placeholder.
# NOTE(review): "dir" shadows the builtin of the same name (harmless here).
dir = '/mnt/scratch/songlin3/run/bace/L4D/wat_20Abox/ti_one-step/4D_3K/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_2.in'   # template production input file
temp_pbs = filesdir + 'temp_2.pbs'          # template PBS job script
# lambda windows; each has a pre-existing directory named "%6.5f" % lambda
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # enter the window directory (relative to the current directory, which
    # is restored to `dir` at the end of each iteration)
    os.chdir("%6.5f" %(j))
    workdir = dir + "%6.5f" %(j) + '/'
    #prodin
    # copy the production-input template, then replace XXX with the lambda
    prodin = workdir + "%6.5f_prod_2.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
    #PBS
    # copy the PBS template, then replace XXX with the lambda
    pbs = workdir + "%6.5f_2.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    #submit pbs
    #os.system("qsub %s" %(pbs))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
68d772854072130cf95bf0eaa72dad9bbf69f7b9 | eb9da50f89a0ffaf6e46972f06abba05ea2e4132 | /footmark/ess/configuration.py | 582bbd055622fb2580b0cb41d110e411f12ca50c | [
"Apache-2.0"
] | permissive | alibaba/footmark | 0cf8e09ccf504541a3871e4026f8c7674e6c803c | bf698b5b4a0995dd84f81e7c4e9424cd9e120027 | refs/heads/master | 2023-03-12T21:15:09.244440 | 2022-07-18T08:09:18 | 2022-07-18T08:09:18 | 120,717,365 | 13 | 33 | Apache-2.0 | 2022-07-18T08:05:16 | 2018-02-08T05:52:28 | Python | UTF-8 | Python | false | false | 3,174 | py | """
Represents an ECS Instance
"""
from footmark.ess.essobject import TaggedESSObject
class ScalingConfiguration(TaggedESSObject):
    """
    Represents a scaling configuration of an ESS scaling group.

    Exposes the short aliases ``id``, ``name``, ``group_id`` and
    ``state``/``status`` on top of the long attribute names returned by
    the API (see ``__getattr__`` / ``__setattr__`` below, which keep the
    two spellings in sync).
    """
    def __init__(self, connection=None):
        super(ScalingConfiguration, self).__init__(connection)
        # flat {key: value} tag dict; __setattr__ converts the raw API
        # structure ({'tag': [{'tag_key': ..., 'tag_value': ...}, ...]})
        self.tags = {}

    def __repr__(self):
        return 'Scaling Configuration:%s' % self.id

    def __getattr__(self, name):
        # Short aliases for the verbose API attribute names; only invoked
        # when normal attribute lookup fails.
        if name == 'id':
            return self.scaling_configuration_id
        if name == 'name':
            return self.scaling_configuration_name
        if name == 'group_id':
            return self.scaling_group_id
        if name in ('state', 'status'):
            return self.lifecycle_state
        raise AttributeError("Object {0} does not have attribute {1}".format(self.__repr__(), name))

    def __setattr__(self, name, value):
        # Mirror short-alias assignments onto the canonical long names so
        # both spellings stay consistent.
        if name == 'id':
            self.scaling_configuration_id = value
        if name == 'name':
            self.scaling_configuration_name = value
        if name == 'group_id':
            self.scaling_group_id = value
        if name == 'lifecycle_state':
            # lifecycle state values are normalized to lower case
            value = value.lower()
        if name in ('state', 'status'):
            self.lifecycle_state = value
        if name == 'tags' and value:
            # convert raw API tag list into a flat {key: value} dict
            v = {}
            for tag in value['tag']:
                if tag.get('tag_key'):
                    v[tag.get('tag_key')] = tag.get('tag_value', None)
            value = v
        # NOTE(review): super(TaggedESSObject, ...) bypasses
        # TaggedESSObject's own __setattr__ -- confirm this is intended.
        super(TaggedESSObject, self).__setattr__(name, value)

    def _update(self, updated):
        # Copy all attributes from a freshly-fetched object onto self.
        self.__dict__.update(updated.__dict__)

    def update(self, validate=False):
        """
        Refresh this object's state by re-fetching its attributes from the
        service and return the (possibly updated) lifecycle state.

        :type validate: bool
        :param validate: By default, if the service returns no data about
            this configuration the method returns quietly. If True, a
            ValueError is raised instead when no data is returned.
        """
        rs = self.connection.describe_configurations(self.scaling_group_id, [self.id])
        if len(rs) > 0:
            for r in rs:
                if r.id == self.id:
                    self._update(r)
        elif validate:
            raise ValueError('%s is not a valid Scaling Configuration ID' % self.id)
        return self.state

    def active(self):
        """
        Activate: delegates to connection.start_instances with this id.

        NOTE(review): start_instances/stop_instances look copied from an
        ECS Instance class -- confirm these are the intended calls for a
        scaling configuration.
        """
        return self.connection.start_instances([self.id])

    def inactive(self, force=False):
        """
        Deactivate: delegates to connection.stop_instances with this id.

        :type force: bool
        :param force: forwarded to stop_instances (force stop)

        :rtype: list
        :return: whatever stop_instances returns (a list of stopped ids)
        """
        return self.connection.stop_instances([self.id], force)

    def terminate(self):
        """
        Delete this scaling configuration via the service API.
        """
        return self.connection.terminate_configuration(self.id)
| [
"guimin.hgm@alibaba-inc.com"
] | guimin.hgm@alibaba-inc.com |
7620f9cf293949dd49959feab537b7f6f7f2e31e | 0ca41d628d7b34cc27487e889af62d1da17350bd | /openstack/network/v2/statuses.py | 99068d276846df3d3bbfeaa63cd46efe9f7b1250 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ArvinSong/huaweicloud-sdk-python | 9d5fcaf2ff93143de0e676a2fcc99f9a4b79fd9f | 227cd7f68a07974b16794c371f110d1927571fd0 | refs/heads/master | 2020-08-08T15:08:32.276961 | 2019-09-30T03:52:22 | 2019-09-30T03:52:22 | 213,855,251 | 1 | 0 | NOASSERTION | 2019-10-09T07:52:23 | 2019-10-09T07:52:23 | null | UTF-8 | Python | false | false | 1,186 | py | # -*- coding:utf-8 -*-
# Copyright 2018 Huawei Technologies Co.,Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from openstack import resource2
from openstack.network import network_service
class Statuses(resource2.Resource):
    """Read-only resource for the status tree of a load balancer, served
    from the /lbaas/loadbalancers/{loadbalance_id}/statuses endpoint."""
    resource_key = 'statuses'
    resources_key = 'statuses'
    # loadbalance_id in the path template is filled from the URI field below
    base_path = '/lbaas/loadbalancers/%(loadbalance_id)s/statuses'
    service = network_service.NetworkService()

    # capabilities: the status tree can only be fetched, never modified
    allow_create = False
    allow_get = True
    allow_update = False
    allow_delete = False
    allow_list = False

    # loadbalancer: nested status structure returned in the response body
    loadbalancer = resource2.Body("loadbalancer", type=dict)
    # loadbalancer id (URI path parameter)
    loadbalance_id = resource2.URI("loadbalance_id")
| [
"769837173@qq.com"
] | 769837173@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.