blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e30eac1ded6ffcfd4458f5a272fdbbeb01c07f3a
|
f3a7eae3031bb9afe75116a9b86278490ac4a7e6
|
/text/symbols.py
|
c329f2df647246d4d8e564a02e78e26b68ac2691
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
LOCS-AI/Multilanguage_Tacotron_2
|
ea34c4fb41e8112537529945b5a31cf2e78d0610
|
82c788fb26d93c6735c54c2fe4ae7bcbd0eec69f
|
refs/heads/master
| 2022-12-31T01:39:57.432804
| 2020-10-08T00:04:58
| 2020-10-08T00:04:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 989
|
py
|
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. '''
from text import cmudict
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'abcdefghijklmnopqrstuvwxyz'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
hangul_symbol = u'''␀␃%"ᄀᄁᄂᄃᄄᄅᄆᄇᄈᄉᄊᄋᄌᄍᄎᄏᄐᄑᄒᅌᅡᅢᅣᅤᅥᅦᅧᅨᅩᅪᅫᅬᅭᅮᅯᅰᅱᅲᅳᅴᅵᆞᆢᆨᆩᆫᆬᆭᆮᆯᆰᆱᆲᆴᆶᆪᆷᆸᆹᆺᆻᆼᆽᆾᆿᇀᇁᇂ'''
# Export all symbols:
symbols = [_pad] + list(_special) + list(_punctuation) + _arpabet + list(_letters)
symbols = list(hangul_symbol) + symbols
|
[
"thien@locslab.com"
] |
thien@locslab.com
|
1ac1bf0d486a318d12379426563fee9a8f6f22d6
|
fe85138c949c6198184c591780831fd2e183a24a
|
/Address Book.py
|
251c32fc6f328cd1f9352bc08e897b68bbe90efc
|
[] |
no_license
|
valeri1383/Personal-Python-Projects
|
e98f6b7171298def019db4e28f6d176a709615cc
|
b7db81cb44668f549a7fd15de84c0cb23654ac3d
|
refs/heads/main
| 2023-05-26T09:02:24.260700
| 2023-05-22T14:40:28
| 2023-05-22T14:40:28
| 337,518,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,253
|
py
|
from tkinter import *
# Main window setup.
root = Tk()
root.geometry('400x400')
root.configure(bg='cyan')
root.resizable(1, 1)
root.title('Address Book')
# In-memory contact store: [name, phone] pairs (not persisted).
contact_list = [
    ['John Smith', '07567374343'],
    ['Terry Adams', '07569984343'],
    ['Allen Gibson', '07564474743'],
    ['Grant Foster', '07567396843'],
    ['Hall Grey', '07567746343']
]
# Tk variables bound to the two entry fields below.
Name = StringVar()
Number = StringVar()
# Scrollable listbox on the right showing contact names.
frame = Frame(root)
frame.pack(side=RIGHT)
scroll = Scrollbar(frame, orient=VERTICAL)
select = Listbox(frame,bg='light goldenrod', yscrollcommand=scroll.set, width=30, height=33)
scroll.configure(command=select.yview)
scroll.pack(side=RIGHT, fill=Y)
select.pack(side=LEFT, fill=BOTH, expand=1)
def Selected():
    """Return the index of the currently selected listbox row.

    Raises IndexError when nothing is selected.
    """
    return int(select.curselection()[0])
def AddContact():
    """Append the entry-field values as a new contact and refresh the list."""
    contact_list.append([Name.get(), Number.get()])
    Select_set()
def EDIT():
    """Overwrite the selected contact with the entry-field values."""
    contact_list[Selected()] = [Name.get(), Number.get()]
    Select_set()
def DELETE():
    """Remove the selected contact and refresh the list."""
    del contact_list[Selected()]
    Select_set()
def VIEW():
    """Copy the selected contact's name/phone into the entry fields."""
    NAME, PHONE = contact_list[Selected()]
    Name.set(NAME)
    Number.set(PHONE)
def EXIT():
    """Close the application window."""
    root.destroy()
def RESET():
    """Clear both entry fields."""
    Name.set('')
    Number.set('')
def Select_set():
    """Sort contacts by name and rebuild the listbox contents."""
    contact_list.sort()
    select.delete(0, END)
    for name, phone in contact_list:
        select.insert(END, name)
Select_set()
# Entry fields and action buttons.
Label(root, text='NAME', font='arial 15 bold', bg='cyan').pack()
Entry(root, font=20, bg='light yellow', textvariable=Name).pack()
Label(root, text='PHONE NO.', font='arial 15 bold', bg='cyan').pack()
Entry(root, font=20,bg='light yellow', textvariable=Number).pack()
Button(root, text='ADD', width=7, font='arial 15 bold', bg='SlateGray4', command=AddContact).pack()
Button(root, text='EDIT', width=7, font='arial 15 bold', bg='SlateGray4', command=EDIT).pack()
Button(root, text="DELETE", width=7, font='arial 15 bold', bg='SlateGray4', command=DELETE).pack()
Button(root, text="VIEW", width=7, font='arial 15 bold', bg='SlateGray4', command=VIEW).pack()
Button(root, text="EXIT", width=7, font='arial 15 bold', bg='tomato', command=EXIT).pack()
Button(root, text="RESET", width=7, font='arial 15 bold', bg='SlateGray4', command=RESET).pack()
mainloop()
|
[
"noreply@github.com"
] |
valeri1383.noreply@github.com
|
df8348437cb3f52a36143204a8098092a7baae05
|
cdd2003610c4c451dc38781d5ece2cf4e8138c27
|
/src/convert_rviz.py
|
cd66d10b1a8cd9aecf17d38b1ef969533384d9a9
|
[] |
no_license
|
DLu/rwt_config_generator
|
7efb29d773dddae0868be14606ba91893fae806c
|
873b1aa0d4c94cdba3b15ef85d46f70c26f6dc86
|
refs/heads/master
| 2020-12-24T16:24:02.304617
| 2016-03-03T19:04:52
| 2016-03-03T19:04:52
| 39,230,985
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,622
|
py
|
#!/usr/bin/python
from __future__ import print_function
import sys
import yaml
from rwt_config_generator import *
import argparse
import rospy
def warning(*objs):
    """Print a warning to stderr (print-style varargs)."""
    print("WARNING: ", *objs, file=sys.stderr)
# Command-line interface: an rviz config file in, optional HTML file out.
parser = argparse.ArgumentParser()
parser.add_argument('rviz_config')
parser.add_argument('output_html_file', nargs='?')
parser.add_argument('-b', '--bson', action='store_true')
parser.add_argument('-u', '--host', type=str, nargs='?')
# rospy.myargv() strips ROS remapping arguments before argparse sees them.
args = parser.parse_args(rospy.myargv()[1:])
# NOTE(review): yaml.load without an explicit Loader executes arbitrary tags
# and is rejected by newer PyYAML; consider yaml.safe_load. Also the file
# handle is never closed explicitly.
rviz = yaml.load( open(args.rviz_config) )['Visualization Manager']
def to_hex(s):
    """Convert an rviz 'R;G;B' color string to a '0xrrggbb' hex string.

    Returns None when the input is None (color not specified in the config).
    """
    if s is None:
        return None
    r, g, b = (int(part) for part in s.split(';'))
    return '0x%02x%02x%02x' % (r, g, b)
def get(key, d=None):
    """Look up a '/'-separated key path in nested dicts.

    Defaults to the module-level rviz config when no dict is given; returns
    None as soon as any path component is missing.
    """
    node = rviz if d is None else d
    for part in key.split('/'):
        node = node.get(part, None)
        if node is None:
            return None
    return node
def parse_displays(c, displays):
    """Translate enabled rviz display entries into RWTConfig add_* calls.

    c: the RWTConfig being built; displays: list of rviz display dicts.
    Unknown display classes produce a warning on stderr. Groups recurse.
    """
    for display in displays:
        # Skip displays the user disabled in rviz.
        if not display.get('Enabled', True):
            continue
        cls = display['Class']
        if cls == 'rviz/Grid':
            c.add_grid()
        elif cls == 'rviz/RobotModel':
            c.add_model(param=display.get('Robot Description'), tfPrefix=display.get('TF Prefix'))
        elif cls == 'rviz/Marker':
            c.add_markers(topic=display.get('Marker Topic'))
        elif cls == 'rviz/MarkerArray':
            c.add_marker_array(topic=display.get('Marker Topic'))
        elif cls == 'rviz/InteractiveMarkers':
            # The web client wants the base topic without the '/update' suffix.
            topic = display.get('Update Topic')
            topic = topic.replace('/update', '')
            c.add_imarkers(topic=topic)
        elif cls == 'rviz/PointCloud2':
            c.add_pointcloud(topic=display.get('Topic'), size=display.get('Size (m)'))
        elif cls == 'rviz/LaserScan':
            c.add_laserscan(topic=display.get('Topic'), color=to_hex(display.get('Color')), size=display.get('Size (m)'))
        elif cls == 'rviz/Path':
            c.add_path(topic=display.get('Topic'), color=to_hex(display.get('Color')))
        elif cls == 'rviz/Polygon':
            c.add_polygon(topic=display.get('Topic'), color=to_hex(display.get('Color')))
        elif cls == 'rviz/Pose':
            c.add_pose(topic=display.get('Topic'), color=to_hex(display.get('Color')),
                       shaft_radius=display.get('Shaft Radius'),
                       head_radius=display.get('Head Radius'),
                       shaft_length=display.get('Shaft Length'),
                       head_length=display.get('Head Length'))
        elif cls == 'rviz/Odometry':
            c.add_odometry(topic=display.get('Topic'), color=to_hex(display.get('Color')),
                           shaft_length=display.get('Length'), keep=display.get('Keep'))
        elif cls == 'rviz/PoseArray':
            c.add_posearray(topic=display.get('Topic'), color=to_hex(display.get('Color')), length=display.get('Arrow Length'))
        elif cls == 'rviz/PointStamped':
            c.add_point(topic=display.get('Topic'), color=to_hex(display.get('Color')), radius=display.get('Radius'))
        elif cls == 'rviz/Group':
            # Groups nest their own display list; flatten by recursion.
            parse_displays( c, display['Displays'] )
        elif cls == 'rviz/Map':
            c.add_map(topic=display.get('Topic'), alpha=display.get('Alpha'), tf=True)
        else:
            warning("Class %s not supported yet!"%cls)
# Build the web config from the parsed rviz settings and emit the HTML.
frame = get('Global Options/Fixed Frame')
c = RWTConfig(host=args.host, fixed_frame=frame)
if args.bson:
    c.add_bson_header()
parse_displays(c, get('Displays'))
if args.output_html_file:
    with open(args.output_html_file, 'w') as f:
        f.write(str(c))
else:
    # No output file given: print the generated HTML to stdout.
    print(c)
|
[
"davidvlu@gmail.com"
] |
davidvlu@gmail.com
|
3c36c0d10742f9c25af173e2077d9c835a3e3ff8
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/celery/2015/12/graph.py
|
d441a54ca1edf2545aaaa16e0d18be8ec8d7318d
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005
| 2021-01-28T19:40:51
| 2021-01-28T19:40:51
| 306,497,459
| 1
| 1
| null | 2020-11-24T20:56:18
| 2020-10-23T01:18:07
| null |
UTF-8
|
Python
| false
| false
| 6,432
|
py
|
# -*- coding: utf-8 -*-
"""
The :program:`celery graph` command.
.. program:: celery graph
"""
from __future__ import absolute_import, unicode_literals
from operator import itemgetter
from celery.datastructures import DependencyGraph, GraphFormatter
from celery.five import items
from .base import Command
__all__ = ['graph']
class graph(Command):
    """The ``celery graph`` command: render bootstep or worker graphs as DOT."""

    args = """<TYPE> [arguments]
..... bootsteps [worker] [consumer]
..... workers [enumerate]
"""
    def run(self, what=None, *args, **kwargs):
        """Dispatch to the requested graph type ('bootsteps' or 'workers')."""
        # (local name 'map' shadows the builtin — kept as-is)
        map = {'bootsteps': self.bootsteps, 'workers': self.workers}
        if not what:
            raise self.UsageError('missing type')
        elif what not in map:
            raise self.Error('no graph {0} in {1}'.format(what, '|'.join(map)))
        return map[what](*args, **kwargs)
    def bootsteps(self, *args, **kwargs):
        """Write the worker and/or consumer bootstep dependency graph as DOT."""
        worker = self.app.WorkController()
        include = {arg.lower() for arg in args or ['worker', 'consumer']}
        if 'worker' in include:
            graph = worker.blueprint.graph
            if 'consumer' in include:
                # Merge the consumer blueprint into the worker graph.
                worker.blueprint.connect_with(worker.consumer.blueprint)
        else:
            graph = worker.consumer.blueprint.graph
        graph.to_dot(self.stdout)
    def workers(self, *args, **kwargs):
        """Write a broker/workers/backend topology graph as DOT.

        Positional args are 'key:value' pairs: nodes, threads, wmax, tmax,
        backend, broker, enumerate, generic.
        """
        def simplearg(arg):
            # 'key:v1,v2' -> ('key', ['v1', 'v2']); 'key:v' -> ('key', 'v')
            return maybe_list(itemgetter(0, 2)(arg.partition(':')))
        def maybe_list(l, sep=','):
            return (l[0], l[1].split(sep) if sep in l[1] else l[1])
        args = dict(simplearg(arg) for arg in args)
        generic = 'generic' in args
        def generic_label(node):
            # e.g. 'Broker (amqp://)' instead of the full connection URI.
            return '{0} ({1}://)'.format(type(node).__name__,
                                         node._label.split('://')[0])
        class Node(object):
            """Base graph node: a label plus an optional position."""
            force_label = None
            scheme = {}
            def __init__(self, label, pos=None):
                self._label = label
                self.pos = pos
            def label(self):
                return self._label
            def __str__(self):
                return self.label()
        class Thread(Node):
            """Worker pool process/thread node; labelled thr-N from tids."""
            scheme = {'fillcolor': 'lightcyan4', 'fontcolor': 'yellow',
                      'shape': 'oval', 'fontsize': 10, 'width': 0.3,
                      'color': 'black'}
            def __init__(self, label, **kwargs):
                self._label = 'thr-{0}'.format(next(tids))
                self.real_label = label
                self.pos = 0
        class Formatter(GraphFormatter):
            """Applies each node's scheme when drawing nodes and edges."""
            def label(self, obj):
                return obj and obj.label()
            def node(self, obj):
                scheme = dict(obj.scheme) if obj.pos else obj.scheme
                if isinstance(obj, Thread):
                    scheme['label'] = obj.real_label
                return self.draw_node(
                    obj, dict(self.node_scheme, **scheme),
                )
            def terminal_node(self, obj):
                return self.draw_node(
                    obj, dict(self.term_scheme, **obj.scheme),
                )
            def edge(self, a, b, **attrs):
                if isinstance(a, Thread):
                    attrs.update(arrowhead='none', arrowtail='tee')
                return self.draw_edge(a, b, self.edge_scheme, attrs)
        def subscript(n):
            # Render an int with Unicode subscript digits (for Worker₁ etc.).
            S = {'0': '₀', '1': '₁', '2': '₂', '3': '₃', '4': '₄',
                 '5': '₅', '6': '₆', '7': '₇', '8': '₈', '9': '₉'}
            return ''.join([S[i] for i in str(n)])
        class Worker(Node):
            pass
        class Backend(Node):
            scheme = {'shape': 'folder', 'width': 2,
                      'height': 1, 'color': 'black',
                      'fillcolor': 'peachpuff3', 'color': 'peachpuff4'}
            def label(self):
                return generic_label(self) if generic else self._label
        class Broker(Node):
            scheme = {'shape': 'circle', 'fillcolor': 'cadetblue3',
                      'color': 'cadetblue4', 'height': 1}
            def label(self):
                return generic_label(self) if generic else self._label
        from itertools import count
        tids = count(1)
        # Display caps: at most wmax workers / tmax threads shown per group.
        Wmax = int(args.get('wmax', 4) or 0)
        Tmax = int(args.get('tmax', 3) or 0)
        def maybe_abbr(l, name, max=Wmax):
            # Abbreviate long lists as [first..., 'W⎨…k⎬', last].
            size = len(l)
            abbr = max and size > max
            if 'enumerate' in args:
                l = ['{0}{1}'.format(name, subscript(i + 1))
                     for i, obj in enumerate(l)]
            if abbr:
                l = l[0:max - 1] + [l[size - 1]]
                l[max - 2] = '{0}⎨…{1}⎬'.format(
                    name[0], subscript(size - (max - 1)))
            return l
        try:
            workers = args['nodes']
            threads = args.get('threads') or []
        except KeyError:
            # No explicit node list: discover workers via broadcast inspect.
            replies = self.app.control.inspect().stats()
            workers, threads = [], []
            for worker, reply in items(replies):
                workers.append(worker)
                threads.append(reply['pool']['max-concurrency'])
        wlen = len(workers)
        backend = args.get('backend', self.app.conf.result_backend)
        threads_for = {}
        workers = maybe_abbr(workers, 'Worker')
        if Wmax and wlen > Wmax:
            threads = threads[0:3] + [threads[-1]]
        for i, threads in enumerate(threads):
            threads_for[workers[i]] = maybe_abbr(
                list(range(int(threads))), 'P', Tmax,
            )
        broker = Broker(args.get(
            'broker', self.app.connection_for_read().as_uri()))
        backend = Backend(backend) if backend else None
        graph = DependencyGraph(formatter=Formatter())
        graph.add_arc(broker)
        if backend:
            graph.add_arc(backend)
        curworker = [0]
        for i, worker in enumerate(workers):
            worker = Worker(worker, pos=i)
            graph.add_arc(worker)
            graph.add_edge(worker, broker)
            if backend:
                graph.add_edge(worker, backend)
            threads = threads_for.get(worker._label)
            if threads:
                for thread in threads:
                    thread = Thread(thread)
                    graph.add_arc(thread)
                    graph.add_edge(thread, worker)
            curworker[0] += 1
        graph.to_dot(self.stdout)
|
[
"rodrigosoaresilva@gmail.com"
] |
rodrigosoaresilva@gmail.com
|
4eb09dfed6ad25c8eddd6132f2dc73dff3fcc6a3
|
1933ef2c5b3ec58feeb50dd092d670f58a3ec2bb
|
/kospeech/models/modules.py
|
352b6a0bd0bf59f8861fa3d7e573569560a2ad30
|
[
"Apache-2.0"
] |
permissive
|
hephaex/KoSpeech
|
68275af311ae5c53548f7c7bc27fe9dd5b1e441b
|
bf3fa0dc6d50089164fd0b47e02620062718d407
|
refs/heads/master
| 2022-12-02T02:00:01.164265
| 2020-08-05T08:47:55
| 2020-08-05T08:47:55
| 285,344,731
| 0
| 0
|
Apache-2.0
| 2020-08-12T14:53:11
| 2020-08-05T16:22:59
| null |
UTF-8
|
Python
| false
| false
| 1,579
|
py
|
import torch
import torch.nn as nn
import torch.nn.init as init
from torch import Tensor
class Linear(nn.Module):
    """torch.nn.Linear with Xavier-uniform weights and zero-initialized bias."""

    def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
        super().__init__()
        layer = nn.Linear(in_features, out_features, bias=bias)
        init.xavier_uniform_(layer.weight)
        if bias:
            init.zeros_(layer.bias)
        self.linear = layer

    def forward(self, x: Tensor) -> Tensor:
        """Apply the affine transform to ``x``."""
        return self.linear(x)
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with learnable scale/shift."""

    def __init__(self, dim: int, eps: float = 1e-6) -> None:
        super().__init__()
        # gamma scales and beta shifts the normalized activations.
        self.gamma = nn.Parameter(torch.ones(dim))
        self.beta = nn.Parameter(torch.zeros(dim))
        self.eps = eps

    def forward(self, z: Tensor) -> Tensor:
        """Normalize ``z`` along its last axis, then apply gamma/beta."""
        centered = z - z.mean(dim=-1, keepdim=True)
        normalized = centered / (z.std(dim=-1, keepdim=True) + self.eps)
        return self.gamma * normalized + self.beta
class View(nn.Module):
    """Reshape module usable inside nn.Sequential (wraps Tensor.view)."""

    def __init__(self, shape: tuple, contiguous: bool = False):
        super().__init__()
        self.shape = shape
        # view() requires a contiguous tensor; enable for transposed inputs.
        self.contiguous = contiguous

    def forward(self, inputs):
        """Return ``inputs`` reshaped to ``self.shape``."""
        tensor = inputs.contiguous() if self.contiguous else inputs
        return tensor.view(*self.shape)
|
[
"sh951011@gmail.com"
] |
sh951011@gmail.com
|
525051e2943540875900fe0b6db434ee527c30ba
|
80d50ea48e10674b1b7d3f583a1c4b7d0b01200f
|
/examples/v1/usage-metering/GetUsageNetworkFlows_1239422069.py
|
60afb66b6f88d5918aba22ca4b3b72c0ab5be76d
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] |
permissive
|
DataDog/datadog-api-client-python
|
3e01fa630278ad0b5c7005f08b7f61d07aa87345
|
392de360e7de659ee25e4a6753706820ca7c6a92
|
refs/heads/master
| 2023-09-01T20:32:37.718187
| 2023-09-01T14:42:04
| 2023-09-01T14:42:04
| 193,793,657
| 82
| 36
|
Apache-2.0
| 2023-09-14T18:22:39
| 2019-06-25T22:52:04
|
Python
|
UTF-8
|
Python
| false
| false
| 599
|
py
|
"""
Get hourly usage for Network Flows returns "OK" response
"""
from datetime import datetime
from dateutil.relativedelta import relativedelta
from datadog_api_client import ApiClient, Configuration
from datadog_api_client.v1.api.usage_metering_api import UsageMeteringApi
configuration = Configuration()
with ApiClient(configuration) as api_client:
api_instance = UsageMeteringApi(api_client)
response = api_instance.get_usage_network_flows(
start_hr=(datetime.now() + relativedelta(days=-5)),
end_hr=(datetime.now() + relativedelta(days=-3)),
)
print(response)
|
[
"noreply@github.com"
] |
DataDog.noreply@github.com
|
16ffe2ce0b7d1d05344cc7814fd04b63e4a84196
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_4/hrbmax002/piglatin.py
|
32eb09647dd7f6c75cec56edc0b28a10e8811327
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
def toPigLatin(s):
    """Translate a space-separated sentence into this assignment's pig latin.

    Vowel-initial words get "way" appended. Consonant-initial words get a
    sentinel "a" appended, are rotated left until a vowel leads, then get
    "ay" appended.
    """
    vowels = ("A", "E", "I", "O", "U")
    if s[-1] != " ":
        s += " "
    pieces = []
    while len(s) > 0:
        word, _, s = s.partition(" ")
        if word[0].upper() in vowels:
            pieces.append(word + "way ")
        else:
            word += "a"  # sentinel vowel guarantees the rotation terminates
            while word[0].upper() not in vowels:
                word = word[1:] + word[0]
            pieces.append(word + "ay ")
    # Drop the trailing space added after the last word.
    return "".join(pieces)[:-1]
def toEnglish(s):
    """Invert toPigLatin: recover the original space-separated sentence."""
    if s[-1] != " ":
        s += " "
    words = []
    while len(s) > 0:
        word, _, s = s.partition(" ")
        if word.endswith("way"):
            words.append(word[:-3])
        else:
            word = word[:-2]  # drop the "ay" suffix
            while word[-1] != "a":
                word = word[-1] + word[:-1]  # rotate right
            words.append(word[:-1])  # drop the sentinel "a"
    return " ".join(words)
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
eb085418aab782c970d7166273fd9b9262c46f5b
|
c858d9511cdb6a6ca723cd2dd05827d281fa764d
|
/MFTU/lesson 7/Test work/test_F.py
|
b6885f38ec053864866d442146f62a2ba115c3a5
|
[] |
no_license
|
DontTouchMyMind/education
|
0c904aa929cb5349d7af7e06d9b1bbaab972ef95
|
32a53eb4086b730cc116e633f68cf01f3d4ec1d1
|
refs/heads/master
| 2021-03-12T11:15:02.479779
| 2020-09-17T08:19:50
| 2020-09-17T08:19:50
| 246,616,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
# Необходимо найти НОД двух чисел, используя алгоритм Евклида.
#
# Формат входных данных
# На вход подаются два натуральных числа, по числу в новой строке.
#
# Формат выходных данных
# Одно число - НОД входных чисел.
def gcd(a, b):
    """Return the greatest common divisor of two natural numbers.

    Uses the iterative Euclidean algorithm with the modulo operator. The
    original repeated-subtraction recursion produced identical results but
    hit Python's recursion limit for inputs like gcd(1000000, 3); modulo
    reduces in O(log min(a, b)) steps with no recursion at all.
    """
    while b:
        a, b = b, a % b
    return a
# Read two natural numbers, one per line, and print their GCD.
n1 = int(input())
n2 = int(input())
print(gcd(n1, n2))
|
[
"tobigface@gmail.com"
] |
tobigface@gmail.com
|
6450073c33cb50db18dc4b145b95d18e75ee47b0
|
e2d22f12f8e540a80d31de9debe775d35c3c5c22
|
/blousebrothers/confs/migrations/0037_auto_20170117_1535.py
|
6841343b2a40c2fbb431ff15ae9ddfd4cd5a80ee
|
[
"MIT"
] |
permissive
|
sladinji/blousebrothers
|
360c3b78ec43379977dbf470e5721e6a695b2354
|
461de3ba011c0aaed3f0014136c4497b6890d086
|
refs/heads/master
| 2022-12-20T10:24:07.631454
| 2019-06-13T13:17:35
| 2019-06-13T13:17:35
| 66,867,705
| 1
| 0
|
NOASSERTION
| 2022-12-19T18:15:44
| 2016-08-29T18:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 813
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-01-17 15:35
from __future__ import unicode_literals
from decimal import Decimal
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adjust Conference price default and type choices."""

    dependencies = [
        ('confs', '0036_auto_20170110_1100'),
    ]

    operations = [
        migrations.AlterField(
            model_name='conference',
            name='price',
            field=models.DecimalField(decimal_places=2, default=Decimal('0.5'), help_text='', max_digits=6, verbose_name='Prix de vente'),
        ),
        migrations.AlterField(
            model_name='conference',
            name='type',
            # NOTE(review): default 'DP' is not among the declared choices
            # (DCP/QI/LCA) — carried over verbatim from the generated model.
            field=models.CharField(choices=[('DCP', 'DCP'), ('QI', 'QI'), ('LCA', 'LCA')], default='DP', max_length=10, verbose_name='Type'),
        ),
    ]
|
[
"julien.almarcha@gmail.com"
] |
julien.almarcha@gmail.com
|
c8e2155ef68a3eba87ea0e8c4cab9b582c3f5355
|
8bc3e7bd0fa1714b3d0466e940ed801cf9a4c5d4
|
/pyvisual/node/io/system_var.py
|
2e6dfeaf5a70761d5951b4abff26e7ec2a04eaae
|
[] |
no_license
|
m0r13/pyvisual
|
d99b3512fefaf4a2164362a0b7aabd1df9ecee03
|
f6b3e2217e647b80f1379716c00e8adb53975bca
|
refs/heads/master
| 2022-02-21T22:24:22.467475
| 2019-06-17T20:38:48
| 2019-06-17T20:38:48
| 140,211,941
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,802
|
py
|
import json
import os
import time
from collections import defaultdict, OrderedDict
import imgui
from pyvisual.node import dtype, value
from pyvisual.node.base import Node
from pyvisual.editor import widget
# Seconds between automatic writes of dirty variables to disk.
SERIALIZATION_WRITE_INTERVAL = 5.0
SERIALIZATION_FILE = "system_vars.json"
# if you add another variable with another dtype than here, add the name of the dtype below!
# Declaration of every system variable: dtype plus widget/default arguments.
VARIABLES = OrderedDict([
    ("gain", {"dtype" : dtype.float, "dtype_args" : {"default" : 4.0, "range" : [0.0, float("inf")]}}),
    ("threshold", {"dtype" : dtype.float, "dtype_args" : {"default" : 0.4, "range" : [0.0, float("inf")]}}),
    ("ref_aspect", {"dtype" : dtype.str, "dtype_args" : {"default" : "16:9"}}),
    ("ref_highres_height", {"dtype" : dtype.int, "dtype_args" : {"default" : 1080, "range" : [0, float("inf")]}}),
    ("ref_lowres_height", {"dtype" : dtype.int, "dtype_args" : {"default" : 720, "range" : [0, float("inf")]}}),
    ("ref_noiseres_height", {"dtype" : dtype.int, "dtype_args" : {"default" : 512, "range" : [0, float("inf")]}}),
])
# name -> value for each variable
values = OrderedDict()
# name -> widget for each variable
widgets = OrderedDict()
# dtype -> list of (name, value)
values_by_dtype = defaultdict(lambda: [])
# initialize values and widgets that are associated with variables
for name, spec in VARIABLES.items():
assert "dtype" in spec
dt = spec["dtype"]
dt_args = spec.get("dtype_args", {})
default_value = dt.default
if "default" in dt_args:
default_value = dt_args["default"]
v = value.SettableValue(default_value)
w = widget.create_widget(dt, dt_args)
w.width = widget.WIDGET_WIDTH * 1.5
values[name] = v
values_by_dtype[dt].append((name, v))
widgets[name] = w
_variables_dirty = False
_variables_last_written = 0
_node_instances = set()
# Important: Call this when changed a value! (Is done by editor for example)
def notify_change():
    """Mark variables dirty and force every registered node to re-evaluate."""
    global _variables_dirty
    _variables_dirty = True
    for instance in _node_instances:
        instance.force_evaluate()
    # if the nodes would take over the values if they are changed only,
    # then this would need to be changed probably
    for value in values.values():
        value.reset_changed()
def read_variables():
    """Load persisted variable values from SERIALIZATION_FILE, if present.

    Names not declared in VARIABLES are ignored; each value is unserialized
    with its variable's declared dtype. Notifies nodes of the change.
    """
    if not os.path.isfile(SERIALIZATION_FILE):
        return
    # Fix: close the file deterministically (the original passed an open()
    # handle straight into json.load and leaked it); also drop the dead
    # initial `serialized_values = {}` and the redundant second dict lookup.
    with open(SERIALIZATION_FILE) as f:
        serialized_values = json.load(f)
    for name, serialized_value in serialized_values.items():
        if name not in VARIABLES:
            continue
        dt = VARIABLES[name]["dtype"]
        values[name].value = dt.base_type.unserialize(serialized_value)
    notify_change()
read_variables()
def write_variables(force=False):
    """Persist current variable values to SERIALIZATION_FILE as JSON.

    Writes at most once per SERIALIZATION_WRITE_INTERVAL seconds unless
    *force* is given; clears the dirty flag and records the write time.
    """
    global _variables_dirty, _variables_last_written
    if force or time.time() - _variables_last_written > SERIALIZATION_WRITE_INTERVAL:
        _variables_dirty = False
        _variables_last_written = time.time()
        data = {}
        for name, spec in VARIABLES.items():
            data[name] = spec["dtype"].base_type.serialize(values[name].value)
        # Fix: use the SERIALIZATION_FILE constant instead of a duplicated
        # hard-coded "system_vars.json", so read and write paths cannot diverge.
        with open(SERIALIZATION_FILE, "w") as f:
            json.dump(data, f)
class GetSystemVar(Node):
    """Abstract node exposing the current value of one system variable.

    Concrete per-dtype subclasses (GetFloatSystemVar, ...) are generated
    below by setting DTYPE.
    """
    # Set by the generated subclasses to the dtype this node outputs.
    DTYPE = None
    class Meta:
        inputs = [
            {"name" : "name", "dtype" : dtype.str, "hide" : True}
        ]
        options = {
            "virtual" : True
        }
    def __init__(self):
        super().__init__()
        # SettableValue of the selected variable, or None when unselected.
        self._value = None
    @property
    def collapsed_node_title(self):
        return "get system var: %s" % self.get("name")
    def start(self, graph):
        """Register for change notifications and resolve the selected name."""
        _node_instances.add(self)
        name = self.get("name")
        if name:
            self._value = values.get(name, None)
            if self._value is None:
                # Stored name no longer exists: clear the stale selection.
                self.get_input("name").value = ""
    def _evaluate(self):
        output = self.get_output("output")
        if self._value != None:
            output.value = self._value.value
    def stop(self):
        _node_instances.remove(self)
    def _show_custom_ui(self):
        """Render a combo box listing all variables of this node's DTYPE."""
        selected_name = self.get("name")
        preview = selected_name if selected_name else "<none>"
        if imgui.begin_combo("", preview):
            is_selected = not selected_name
            opened, selected = imgui.selectable("<none>", is_selected)
            if opened:
                self.get_input("name").value = ""
                self._value = None
            if is_selected:
                imgui.set_item_default_focus()
            imgui.separator()
            for name, value in values_by_dtype.get(self.DTYPE, []):
                is_selected = name == selected_name
                opened, selected = imgui.selectable(name, is_selected)
                if opened:
                    self.get_input("name").value = name
                    self._value = value
                if is_selected:
                    imgui.set_item_default_focus()
            imgui.end_combo()
    @classmethod
    def get_presets(cls, graph):
        """Return one editor preset per known variable of this dtype."""
        presets = []
        for name, value in values_by_dtype.get(cls.DTYPE, []):
            presets.append((name, {"i_name" : name}))
        return presets
return presets
# Map dtype -> capitalized fragment used in the generated class names.
dtype_capital_names = {
    dtype.float : "Float",
    dtype.str : "Str",
    dtype.int : "Int",
}
# create a GetXXXSystemVar class for each dtype
node_classes = []
for dt in values_by_dtype.keys():
    name = "Get%sSystemVar" % dtype_capital_names[dt]
    # The Meta class body executes each iteration, so every generated class
    # binds its own dt in the outputs list.
    class Meta:
        outputs = [
            {"name" : "output", "dtype" : dt, "manual_input": True},
        ]
        options = {
            "virtual" : False,
            "show_title" : False
        }
    cls = type(name, (GetSystemVar,), {"DTYPE" : dt, "Meta" : Meta, "__module__" : __name__})
    node_classes.append(cls)
|
[
"moritz.hilscher@gmail.com"
] |
moritz.hilscher@gmail.com
|
1465bbad98fe6c51d22d31a82efaa6fba3362f45
|
e8a285cb1dcdae6f1b6d8506b8d25a1d031d6cd7
|
/cpptools/tests/test_write_pythia_hepmc3.py
|
d4e73a3185bc0137d2756b3b3f25a6b491647b97
|
[] |
no_license
|
matplo/heppy
|
f30558e4ff3c1720c63b4d82f739b3f8acadc53e
|
88c931e3e7dcf57a3a476ef0a92f0204491cafb9
|
refs/heads/master
| 2023-07-07T18:17:04.486149
| 2023-06-29T20:45:32
| 2023-06-29T20:45:32
| 201,352,733
| 5
| 8
| null | 2023-07-04T21:57:31
| 2019-08-08T23:33:39
|
C
|
UTF-8
|
Python
| false
| false
| 782
|
py
|
#!/usr/bin/env python
import pythia8
import pythiahepmc3
def create_and_init_pythia(config_strings=None):
    """Create a Pythia8 instance, apply config strings, and initialize it.

    Fix: the default was a shared mutable list (``config_strings=[]``);
    use None as the sentinel instead. Per-event printout is silenced.
    Returns the initialized pythia object, or None when init() fails.
    """
    pythia = pythia8.Pythia()
    for s in (config_strings or []):
        pythia.readString(s)
    for extra_s in ["Next:numberShowEvent = 0", "Next:numberShowInfo = 0", "Next:numberShowProcess = 0", "Next:numberCount = 0"]:
        pythia.readString(extra_s)
    if pythia.init():
        return pythia
    return None
def main():
    """Generate 100 hard-QCD events and write them to a HepMC3 file."""
    pythia = create_and_init_pythia(["PhaseSpace:pTHatMin = 2", "HardQCD:all = on"])
    sfoutname = "test_write_pythia_hepmc3.dat"
    pyhepmcwriter = pythiahepmc3.Pythia8HepMCWrapper(sfoutname)
    for iEvent in range(100):
        # Skip events where generation failed.
        if not pythia.next(): continue
        pyhepmcwriter.fillEvent(pythia)
    pythia.stat()
    print("[i] done writing to {}".format(sfoutname))
if __name__ == '__main__':
    main()
|
[
"ploskon@gmail.com"
] |
ploskon@gmail.com
|
b1dc9e505c919a677e4ad516ba5eb32f5820c244
|
610dedfb6e21d297e8cdbcba599a4e564bd785cb
|
/EstruturaDeRepeticao/estruturaderepeticao-09.py
|
8b4c1153a41989cbf2047c8067840d6a96441880
|
[] |
no_license
|
zumbipy/PythonExercicios
|
f7b2ddf2376b9ecb2aedc77531e3571dc746a12b
|
7a17b78cf927a2889b93238542e90e00810c43e6
|
refs/heads/master
| 2021-01-23T10:43:47.997462
| 2018-07-22T14:58:44
| 2018-07-22T14:58:44
| 93,086,120
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
# Telegram: @ZumbiPy __ _ ___
# /_ / __ ____ _ / / (_) _ \__ __
# / /_/ // / ' \/ _ \/ / ___/ // /
# /___/\_,_/_/_/_/_.__/_/_/ \_, /
# E-mail: zumbipy@gmail.com /___/
"""
09 - Write a program that prints only the odd numbers
between 1 and 50.
"""
# ================================================================================
# Program logic.
# ================================================================================
for i in range(1, 50):
    # A number is even when its remainder modulo 2 is 0; otherwise it is odd.
    if i % 2 != 0:
        print(i)
print("=" * 72)
# or: step the range by 2 starting from 1, yielding only odd numbers
for i in range(1, 50, 2):
    print(i)
|
[
"zumbipy@gmail.com"
] |
zumbipy@gmail.com
|
e3bb0a08160c3b5afbb1561fc67f5e5b2b320380
|
43a676d507c9f3e007d46b9335c82f77e35350f6
|
/config/wsgi.py
|
df17ccb416ed061cc0afd7cf24b277bc198a94b4
|
[] |
no_license
|
Zoxon470/nekidaem-blog
|
79136fd9f4747afd01beb02bfd9d0c524493a6f6
|
c2539963d149841397e9eb2d4153a73abea15da2
|
refs/heads/master
| 2022-05-02T20:14:05.805564
| 2019-06-27T21:50:57
| 2019-06-27T21:50:57
| 194,165,211
| 0
| 2
| null | 2022-04-22T21:53:15
| 2019-06-27T21:25:07
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 340
|
py
|
import os
import sys
from django.core.wsgi import get_wsgi_application
# Project root: the parent directory of this config/ package.
app_path = os.path.abspath(os.path.join(
    os.path.dirname(os.path.abspath(__file__)), os.pardir))
# Make the 'nekidaem-blog' directory importable before Django loads settings.
sys.path.append(os.path.join(app_path, 'nekidaem-blog'))
# Default to the dev settings unless the environment overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'config.settings.dev')
application = get_wsgi_application()
|
[
"zoxon470@gmail.com"
] |
zoxon470@gmail.com
|
b2cd196a4e77d83e542be25199838e0b8ec80ff9
|
ad357cfbec64afb8f4cc4043b212996768f9755c
|
/api/assessment/automate/formatters.py
|
dac02f8f9749219cec476cf1e0392f3c9036f96a
|
[
"MIT"
] |
permissive
|
uktrade/market-access-api
|
6b4680e6455eb5c25480ccd3e3d9445654269f36
|
4da26d1be53843d22411577409d9489010bdda09
|
refs/heads/master
| 2023-08-30T14:47:10.373148
| 2023-08-29T13:58:08
| 2023-08-29T13:58:08
| 131,856,014
| 2
| 3
|
MIT
| 2023-09-14T08:04:42
| 2018-05-02T13:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,065
|
py
|
def rca(import_value, export_value):
    """Classify a revealed-comparative-advantage pair as a label string.

    "NA" when either value is missing; "Specialised" when both are positive;
    "Unspecialised" when both are negative; "Inconclusive" otherwise.
    """
    if import_value is None or export_value is None:
        return "NA"
    if import_value > 0 and export_value > 0:
        return "Specialised"
    if import_value < 0 and export_value < 0:
        return "Unspecialised"
    return "Inconclusive"
def rca_diff(import_value, export_value, country1, country2):
    """Describe country2's specialisation relative to country1's market."""
    if import_value is None or export_value is None:
        return "NA"
    both_positive = import_value > 0 and export_value > 0
    both_negative = import_value < 0 and export_value < 0
    if both_positive:
        return f"{country2} more specialised globally than in {country1}"
    if both_negative:
        return f"{country2} more specialised in {country1} than globally"
    return "Inconclusive"
def rca_diff_glob(import_value, export_value, country1, country2):
    """Describe which country is more specialised globally."""
    if import_value is None or export_value is None:
        return "NA"
    both_positive = import_value > 0 and export_value > 0
    both_negative = import_value < 0 and export_value < 0
    if both_positive:
        return f"{country2} more specialised globally than {country1}"
    if both_negative:
        return f"{country1} more specialised globally than {country2}"
    return "Inconclusive"
def format_value(value):
    """Render a monetary amount as a compact pound string.

    Amounts under 1000 are shown whole; strictly above 1e9 as X.Ybn,
    strictly above 1e6 as X.Ym, everything else as X.Yk (boundaries match
    the original: exactly 1e6 or 1e9 still format as k/m respectively).
    """
    THOUSAND = 1000
    MILLION = 1000000
    BILLION = 1000000000
    if value < THOUSAND:
        return f"£{round(value, 0)}"
    if value > BILLION:
        return f"£{round(value, -8) / BILLION}bn"
    if value > MILLION:
        return f"£{round(value, -5) / MILLION}m"
    return f"£{round(value, -2) / THOUSAND}k"
def value_range(import_value, export_value):
    """Format the two amounts as 'low - high', both via format_value."""
    low, high = sorted((import_value, export_value))
    return f"{format_value(low)} - {format_value(high)}"
def percent_range(import_value, export_value, decimal_places):
    """Format two fractional values as a percentage or percentage range.

    Values are scaled by 100 and rounded to decimal_places; equal scaled
    values collapse to a single percentage.
    """
    low = min(import_value, export_value) * 100
    high = max(import_value, export_value) * 100
    if low == high:
        return f"{round(low, decimal_places)}%"
    return f"{round(low, decimal_places)}% - {round(high, decimal_places)}%"
|
[
"noreply@github.com"
] |
uktrade.noreply@github.com
|
e706179c11effcfa8f133d63d2655724fca4d1e9
|
0005e05b9d8b8ad0d3c3c0539b2ded9db6e9f1dd
|
/codechef_client/models/tag.py
|
4cdd6e64295823ef02e369ae6ce1a056970ea646
|
[] |
no_license
|
termicoder/codechef-client-lib
|
a3e3de2b300355c5daa5ed3fad03a9859af13d86
|
74d6b21787c75a987e3451751f5554e4cc6cf469
|
refs/heads/master
| 2020-03-27T17:58:45.298121
| 2018-09-30T18:03:14
| 2018-09-30T18:03:14
| 146,889,644
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,094
|
py
|
# coding: utf-8
"""
CodeChef API
CodeChef API to support different applications. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Tag(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared type; drives to_dict() serialisation.
    swagger_types = {
        'tag': 'str',
        'type': 'str',
        'count': 'int'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'tag': 'tag',
        'type': 'type',
        'count': 'count'
    }

    def __init__(self, tag=None, type=None, count=None):  # noqa: E501
        """Tag - a model defined in Swagger"""  # noqa: E501
        self._tag = None
        self._type = None
        self._count = None
        self.discriminator = None
        # Only set attributes that were actually supplied, so unset fields
        # stay None rather than being serialised with defaults.
        if tag is not None:
            self.tag = tag
        if type is not None:
            self.type = type
        if count is not None:
            self.count = count

    @property
    def tag(self):
        """Gets the tag of this Tag.  # noqa: E501

        Value  # noqa: E501

        :return: The tag of this Tag.  # noqa: E501
        :rtype: str
        """
        return self._tag

    @tag.setter
    def tag(self, tag):
        """Sets the tag of this Tag.

        Value  # noqa: E501

        :param tag: The tag of this Tag.  # noqa: E501
        :type: str
        """
        self._tag = tag

    @property
    def type(self):
        """Gets the type of this Tag.  # noqa: E501

        author/tag  # noqa: E501

        :return: The type of this Tag.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this Tag.

        author/tag  # noqa: E501

        :param type: The type of this Tag.  # noqa: E501
        :type: str
        """
        self._type = type

    @property
    def count(self):
        """Gets the count of this Tag.  # noqa: E501

        Count of problems with this tag  # noqa: E501

        :return: The count of this Tag.  # noqa: E501
        :rtype: int
        """
        return self._count

    @count.setter
    def count(self, count):
        """Sets the count of this Tag.

        Count of problems with this tag  # noqa: E501

        :param count: The count of this Tag.  # noqa: E501
        :type: int
        """
        self._count = count

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared attributes, recursively converting nested
        # models (anything with a to_dict) and containers of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Compares full instance state (all private backing fields).
        if not isinstance(other, Tag):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"diveshuttamchandani@gmail.com"
] |
diveshuttamchandani@gmail.com
|
0ee27c2b6c2029409b39052286ba40d81a836616
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/SjShHBJJMM/YW_HBJJMM_SHSJ_067.py
|
4cb90cd9223c79893514c907a5e29a58cc20a03f
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,142
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/MoneyFund/moneyfundservice")
from mfmainService import *
from mfQueryStkPriceQty import *
sys.path.append("/home/yhl2/workspace/xtp_test/MoneyFund/moneyfundmysql")
from mfCaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_HBJJMM_SHSJ_067(xtp_test_case):
    # YW_HBJJMM_SHSJ_067
    def test_YW_HBJJMM_SHSJ_067(self):
        """Shanghai A-share SELL order, best-5-or-limit price type, with an
        out-of-range price (1 billion); the case expects the order to be
        fully filled ('全成')."""
        title = '上海A股股票交易日五档即成转限价卖——错误的价格(价格10亿)'
        # Expected values for this test case.
        # Expected status is one of: initial / not traded / partially filled /
        # fully filled / partial-cancel reported / partially cancelled /
        # reported pending cancel / cancelled / rejected / cancel-rejected /
        # internally cancelled.
        # xtp_ID and cancel_xtpID default to 0 and need not be changed.
        case_goal = {
            '期望状态': '全成',
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Define the order parameters ------------------------------------------
        # Params: ticker, market, security type, security status, trading
        # status, side (B=buy / S=sell), expected status, Api handle.
        stkparm = QueryStkPriceQty('999999', '1', '111', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, fail the test case.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_LIMIT'],
                'price': 1000000000,
                'quantity': 200,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            # Initialise service-side expectations, persist the case
            # parameters, then run the order through the service harness.
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True)  # 0
if __name__ == '__main__':
    unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
3e0492db360ce01a76f540ff3bf14d2133ae8153
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_bogies.py
|
e575bb083362fdfd4e25d0bf21f424dc5070f88d
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
from xai.brain.wordbase.nouns._bogy import _BOGY
#calss header
# class header
class _BOGIES(_BOGY, ):
    """Word entry for the plural noun "bogies"; behaviour comes from _BOGY."""

    def __init__(self):
        super().__init__()
        self.name = "BOGIES"
        self.specie = 'nouns'
        self.basic = "bogy"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
dffede7cbbfa98929853b8241f6a1e945007f560
|
e5fb2d912415c302221604126afa7cbbb0a039c0
|
/keras_gym/policies/test_special.py
|
d19afe8e363fc4399127c8f76a179ab42414bef4
|
[
"MIT"
] |
permissive
|
KristianHolsheimer/keras-gym
|
fc034025a1180b1124fe1a25886b54088d2f3552
|
0296ddcc8685e1ce732c3173caaa0fd25af9ef58
|
refs/heads/master
| 2021-06-28T21:57:50.122753
| 2020-09-30T04:29:15
| 2020-09-30T04:29:15
| 174,637,157
| 17
| 5
|
MIT
| 2019-08-02T22:48:41
| 2019-03-09T02:09:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
import builtins

from gym.envs.toy_text.frozen_lake import FrozenLakeEnv, RIGHT, DOWN

from .special import UserInputPolicy
class MockInputFunction:
    """Context manager that temporarily replaces the builtin ``input``
    with a stub returning a fixed value, restoring the real one on exit.

    Patches via the ``builtins`` module rather than ``__builtins__``:
    the latter is a dict only in imported modules but a module object in
    ``__main__``, so ``__builtins__['input']`` raises TypeError when this
    file is run as a script.
    """

    def __init__(self, return_value=None):
        self.return_value = return_value
        self._orig_input_fn = builtins.input

    def _mock_input_fn(self, prompt):
        # Echo the prompt plus the canned value, mimicking a user typing it.
        print(prompt + str(self.return_value))
        return self.return_value

    def __enter__(self):
        builtins.input = self._mock_input_fn
        return self

    def __exit__(self, type, value, traceback):
        builtins.input = self._orig_input_fn
class TestUserInputPolicy:
    def test_expected(self):
        """Drive a deterministic FrozenLake episode by feeding the action
        sequence through the mocked ``input`` that UserInputPolicy reads."""
        env = FrozenLakeEnv(is_slippery=False)
        policy = UserInputPolicy(env)
        s = env.reset()
        env.render()
        # Known path to the goal on the deterministic 4x4 map.
        for i in [RIGHT, RIGHT, DOWN, DOWN, DOWN, RIGHT]:
            with MockInputFunction(return_value=i):
                a = policy(s)
            s, r, done, info = env.step(a)
            env.render()
            if done:
                break
|
[
"kristian.holsheimer@gmail.com"
] |
kristian.holsheimer@gmail.com
|
097439d4e5e15a04cbe777f77fd0434256fd16d1
|
a61ca7b89ef5817b2027239ece9dd175f776c8f3
|
/rcsb/app/chem/LogFilterUtils.py
|
86c6b9113eaef1e38f51a767d80d66d89057586c
|
[
"Apache-2.0"
] |
permissive
|
rcsb/py-rcsb_app_chem
|
7da2941f6e0d0f8ff0f5a802a3edb689d283659b
|
64ca10e6ccf8b604fa3d16ab72406408b22c0aca
|
refs/heads/master
| 2023-08-17T21:33:51.660687
| 2023-01-09T17:30:07
| 2023-01-09T17:30:07
| 245,858,180
| 0
| 0
|
Apache-2.0
| 2023-01-09T17:30:08
| 2020-03-08T17:31:37
|
Python
|
UTF-8
|
Python
| false
| false
| 866
|
py
|
##
# File: LogFilterUtils.py
# Date: 29-Jun-2020 jdw
#
# Pre-filter for Gunicorn/Uvicorn health check requests -
##
# pylint: disable=E1101
import logging
logger = logging.getLogger(__name__)
class HealthCheckFilter(logging.Filter):
    """Logging filter that suppresses records mentioning "/healthcheck"."""

    def filter(self, record):
        # Keep the record only when the rendered message does not
        # reference the health-check endpoint.
        return "/healthcheck" not in record.getMessage()
class LogFilterUtils(object):
    """Attaches a HealthCheckFilter to every gunicorn/uvicorn logger."""

    def __init__(self):
        pass

    def addFilters(self):
        """Walk all registered loggers and mute health-check noise on the
        gunicorn/uvicorn ones."""
        registered = [name for name in logging.root.manager.loggerDict]  # pylint: disable=no-member
        logger.debug("Current loggers are: %r", registered)
        for name in registered:
            if "uvicorn" in name or "gunicorn" in name:
                logger.debug("Add filter to logger %r", name)
                logging.getLogger(name).addFilter(HealthCheckFilter())
|
[
"john.westbrook@rcsb.org"
] |
john.westbrook@rcsb.org
|
8ef9af340d5e228e081e4752208ca6f0fc86e61c
|
45284836ae85685226b1f1e3b83e207e184aee0e
|
/05_ProbabilityAndStatistics/01_ProbAndStatsInPython_Beginner/01_IntroductionToStatistics/11_MeasuresOfCentralTendency.py
|
f822187cd0dd31bf9867f58f1fd34ff63b9187d8
|
[] |
no_license
|
gaurab123/DataQuest
|
5060efc3d3449e6e098cb77d7fed913516aabdbd
|
a9da9a90fab639d239340edfc7d0b2010edf2b35
|
refs/heads/master
| 2021-09-14T15:10:13.047034
| 2018-05-02T19:11:23
| 2018-05-02T19:11:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
# NOTE(review): test_scores_normal / test_scores_negative /
# test_scores_positive are numpy arrays injected by the DataQuest platform;
# this script cannot run stand-alone (the print below says as much).
print("this mission cannot be run locally as the data used is loaded \"behind the scenes\" and I really don't have access to it")
import matplotlib.pyplot as plt
# Let's put a line over our plot that shows the mean.
# This is the same histogram we plotted for skew a few screens ago.
plt.hist(test_scores_normal)
# We can use the .mean() method of a numpy array to compute the mean.
mean_test_score = test_scores_normal.mean()
# The axvline function will plot a vertical line over an existing plot.
plt.axvline(mean_test_score)
# Now we can show the plot and clear the figure.
plt.show()
# When we plot test_scores_negative, which is a very negatively skewed distribution, we see that the small values on the left pull the mean in that direction.
# Very large and very small values can easily skew the mean.
# Very skewed distributions can make the mean misleading.
plt.hist(test_scores_negative)
plt.axvline(test_scores_negative.mean())
plt.show()
# We can do the same with the positive side.
# Notice how the very high values pull the mean to the right more than we would expect.
plt.hist(test_scores_positive)
plt.axvline(test_scores_positive.mean())
plt.show()
# Record the three means for comparison.
mean_normal = test_scores_normal.mean()
mean_negative = test_scores_negative.mean()
mean_positive = test_scores_positive.mean()
print(mean_normal)
print(mean_negative)
print(mean_positive)
|
[
"kenneth.kite@gmail.com"
] |
kenneth.kite@gmail.com
|
c4c6e0af2a87a16415a3f0575945f66d748ea0f4
|
2ed1cccb49ee1549f09747061a2513fb053c707d
|
/20181004/DProposed_gpu3.py
|
91281bf826beb09c8480f4a1812fba4e8869a002
|
[] |
no_license
|
hhjung1202/Prob_network
|
1c766ef5191727a63a38654622e21f0d986b923e
|
dedd4e525c9393f15452709dda377ceee9849c15
|
refs/heads/master
| 2020-03-22T11:42:27.705442
| 2018-11-11T14:29:39
| 2018-11-11T14:29:39
| 139,990,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,940
|
py
|
import torch
from torch.autograd import Variable
import torch.optim as optim
from torchvision import datasets, transforms
from DPmodel import *
import os
import torch.backends.cudnn as cudnn
import time
import utils
os.environ["CUDA_VISIBLE_DEVICES"] = '3'
def main(model_dir, model, dataset, batch_size=128):
    """Train `model` on the given dataset with SGD + step LR schedule,
    alternating normal training and gate-only ("switched") training each
    epoch, checkpointing every 5 epochs.

    :param model_dir: directory for checkpoints / logs (stored in utils).
    :param model: the network to train (wrapped in DataParallel on GPU).
    :param dataset: 'cifar10' or 'cifar100'.
    :param batch_size: 128 or 64 (selects the matching cifar10 loader).
    """
    utils.default_model_dir = model_dir
    utils.c = None
    utils.str_w = ''
    # model = model
    lr = 0.1
    start_time = time.time()

    if dataset == 'cifar10':
        # Fixed: these comparisons used `is`, which tests object identity,
        # not numeric equality — undefined behaviour for int literals.
        if batch_size == 128:
            train_loader, test_loader = utils.cifar10_loader()
        elif batch_size == 64:
            train_loader, test_loader = utils.cifar10_loader_64()
    elif dataset == 'cifar100':
        train_loader, test_loader = utils.cifar100_loader()

    if torch.cuda.is_available():
        # os.environ["CUDA_VISIBLE_DEVICES"] = '0'
        print("USE", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model).cuda()
        cudnn.benchmark = True
    else:
        print("NO GPU -_-;")

    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=1e-4, nesterov=True)
    criterion = nn.CrossEntropyLoss().cuda()

    # Resume from the latest checkpoint if one exists.
    start_epoch = 0
    checkpoint = utils.load_checkpoint(model_dir)
    if not checkpoint:
        pass
    else:
        start_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])

    utils.init_learning(model.module)

    for epoch in range(start_epoch, 300):
        # Step schedule: lr, lr/10 at 150, lr/100 at 225.
        if epoch < 150:
            learning_rate = lr
        elif epoch < 225:
            learning_rate = lr * 0.1
        else:
            learning_rate = lr * 0.01
        for param_group in optimizer.param_groups:
            param_group['lr'] = learning_rate

        # One normal pass, then one pass with learning switched to the gates.
        train(model, optimizer, criterion, train_loader, epoch, True)
        test(model, criterion, test_loader, epoch, True)

        utils.switching_learning(model.module)
        print('switching_learning to Gate')

        train(model, optimizer, criterion, train_loader, epoch, False)
        test(model, criterion, test_loader, epoch, False)

        utils.switching_learning(model.module)
        print('switching_learning to Gate')

        if epoch % 5 == 0:
            model_filename = 'checkpoint_%03d.pth.tar' % epoch
            utils.save_checkpoint({
                'epoch': epoch,
                'model': model,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, model_filename, model_dir)

    now = time.gmtime(time.time() - start_time)
    weight_extract(model, optimizer, criterion, train_loader, epoch)
    utils.conv_weight_L1_printing(model.module)
    print('{} hours {} mins {} secs for training'.format(now.tm_hour, now.tm_min, now.tm_sec))
def train(model, optimizer, criterion, train_loader, epoch, is_main):
    """Run one training epoch; `is_main` only changes the log prefix
    (Epoch vs SWICH) to distinguish normal vs gate-switched passes."""
    model.train()
    train_loss = 0
    total = 0
    correct = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        if torch.cuda.is_available():
            data, target = Variable(data.cuda()), Variable(target.cuda())
        else:
            data, target = Variable(data), Variable(target)

        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()

        # NOTE(review): `.data[0]` is pre-0.4 PyTorch; newer versions need
        # `.item()` — confirm the torch version pinned for this repo.
        train_loss += loss.data[0]
        _, predicted = torch.max(output.data, 1)
        total += target.size(0)
        correct += predicted.eq(target.data).cpu().sum()
        # Log running loss/accuracy every 10 batches.
        if batch_idx % 10 == 0 and is_main is True:
            utils.print_log('Epoch: {} | Batch: {} |  Loss: ({:.4f}) | Acc: ({:.2f}%) ({}/{})'
                            .format(epoch, batch_idx, train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
            print('Epoch: {} | Batch: {} |  Loss: ({:.4f}) | Acc: ({:.2f}%) ({}/{})'
                  .format(epoch, batch_idx, train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
        elif batch_idx % 10 == 0 and is_main is False:
            utils.print_log('SWICH: {} | Batch: {} |  Loss: ({:.4f}) | Acc: ({:.2f}%) ({}/{})'
                            .format(epoch, batch_idx, train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
            print('SWICH: {} | Batch: {} |  Loss: ({:.4f}) | Acc: ({:.2f}%) ({}/{})'
                  .format(epoch, batch_idx, train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
def weight_extract(model, optimizer, criterion, train_loader, epoch):
    """Forward the training set once and dump per-sample gate weights
    (collected into utils.c / utils.str_w) to CSV via utils.save_to_csv."""
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if torch.cuda.is_available():
            data, target = Variable(data.cuda()), Variable(target.cuda())
        else:
            data, target = Variable(data), Variable(target)

        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)

        utils.c = target.view(-1,1)  # batch array torch.tensor[128]
        utils.c = utils.c.type(torch.cuda.FloatTensor)
        # Appends the extracted weights for this batch onto utils.c.
        utils.weight_extract_densenet(model.module)

        # Serialise the accumulated rows as comma-separated text.
        for i in utils.c:
            for j in i:
                utils.str_w = utils.str_w + str(j.tolist()) + ','
            utils.str_w += '\n'

        utils.save_to_csv()
        utils.str_w = ''

        if batch_idx % 100 == 0:
            print('Epoch: {}'.format(epoch))
def test(model, criterion, test_loader, epoch, is_main):
    """Evaluate on the test set; appends the epoch's correct count to the
    module-level `max_result` list (set up by do_learning) so logs can
    report the best accuracy so far. `is_main` only changes the prefix."""
    model.eval()
    test_loss = 0
    correct = 0
    total = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        if torch.cuda.is_available():
            data, target = Variable(data.cuda()), Variable(target.cuda())
        else:
            data, target = Variable(data), Variable(target)
        outputs = model(data)
        loss = criterion(outputs, target)

        # NOTE(review): `.data[0]` is pre-0.4 PyTorch; confirm version.
        test_loss += loss.data[0]
        _, predicted = torch.max(outputs.data, 1)
        total += target.size(0)
        correct += predicted.eq(target.data).cpu().sum()

    max_result.append(correct)
    if is_main is True:
        utils.print_log('# TEST : Epoch : {} | Loss: ({:.4f}) | Acc: ({:.2f}%) ({}/{}) | Err: ({:.2f}%) | Max: ({})'
              .format(epoch, test_loss/(batch_idx+1), 100.*correct/total, correct, total, 100-100.*correct/total, max(max_result)))
        print('# TEST : Epoch : {} | Loss: ({:.4f}) | Acc: ({:.2f}%) ({}/{}) | Err: ({:.2f}% | Max: ({}))'
              .format(epoch, test_loss/(batch_idx+1), 100.*correct/total, correct, total, 100-100.*correct/total, max(max_result)))
    elif is_main is False:
        utils.print_log('$ TEST_S : Epoch : {} | Loss: ({:.4f}) | Acc: ({:.2f}%) ({}/{}) | Err: ({:.2f}%) | Max: ({})'
              .format(epoch, test_loss/(batch_idx+1), 100.*correct/total, correct, total, 100-100.*correct/total, max(max_result)))
        print('$ TEST_S : Epoch : {} | Loss: ({:.4f}) | Acc: ({:.2f}%) ({}/{}) | Err: ({:.2f}% | Max: ({}))'
              .format(epoch, test_loss/(batch_idx+1), 100.*correct/total, correct, total, 100-100.*correct/total, max(max_result)))
# Candidate ResNet-style depths; only index 5 (110) is used below.
layer_set = [14, 20, 32, 44, 56, 110]

def do_learning(model_dir, db, layer, num_gate=0, batch_s=128, block_config=(6,6,6), is_bottleneck=True):
    """Build a DenseNet with the given block configuration and train it on
    cifar<db>, resetting the global best-accuracy tracker used by test()."""
    global max_result
    max_result = []
    model_selection = DenseNet(num_classes=10, num_gate=num_gate
                               , block_config=block_config, is_bottleneck=is_bottleneck)
    dataset = 'cifar' + str(db)
    main(model_dir, model_selection, dataset, batch_s)

if __name__=='__main__':
    # Ten runs alternating two DenseNet configurations (with/without
    # bottleneck layers), each saved under its own model directory.
    for i in range(10):
        if i % 2 == 0:
            block_config = (12, 12, 12)
            is_bottleneck = False
        else:
            block_config = (6,6,6)
            is_bottleneck = True
        model_dir = '../hhjung/Dense_Prop/cifar10/DenseNet40/' + str(i)
        do_learning(model_dir, 10, layer_set[5], num_gate=0
                    , batch_s=64, block_config=block_config, is_bottleneck=is_bottleneck)
|
[
"hhjung1202@naver.com"
] |
hhjung1202@naver.com
|
e69f0f7583c1022af9442415e61c2769e37c4122
|
dbf770eef8233f7da1850309cc4b7145bd8d67f1
|
/PYTHON-ADVANCED-SEPT-2020/PYTHON ADVANCED/03_MULTYDIMENSINAL LISTS/EXERCISE/06_chess.py
|
654a9eb872ce75844a3566d42fa88934b8ec214a
|
[] |
no_license
|
vasil-panoff/PYTHON-ADVANCED-SEPT-2020_repo
|
610a37d1681ce9d0aa86628523620e1571b438dd
|
c63434f91de42d2f1241b6d76a96c7c63711c1d0
|
refs/heads/master
| 2023-03-22T07:44:53.620221
| 2021-03-15T20:42:14
| 2021-03-15T20:42:14
| 309,829,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,217
|
py
|
# The eight (row, col) offsets a knight can jump to.
possible_moves = (
    (-1, -2),
    (-1, 2),
    (1, -2),
    (1, 2),
    (2, -1),
    (2, 1),
    (-2, 1),
    (-2, -1),
)

board_size = int(input())

# Board starts empty ('0'); 'K' cells are filled in while reading the input.
matrix = [['0'] * board_size for i in range(board_size)]
def is_valid(i, j):
    """True when (i, j) lies on the board and currently holds a knight."""
    on_board = 0 <= i < board_size and 0 <= j < board_size
    return on_board and matrix[i][j] == "K"
# Maps a knight's (row, col) -> list of knight positions it attacks.
knights_dict = {}

def update_knights(i1, j1, i2, j2):
    """Register a mutual attack between the knights at (i1, j1) and
    (i2, j2), provided the target square actually holds a knight."""
    if not is_valid(i2, j2):
        return
    knights_dict.setdefault((i2, j2), []).append((i1, j1))
    knights_dict.setdefault((i1, j1), []).append((i2, j2))
# Read the board row by row; for every knight found, try to register the
# eight squares it attacks. Note attacks toward rows not yet read are
# skipped here and picked up symmetrically when the later knight is read.
for i in range(board_size):
    row = list(input())
    for j in range(board_size):
        if row[j] == "K":
            matrix[i][j] = "K"
            for move_i, move_j in possible_moves:
                i1 = i
                j1 = j
                i2 = i + move_i
                j2 = j + move_j
                update_knights(i1, j1, i2, j2)
# Greedily remove the knight involved in the most attacks until no knight
# attacks another, counting removals. (The original called undefined
# get_max_knight / remove_knight helpers — a NameError at runtime — and
# contained a no-op bare `knights_dict` statement.)
num_removed = 0
while knights_dict:
    target = max(knights_dict, key=lambda pos: len(knights_dict[pos]))
    if not knights_dict[target]:
        break  # nobody attacks anybody any more
    matrix[target[0]][target[1]] = "0"
    # Drop the removed knight from every attacker's adjacency list.
    for attacker in knights_dict.pop(target):
        neighbours = knights_dict.get(attacker)
        if neighbours and target in neighbours:
            neighbours.remove(target)
    num_removed += 1
print(num_removed)
|
[
"vasil.panov@gmail.com"
] |
vasil.panov@gmail.com
|
00784a17b99b4077db9e72d37bf5cb26749d3043
|
64bf39b96a014b5d3f69b3311430185c64a7ff0e
|
/intro-ansible/venv3/lib/python3.8/site-packages/ansible_test/_data/sanity/code-smell/changelog.py
|
710b10f6c08ec6f6580b2837b46f9a06e6302fd6
|
[
"MIT"
] |
permissive
|
SimonFangCisco/dne-dna-code
|
7072eba7da0389e37507b7a2aa5f7d0c0735a220
|
2ea7d4f00212f502bc684ac257371ada73da1ca9
|
refs/heads/master
| 2023-03-10T23:10:31.392558
| 2021-02-25T15:04:36
| 2021-02-25T15:04:36
| 342,274,373
| 0
| 0
|
MIT
| 2021-02-25T14:39:22
| 2021-02-25T14:39:22
| null |
UTF-8
|
Python
| false
| false
| 1,420
|
py
|
#!/usr/bin/env python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import subprocess
def main():
    """Validate changelog fragment paths (argv, or stdin when no args) and
    hand the survivors to antsibull-changelog's linter."""
    names = sys.argv[1:] or sys.stdin.read().splitlines()

    allowed_extensions = ('.yml', '.yaml')
    config_path = 'changelogs/config.yaml'

    # The config file is detected on disk rather than from the input list,
    # because the input list only contains changed files under test.
    config_exists = os.path.exists(config_path)

    to_lint = []
    for name in names:
        if name == config_path:
            continue

        if name.startswith('changelogs/fragments/.'):
            if name not in ('changelogs/fragments/.keep', 'changelogs/fragments/.gitkeep'):
                print('%s:%d:%d: file must not be a dotfile' % (name, 0, 0))
            continue

        if os.path.splitext(name)[1] not in allowed_extensions:
            print('%s:%d:%d: extension must be one of: %s' % (name, 0, 0, ', '.join(allowed_extensions)))

        to_lint.append(name)

    if not config_exists:
        print('changelogs/config.yaml:0:0: config file does not exist')
        return

    if not to_lint:
        return

    # Return code deliberately ignored; callers act on the printed output.
    cmd = [sys.executable, '-m', 'antsibull_changelog', 'lint'] + to_lint
    subprocess.call(cmd)

if __name__ == '__main__':
    main()
|
[
"sifang@cisco.com"
] |
sifang@cisco.com
|
9c7efaaa782f42236b3ee163464ef9d613bc033c
|
0a5c103662e2ccea7698480bca28fb5c285aeafb
|
/info/dicom.py
|
033a6a5fb0ed810ef20d7d4d98a1b7d9b7f8d109
|
[] |
no_license
|
joanshen0508/image_preprocessing
|
a8b9dc90e92552ca11af8b220a2ce235a558aef1
|
478e63593884d572a049590588df158c59447bab
|
refs/heads/master
| 2022-04-02T17:08:56.559871
| 2019-10-29T15:24:48
| 2019-10-29T15:24:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,477
|
py
|
from __future__ import division, print_function
import os
from os.path import join
from pandas import DataFrame
import re
import dicom
from inout.io_common import get_dicom_files_in_folder
class DicomDataSummary():
    """
    This function allows the generation of information stored on nrrd files.
    """
    def __init__(self, **kwargs):
        # Defaults; note these assignments route through __setattr__ and
        # are therefore stored as _input_folder / _output_folder.
        self.input_folder = 'input'
        self.output_folder = 'output'
        # All the arguments that are passed to the constructor of the class MUST have its name on it.
        for arg_name, arg_value in kwargs.items():
            self.__dict__["_" + arg_name] = arg_value

    def __getattr__(self, attr):
        '''Generic getter for all the properties of the class'''
        # NOTE(review): raises KeyError (not AttributeError) for unknown
        # attributes, which breaks hasattr()/copy — confirm intended.
        return self.__dict__["_" + attr]

    def __setattr__(self, attr, value):
        '''Generic setter for all the properties of the class'''
        self.__dict__["_" + attr] = value

    def generate_data_summary(self, folder_name_regex, file_name='data_summary'):
        """It generates a small summary from the data_sum as a CSV file (shape and voxel size)

        For each case directory under input_folder, finds the subfolder
        matching folder_name_regex, reads the first DICOM file in it, and
        records acquisition metadata plus size/spacing into a DataFrame
        saved under output_folder.
        :param folder_name_regex: regex selecting the DICOM subfolder per case
        :return: None (writes the CSV as a side effect)
        """
        cases = [x for x in os.listdir(self._input_folder) if os.path.isdir(join(self._input_folder, x))]
        cases.sort()
        # DataFrame column -> pydicom dataset attribute name.
        colums_dic = {'Date':'AcquisitionDate',
                      'EchoTime':'EchoTime',
                      'EchoTrainLength':'EchoTrainLength',
                      'Manufacturer':'Manufacturer',
                      'Model':'ManufacturerModelName',
                      'Modality':'Modality',
                      'RepetitionTime': 'RepetitionTime',
                      'Orientation': 'ImageOrientationPatient'}
        extra_columns = ['Size', 'Spacing', 'PixelSize']
        all_columns = extra_columns + list(colums_dic.keys())
        data_sum = DataFrame(index=cases, columns=all_columns)

        # In this case we look for folders inside each case
        for c_case in cases:
            print(F"---------- {c_case}----------")
            try:
                matched_folders = [x for x in os.listdir(join(self._input_folder, c_case)) if not (re.search(folder_name_regex, x) is None)]
                if len(matched_folders) > 1:
                    print(F'Warning: more than one folder matched: {matched_folders}')
                if len(matched_folders) == 0:
                    print(F'Warning: folder not matched for {c_case}')
                    continue
                else:
                    final_folder_name = join(self._input_folder, c_case, matched_folders[0])
                    all_dicom_files = get_dicom_files_in_folder(final_folder_name)
                    ds = dicom.read_file(all_dicom_files[0])  # Reads dataset
                    # NOTE: eval over the trusted keys of colums_dic only;
                    # do not pass user input as column names.
                    for c_name, c_key in colums_dic.items():
                        data_sum.loc[c_case][c_name] = eval(F'ds.{c_key}')
                    data_sum.loc[c_case]['Size'] = F'{ds.Rows} x {ds.Columns} x {len(all_dicom_files)}'
                    spacing = ds.PixelSpacing
                    data_sum.loc[c_case]['Spacing'] = F'{spacing[0]} x {spacing[1]} x {ds.SliceThickness}'
                    data_sum.loc[c_case]['PixelSize'] = F'{spacing[0]*spacing[1]*ds.SliceThickness:.2f}'
            except Exception as e:
                # Best-effort per case: log and move on to the next one.
                print(F'Failed for folder {c_case}: {e}')
                continue

        data_sum.to_csv(join(self._output_folder, file_name))
|
[
"olmozavala@gmail.com"
] |
olmozavala@gmail.com
|
9d1e8ffeefbf7cee1e32d4c38a282759cf4dd220
|
577ba42cbf0a3230966ac66ef60fd401486e4c06
|
/website/apps/core/migrations/0021_transfer_year.py
|
96778623b064dec78ae6724511bdcd803f81ac46
|
[
"Apache-2.0"
] |
permissive
|
shh-dlce/pulotu
|
984ca86de3ffe03e83bbb15b0d497f1ebf190ecd
|
82acbb8a3b7f3ec3acc76baffd4047265a77f7d3
|
refs/heads/master
| 2021-01-10T03:51:13.337840
| 2015-12-09T09:46:55
| 2015-12-09T09:46:55
| 46,917,922
| 2
| 0
|
Apache-2.0
| 2021-11-16T11:51:48
| 2015-11-26T09:48:42
|
Python
|
UTF-8
|
Python
| false
| false
| 8,072
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    # Data migration: copies the integer `year` into the new CharField
    # `year_new` on every Source row (schema added in a prior migration).

    def forwards(self, orm):
        "Write your forwards methods here."
        # Note: Don't use "from appname.models import ModelName".
        # Use orm.ModelName to refer to models in this application,
        # and orm['appname.ModelName'] for models in other applications.
        for source in orm.Source.objects.all():
            source.year_new = source.year
            source.save()

    def backwards(self, orm):
        "Write your backwards methods here."
        # Deliberately irreversible: the int column cannot be rebuilt
        # from arbitrary CharField content.
        raise RuntimeError("Cannot reverse this migration!")

    # Frozen ORM definitions generated by South — do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'core.culture': {
            'Meta': {'ordering': "['culture']", 'object_name': 'Culture', 'db_table': "'cultures'"},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'coder': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'culture': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
            'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'fact': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Language']", 'symmetrical': 'False', 'blank': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'})
        },
        u'core.language': {
            'Meta': {'ordering': "['language']", 'unique_together': "(('isocode', 'language'),)", 'object_name': 'Language', 'db_table': "'languages'"},
            'abvdcode': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'classification': ('django.db.models.fields.TextField', [], {}),
            'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'isocode': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3', 'db_index': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
        },
        u'core.section': {
            'Meta': {'ordering': "['id']", 'object_name': 'Section', 'db_table': "'sections'"},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'section': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'})
        },
        u'core.source': {
            'Meta': {'ordering': "['author', 'year']", 'unique_together': "(['author', 'year'],)", 'object_name': 'Source', 'db_table': "'sources'", 'index_together': "[['author', 'year']]"},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'reference': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1000'}),
            'year': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'year_new': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['core']
    symmetrical = True
|
[
"xrotwang@googlemail.com"
] |
xrotwang@googlemail.com
|
0cb269eb77b00fc282b0b7a98450a744901f9bee
|
af4abf0a22db1cebae466c56b45da2f36f02f323
|
/parser/fase2/team08/Tytus_SQLPARSER_G8/optimizacion/Instrucciones/C3D/LlamadaC3D.py
|
dc3d1a8494a7f8ac134381a1aa4f9d6d7c4e705b
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
joorgej/tytus
|
0c29408c09a021781bd3087f419420a62194d726
|
004efe1d73b58b4b8168f32e01b17d7d8a333a69
|
refs/heads/main
| 2023-02-17T14:00:00.571200
| 2021-01-09T00:48:47
| 2021-01-09T00:48:47
| 322,429,634
| 3
| 0
|
MIT
| 2021-01-09T00:40:50
| 2020-12-17T22:40:05
|
Python
|
UTF-8
|
Python
| false
| false
| 594
|
py
|
from optimizacion.Instrucciones.TablaSimbolos.InstruccionC3D import InstruccionC3D
class LlamadaC3D(InstruccionC3D):
def __init__(self, id,linea, columna):
InstruccionC3D.__init__(self,linea,columna)
self.id = id
print("ENTRO A expresiones")
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
print(" linea: " + str(self.linea) + " columna: " + str(self.columna))
if self.id != None :
if(self.id == "main"):
return self.id + "()"
else:
return self.id +"()"
|
[
"michikatrins@gmail.com"
] |
michikatrins@gmail.com
|
b93260df15ec3b7ec598572a2cee1d41b1db0c22
|
41a672c9505b5b53c58a01d5455acc410949aa24
|
/tests/aoutgoing/negative/group/C_39.py
|
2f601a227919b3259ac2e7c4da1ce6d2ad77009c
|
[] |
no_license
|
Alexsorgo/mobile_iOS
|
b045a0ea058726841c88158be8407b7ae45e893e
|
7e298f890b408cedad9db9d0aefeccd9c10d6002
|
refs/heads/master
| 2022-12-12T17:26:14.039876
| 2020-03-18T06:34:56
| 2020-03-18T06:34:56
| 248,154,882
| 0
| 0
| null | 2021-06-02T01:13:05
| 2020-03-18T06:25:17
|
Python
|
UTF-8
|
Python
| false
| false
| 956
|
py
|
from configs import config
from enums import error_enums
from screens.group.group_screen import GroupScreen
from controls.menu import Menu
from tests.aoutgoing.base_test import BaseTest
from utils.logs import log
from utils.verify import Verify
class TestC39(BaseTest):
"""
User has the ability to create group chat with 1 more user
"""
EMPTY_NAME = ''
FRIEND = config.AMERICA_FIRSTNAME + ' ' + config.AMERICA_LASTNAME
def test_c39(self):
log.info("Create group with empty group name")
menu = Menu(self.driver)
group = GroupScreen(self.driver)
menu.go_to(menu.wenums.GROUPS, [menu.wenums.NEW_GROUP])
group.add_user(self.FRIEND)
group.tap_done()
group.tap_group_name()
group.set_group_name(self.EMPTY_NAME)
group.tap_save()
log.info("Verify group doesn't create")
Verify.true(group.error_verify(error_enums.GROUP_NAME_MIN), "Group created")
|
[
"oleksii_mishchenko@epam.com"
] |
oleksii_mishchenko@epam.com
|
0afb5da3c5bf377521020e90704fbd297b46c016
|
e5b778a273e3888ad0575a9dada39d458158127a
|
/students/migrations/0009_lesson_icon.py
|
a4f54c33999a2c365ea2cd47c5cf66dca551542c
|
[] |
no_license
|
SevenLines/django-tealeaf
|
896784baead7b9514e83edad8c3c2defdcdd060b
|
959dbcbdd37a4e8f45de400e71710c5e746a97da
|
refs/heads/master
| 2021-01-23T00:01:43.793383
| 2015-05-15T15:58:52
| 2015-05-15T15:58:52
| 17,891,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import filer.fields.image
class Migration(migrations.Migration):
dependencies = [
('filer', '__first__'),
('students', '0008_auto_20141128_1807'),
]
operations = [
migrations.AddField(
model_name='lesson',
name='icon',
field=filer.fields.image.FilerImageField(default=None, blank=True, to='filer.Image', null=True),
preserve_default=True,
),
]
|
[
"mmailm@mail.ru"
] |
mmailm@mail.ru
|
e7a9408e49112ddd9f5aafdb874c3377f4ad2d1c
|
767745e9c6207db9f6a9cf4f0be1af4732e7a111
|
/raiden/tests/integration/transfer/test_directransfer_invalid.py
|
d88e7c9f8a32e7f40677ba8b9fc4c75d7c1a3340
|
[
"MIT"
] |
permissive
|
gcarq/raiden
|
ecc91860b99447028baea7fd171c19996644a5ef
|
82241c6da9188c4e029aef3bb42f0ab9f055c0e4
|
refs/heads/master
| 2020-03-10T03:31:55.174762
| 2018-04-11T19:18:21
| 2018-04-11T19:18:21
| 129,167,527
| 0
| 0
|
MIT
| 2018-04-11T23:52:12
| 2018-04-11T23:52:12
| null |
UTF-8
|
Python
| false
| false
| 6,738
|
py
|
# -*- coding: utf-8 -*-
import pytest
from raiden.api.python import RaidenAPI
from raiden.messages import DirectTransfer
from raiden.transfer import channel
from raiden.transfer.state import EMPTY_MERKLE_ROOT
from raiden.tests.utils.blockchain import wait_until_block
from raiden.tests.utils.factories import (
UNIT_HASHLOCK,
make_address,
make_privkey_address,
)
from raiden.tests.utils.transfer import (
assert_synched_channel_state,
get_channelstate,
sign_and_inject,
)
@pytest.mark.skip(reason='direct_transfer_async doesnt return AsyncResult anymore')
@pytest.mark.parametrize('channels_per_node', [1])
@pytest.mark.parametrize('number_of_nodes', [2])
def test_failsfast_directtransfer_exceeding_distributable(
raiden_network,
token_addresses,
deposit
):
alice_app, bob_app = raiden_network
token_address = token_addresses[0]
async_result = alice_app.raiden.direct_transfer_async(
token_address,
deposit * 2,
bob_app.raiden.address,
identifier=1,
)
assert not async_result.get_nowait()
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
def test_receive_directtransfer_invalidtoken(raiden_network, deposit, token_addresses):
app0, app1 = raiden_network
token_address = token_addresses[0]
channel0 = get_channelstate(app0, app1, token_address)
identifier = 1
invalid_token_address = make_address()
channel_identifier = channel0.identifier
direct_transfer_message = DirectTransfer(
identifier=identifier,
nonce=1,
token=invalid_token_address,
channel=channel_identifier,
transferred_amount=0,
recipient=app1.raiden.address,
locksroot=EMPTY_MERKLE_ROOT,
)
sign_and_inject(
direct_transfer_message,
app0.raiden.private_key,
app0.raiden.address,
app1,
)
assert_synched_channel_state(
token_address,
app0, deposit, [],
app1, deposit, [],
)
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
def test_receive_directtransfer_invalidlocksroot(raiden_network, token_addresses):
app0, app1 = raiden_network
token_address = token_addresses[0]
channel0 = get_channelstate(app0, app1, token_address)
balance0 = channel.get_balance(channel0.our_state, channel0.partner_state)
balance1 = channel.get_balance(channel0.partner_state, channel0.our_state)
identifier = 1
invalid_locksroot = UNIT_HASHLOCK
channel_identifier = channel0.identifier
direct_transfer_message = DirectTransfer(
identifier=identifier,
nonce=1,
token=token_address,
channel=channel_identifier,
transferred_amount=0,
recipient=app1.raiden.address,
locksroot=invalid_locksroot,
)
sign_and_inject(
direct_transfer_message,
app0.raiden.private_key,
app0.raiden.address,
app1,
)
assert_synched_channel_state(
token_address,
app0, balance0, [],
app1, balance1, []
)
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
def test_receive_directtransfer_invalidsender(raiden_network, deposit, token_addresses):
app0, app1 = raiden_network
token_address = token_addresses[0]
other_key, other_address = make_privkey_address()
channel0 = get_channelstate(app0, app1, token_address)
channel_identifier = channel0.identifier
direct_transfer_message = DirectTransfer(
identifier=1,
nonce=1,
token=token_address,
channel=channel_identifier,
transferred_amount=10,
recipient=app0.raiden.address,
locksroot=EMPTY_MERKLE_ROOT,
)
sign_and_inject(
direct_transfer_message,
other_key,
other_address,
app0,
)
assert_synched_channel_state(
token_address,
app0, deposit, [],
app1, deposit, []
)
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
def test_receive_directtransfer_invalidnonce(raiden_network, deposit, token_addresses):
app0, app1 = raiden_network
token_address = token_addresses[0]
channel0 = get_channelstate(app0, app1, token_address)
transferred_amount = 10
same_identifier = 1
event = channel.send_directtransfer(
channel0,
transferred_amount,
same_identifier,
)
direct_transfer_message = DirectTransfer.from_event(event)
sign_and_inject(
direct_transfer_message,
app0.raiden.private_key,
app0.raiden.address,
app1,
)
# Send a *different* direct transfer with the *same nonce*
invalid_transferred_amount = transferred_amount // 2
invalid_direct_transfer_message = DirectTransfer(
identifier=same_identifier,
nonce=1,
token=token_address,
channel=channel0.identifier,
transferred_amount=invalid_transferred_amount,
recipient=app1.raiden.address,
locksroot=EMPTY_MERKLE_ROOT,
)
sign_and_inject(
invalid_direct_transfer_message,
app0.raiden.private_key,
app0.raiden.address,
app1,
)
assert_synched_channel_state(
token_address,
app0, deposit - transferred_amount, [],
app1, deposit + transferred_amount, [],
)
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
@pytest.mark.parametrize('settle_timeout', [30])
def test_received_directtransfer_closedchannel(raiden_network, token_addresses, deposit):
app0, app1 = raiden_network
token_address = token_addresses[0]
channel0 = get_channelstate(app0, app1, token_address)
RaidenAPI(app1.raiden).channel_close(
token_address,
app0.raiden.address,
)
wait_until_block(
app0.raiden.chain,
app0.raiden.chain.block_number() + 1,
)
# Now receive one direct transfer for the closed channel
direct_transfer_message = DirectTransfer(
identifier=1,
nonce=1,
token=token_address,
channel=channel0.identifier,
transferred_amount=10,
recipient=app0.raiden.address,
locksroot=EMPTY_MERKLE_ROOT,
)
sign_and_inject(
direct_transfer_message,
app0.raiden.private_key,
app0.raiden.address,
app1,
)
# The local state must not change since the channel is already closed
assert_synched_channel_state(
token_address,
app0, deposit, [],
app1, deposit, [],
)
|
[
"hack.augusto@gmail.com"
] |
hack.augusto@gmail.com
|
b020ce1d7374b7195c3545ce178c7b9387f9ddd1
|
72b8e2d69cca8b5ecd28e61ef61fef85f9dd0489
|
/q190.py
|
3bf0f6319123cdb7f2dd25ae44e6f074a9eafef1
|
[] |
no_license
|
maples1993/LeetCode
|
f975bc8570729d998481b097ee04effe5a7c5977
|
032016724564d0bee85f9e1b9d9d6c769d0eb667
|
refs/heads/master
| 2020-03-27T22:05:07.397746
| 2018-11-07T06:13:56
| 2018-11-07T06:13:56
| 147,203,152
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
"""
Date: 2018/9/6
"""
class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
n &= 0xFFFFFFFF
print(bin(n))
res = 0 & 0xFFFFFFFF
count = 0
while count < 32:
count += 1
res <<= 1
if n & 1 == 1:
res += 1
n >>= 1
return res
print(Solution().reverseBits(43261596))
|
[
"panchao1993@126.com"
] |
panchao1993@126.com
|
e1fc43f35600eb1ab30bcb687acd093d5345c74f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_veritable.py
|
c4260f18bbed72556a78374e7679857fe6dc69a3
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
#calss header
class _VERITABLE():
def __init__(self,):
self.name = "VERITABLE"
self.definitions = [u'used to describe something as another, more exciting, interesting, or unusual thing, as a way of emphasizing its character: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
55eb160926cb77920b63568d4be18c54eeebdb2d
|
41b59a9c8381fa3a92f5d2c37c91261afb9c82c4
|
/QCDEventShape/2017/MC/test/crab_bin_py8_3200_inf.py
|
ad911d60a95de92ad286c8ea8f0a46bafbafeab1
|
[] |
no_license
|
Sumankkundu/ChargedParticle
|
c6d4f90b55df49321df2ecd758bb1f39db896f8c
|
eb5bada24b37a58ded186d6e5d2d7bd00898fefe
|
refs/heads/master
| 2023-07-15T03:34:33.377203
| 2021-08-31T05:01:32
| 2021-08-31T05:01:32
| 231,091,587
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,495
|
py
|
#from CRABClient.UserUtilities import config, getUsernameFromSiteDB
from CRABClient.UserUtilities import config
config = config()
config.General.requestName ='ESVQCD_UL_Ptbinned_3200toinf_tuneCP5_bin'
#config.General.workArea = 'crab_projects_1'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'Run_QCD_test_miaod_v2_106x_mc_cfg.py'
#config.JobType.maxMemoryMB = 9000 # Default is 2500 : Max I have used is 13000
#config.JobType.maxJobRuntimeMin = 2750 #Default is 1315; 2750 minutes guaranteed to be available; Max I have used is 9000
#config.JobType.numCores = 4
config.JobType.inputFiles= [
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_JRV2_MC_PtResolution_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_JRV2_MC_SF_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_RunB_V5_DATA_UncertaintySources_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_RunC_V5_DATA_UncertaintySources_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_RunD_V5_DATA_UncertaintySources_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_RunE_V5_DATA_UncertaintySources_AK4PFchs.txt",
"/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/AK4PFCHS_Summer19UL/Summer19UL17_RunF_V5_DATA_UncertaintySources_AK4PFchs.txt"
]
config.Data.inputDataset ='/QCD_Pt_3200toInf_TuneCP5_13TeV_pythia8/RunIISummer19UL17MiniAOD-106X_mc2017_realistic_v6-v2/MINIAODSIM'
config.Data.inputDBS = 'global'
#config.Data.splitting = 'EventBased'
#config.Data.splitting = 'LumiBased'
config.Data.splitting = 'FileBased'
#config.Data.splitting = 'Automatic'
#config.Data.unitsPerJob = 10 # for Automatic must be 180-2700 range
config.Data.unitsPerJob = 1 #For Filebased or Lumibased
#config.Data.outLFNDirBase = '/store/user/%s/' % (getUsernameFromSiteDB())
#config.Data.outLFNDirBase = '/store/user/%s/' % (sukundu)
config.Data.publication = True
config.Data.outputDatasetTag = 'MC_PY82017UL_Bin'
config.JobType.allowUndistributedCMSSW = True
config.Site.storageSite ='T2_IN_TIFR'
|
[
"skundu91phys@gmail.com"
] |
skundu91phys@gmail.com
|
64592d3ee4f2219d3ea1f98f687bdb1984f866da
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02780/s702903623.py
|
ef5c9f02ab956fe90728da489ecd4bc87f90841f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
n,k = map(int,input().split())
P = list(map(int,input().split()))
P[0] = (P[0]+1.)/2
for i in range(1,len(P)):
P[i] = (P[i]+1.)/2
P[i] = P[i-1]+P[i]
ans = 0.
if n==1:
ans = P[0]
elif len(P)-k==0:
ans = P[k-1]
else:
for i in range(len(P)-k):
ans = max(ans,(P[i+k]-P[i]))
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
5208906c09939f76f644bef4f999ef65b8a1cfae
|
37438771565238194ea997fa65619bd32c823706
|
/catkin_ws/17-11-16/LPH/build/catkin_generated/order_packages.py
|
24ce42469160b8cc3411cbaef6a5190b3592e0f2
|
[] |
no_license
|
Aaron9477/restore
|
b040b8be695c513946c0243c4acb735f427d8bba
|
8dc13ed7cf0c4e5cde911169d11e330d826f40bd
|
refs/heads/master
| 2021-09-15T10:50:59.969952
| 2018-05-31T03:11:55
| 2018-05-31T03:11:55
| 110,834,815
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
# generated from catkin/cmake/template/order_packages.context.py.in
source_root_dir = "/home/zq610/LPH/src"
whitelisted_packages = "".split(';') if "" != "" else []
blacklisted_packages = "".split(';') if "" != "" else []
underlay_workspaces = "/opt/ros/kinetic".split(';') if "/opt/ros/kinetic" != "" else []
|
[
"869788668@qq.com"
] |
869788668@qq.com
|
310ba0cb9368a175620ca3cbcbd62104bf3f9f8b
|
edc1f1369794a4a1c499c6e9d5fe49a712657611
|
/algorithms/leetcode_all/560.subarray-sum-equals-k/subarray-sum-equals-k.py
|
74c28e9996672f15fe435da46bf9edd7cf5ffdc2
|
[] |
no_license
|
williamsyb/mycookbook
|
93d4aca1a539b506c8ed2797863de6da8a0ed70f
|
dd917b6eba48eef42f1086a54880bab6cd1fbf07
|
refs/heads/master
| 2023-03-07T04:16:18.384481
| 2020-11-11T14:36:54
| 2020-11-11T14:36:54
| 280,005,004
| 2
| 0
| null | 2023-03-07T02:07:46
| 2020-07-15T23:34:24
|
Python
|
UTF-8
|
Python
| false
| false
| 379
|
py
|
class Solution(object):
def subarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
preSum = ans = 0
visit = {0: 1}
for i, n in enumerate(nums):
preSum += n
ans += visit.get(preSum - k, 0)
visit[preSum] = visit.get(preSum, 0) + 1
return ans
|
[
"william_sun1990@hotmail.com"
] |
william_sun1990@hotmail.com
|
d6b0539a2cd34a3318a634029493799c8d1029ff
|
2aec9c5e8c72b731d3abf22f2a407fe09c1cde09
|
/ZQZ510/ZQZ510/spiders/zqz.py
|
3a4ae2a8fc42615dd7eaaf1a56965897c452c5d3
|
[] |
no_license
|
jiangyg/ZWFproject
|
8b24cc34970ae0a9c2a2b0039dc527c83a5862b5
|
aa35bc59566d92721f23d2dd00b0febd268ac2dd
|
refs/heads/master
| 2020-09-26T17:01:00.229380
| 2019-11-15T13:16:21
| 2019-11-15T13:16:21
| 226,297,631
| 0
| 1
| null | 2019-12-06T09:55:37
| 2019-12-06T09:55:36
| null |
UTF-8
|
Python
| false
| false
| 5,422
|
py
|
# -*- coding: utf-8 -*-
import scrapy
import time
import json
from ZQZ510.items import Zqz510Item
empty_word = 'null'
class ZqzSpider(scrapy.Spider):
name = 'zqz'
allowed_domains = ['zqz510.com']
start_urls = ['http://login.zqz510.com/judgmentDoc']
def parse(self, response):
url = 'http://api.zqz510.com//tmof/query?ftxt=&ti=&apS=&pdStart=&pdEnd=&ty=&psty=&law=&litem=&pageNum=1' \
'&apS=&apD=&ag=&judgd=&tid=&cid=&callback=_jqjsp&_{}='.format(str(int(time.time() * 1000)))
self.cookie = {
'uid': '213facea-5ac7-4069-ae4a-97168d559ebc',
'oid': 'UAGAP00003919',
'JSESSIONID': '9867C3C37D24634CB9D44D1AA5C6188F',
'c': '82f5dd5f-f8ae-459b-9907-fd0bb01d97cb',
}
yield scrapy.Request(url=url, callback=self.parse_first, cookies=self.cookie)
def parse_first(self, response):
json_text = json.loads(response.text[7:-1], encoding='utf-8')
total = int(json_text['total'])
all_page = int(total / 10) + 1
for page in range(all_page):
url = 'http://api.zqz510.com//tmof/query?ftxt=&ti=&apS=&pdStart=&pdEnd=&ty=&psty=&law=&litem=&pageNum={}' \
'&apS=&apD=&ag=&judgd=&tid=&cid=&callback=_jqjsp&_{}='.format(str(page + 1), str(int(time.time() * 1000)))
yield scrapy.Request(url=url, callback=self.parse_list, cookies=self.cookie)
def parse_list(self, response):
json_text = json.loads(response.text[7:-1], encoding='utf-8')
for data in json_text['data']:
item = Zqz510Item()
if 'agS' in data:
item['agS'] = data['agS']
else:
item['agS'] = empty_word
if 'agidS' in data:
item['agidS'] = data['agidS']
else:
item['agidS'] = empty_word
if 'an' in data:
item['an'] = data['an']
else:
item['an'] = empty_word
if 'anDest' in data:
item['anDest'] = data['anDest']
else:
item['anDest'] = empty_word
if 'anList' in data:
item['anList'] = str(data['anList'])
else:
item['anList'] = empty_word
if 'apS' in data:
item['apS'] = data['apS']
else:
item['apS'] = empty_word
if 'apidS' in data:
item['apidS'] = data['apidS']
else:
item['apidS'] = empty_word
if 'cid' in data:
item['cid'] = data['cid']
else:
item['cid'] = empty_word
if 'docid' in data:
item['docid'] = data['docid']
else:
item['docid'] = empty_word
if 'law' in data:
item['law'] = data['law']
else:
item['law'] = empty_word
if 'link' in data:
item['link'] = data['link']
else:
item['link'] = empty_word
if 'litem' in data:
item['litem'] = data['litem']
else:
item['litem'] = empty_word
if 'ltid' in data:
item['ltid'] = data['ltid']
else:
item['ltid'] = empty_word
if 'pd' in data:
item['pd'] = data['pd']
else:
item['pd'] = empty_word
if 'psty' in data:
item['psty'] = data['psty']
else:
item['psty'] = empty_word
if 'rid' in data:
item['rid'] = data['rid']
else:
item['rid'] = empty_word
if 'ti' in data:
item['ti'] = data['ti']
else:
item['ti'] = empty_word
if 'ty' in data:
item['ty'] = data['ty']
else:
item['ty'] = empty_word
detail_url = 'http://api.zqz510.com/tmof/detail?docid={}&callback=_jqjsp&_{}='.format(item['docid'], str(int(time.time() * 1000)))
yield scrapy.Request(url=detail_url, callback=self.parse_detail, meta={'item': item}, cookies=self.cookie)
def parse_detail(self, response):
json_text = json.loads(response.text[7:-1], encoding='utf-8')
item = response.meta['item']
if 'dtls' in json_text:
item['dtls'] = str(json_text['dtls'])
else:
item['dtls'] = empty_word
if 'ftxt' in json_text:
item['ftxt'] = json_text['ftxt']
else:
item['ftxt'] = empty_word
if 'judg' in json_text:
item['judg'] = str(json_text['judg'])
else:
item['judg'] = empty_word
if 'judgList' in json_text:
item['judgList'] = str(json_text['judgList'])
else:
item['judgList'] = empty_word
if 'links' in json_text:
item['links'] = str(json_text['links'])
else:
item['links'] = empty_word
if 'ltidAll' in json_text:
item['ltidAll'] = str(json_text['ltidAll'])
else:
item['ltidAll'] = empty_word
if 'pdCn' in json_text:
item['pdCn'] = str(json_text['pdCn'])
else:
item['pdCn'] = empty_word
yield item
|
[
"34021500@qq.com"
] |
34021500@qq.com
|
d72f0e6e1d8aaabc1a02b10a8fbc864b8f6d0b65
|
29345337bf86edc938f3b5652702d551bfc3f11a
|
/python/src/main/python/pyalink/alink/tests/examples/from_docs/test_totensorstreamop.py
|
78c1de91112c783148b8652120fe7425e975fcf9
|
[
"Apache-2.0"
] |
permissive
|
vacaly/Alink
|
32b71ac4572ae3509d343e3d1ff31a4da2321b6d
|
edb543ee05260a1dd314b11384d918fa1622d9c1
|
refs/heads/master
| 2023-07-21T03:29:07.612507
| 2023-07-12T12:41:31
| 2023-07-12T12:41:31
| 283,079,072
| 0
| 0
|
Apache-2.0
| 2020-07-28T02:46:14
| 2020-07-28T02:46:13
| null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
import unittest
from pyalink.alink import *
import numpy as np
import pandas as pd
class TestToTensorStreamOp(unittest.TestCase):
def test_totensorstreamop(self):
df = pd.DataFrame(["FLOAT#6#0.0 0.1 1.0 1.1 2.0 2.1 "])
source = StreamOperator.fromDataframe(df, schemaStr='vec string')
source.link(
ToTensorStreamOp()
.setSelectedCol("vec")
.setTensorShape([2, 3])
.setTensorDataType("float")
).print()
StreamOperator.execute()
pass
|
[
"shaomeng.wang.w@gmail.com"
] |
shaomeng.wang.w@gmail.com
|
9ab8c1cfef72c9b54df1a43e0a919da8d13a725c
|
9c81c170f03ba925bf3d0682526245c202e384a7
|
/superset/cli/test.py
|
f175acec470cd59f06f6d1ad8de07765a2520901
|
[
"Apache-2.0",
"OFL-1.1"
] |
permissive
|
zcong1993/incubator-superset
|
2a08177641eff178dee9db852887ad2d19d70d54
|
269c99293f42089958dc98b5d6e5899509fc3111
|
refs/heads/master
| 2023-08-17T12:24:59.438120
| 2023-08-17T10:50:24
| 2023-08-17T10:50:24
| 209,522,299
| 0
| 0
|
Apache-2.0
| 2023-03-06T08:10:31
| 2019-09-19T10:09:21
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 2,860
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import click
from colorama import Fore
from flask.cli import with_appcontext
import superset.utils.database as database_utils
from superset import app, security_manager
logger = logging.getLogger(__name__)
@click.command()
@with_appcontext
def load_test_users() -> None:
"""
Loads admin, alpha, and gamma user for testing purposes
Syncs permissions for those users/roles
"""
print(Fore.GREEN + "Loading a set of users for unit tests")
load_test_users_run()
def load_test_users_run() -> None:
"""
Loads admin, alpha, and gamma user for testing purposes
Syncs permissions for those users/roles
"""
if app.config["TESTING"]:
sm = security_manager
examples_db = database_utils.get_example_database()
examples_pv = sm.add_permission_view_menu("database_access", examples_db.perm)
sm.sync_role_definitions()
gamma_sqllab_role = sm.add_role("gamma_sqllab")
sm.add_permission_role(gamma_sqllab_role, examples_pv)
gamma_no_csv_role = sm.add_role("gamma_no_csv")
sm.add_permission_role(gamma_no_csv_role, examples_pv)
for role in ["Gamma", "sql_lab"]:
for perm in sm.find_role(role).permissions:
sm.add_permission_role(gamma_sqllab_role, perm)
if str(perm) != "can csv on Superset":
sm.add_permission_role(gamma_no_csv_role, perm)
users = (
("admin", "Admin"),
("gamma", "Gamma"),
("gamma2", "Gamma"),
("gamma_sqllab", "gamma_sqllab"),
("alpha", "Alpha"),
("gamma_no_csv", "gamma_no_csv"),
)
for username, role in users:
user = sm.find_user(username)
if not user:
sm.add_user(
username,
username,
"user",
username + "@fab.org",
sm.find_role(role),
password="general",
)
sm.get_session.commit()
|
[
"noreply@github.com"
] |
zcong1993.noreply@github.com
|
f4771bd090478972d022ce9b450d530bb2408052
|
6c3ab38e350734f1bc4f0c746ea55a12838ce5ee
|
/pcserver/mainapp/handlers.py
|
93a7d32aa090f9a76b8f6ab1bca16d7d2eda3868
|
[] |
no_license
|
joelsemar/Programming-Challenge
|
1dd4fb487d02e05ed494e66da99a627970832988
|
b8bf8e115dc3c242d62bf696d3268a4b31019592
|
refs/heads/master
| 2020-05-17T15:16:45.892328
| 2011-08-31T19:17:15
| 2011-08-31T19:17:15
| 2,298,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,662
|
py
|
from webservice_tools.utils import BaseHandler, AutoListHandler
from webservice_tools.decorators import login_required
from mainapp.models import * #@UnusedWildImport
#Create your handlers here
class PhotosHandler(AutoListHandler):
model = Photo
allowed_methods = ('GET',)
extra_fields = ('image_url',)
exclude = ('image', )
@login_required
def read(self, request, response):
"""
Returns a list of Photo objects.
API Handler: GET /photos
Params:
@key [string] your api key
Returns:
@photos [Photo] list of photos, see Photo docs for details
"""
return super(PhotosHandler, self).read(request, response)
class PhotoHandler(BaseHandler):
model = Photo
allowed_methods = ('GET',)
extra_fields = ('image_url',)
exclude = ('image', )
@login_required
def read(self, request, id, response):
"""
Fetch the details of a photo by id
API Handler: GET /photo/{id}
Params:
@id [id] id of the photo (in the url)
@key [string] your api key
Returns:
@title [string] title
@description [string] a short description
@image_url [url] a url to the corresponding image
"""
return super(PhotoHandler, self).read(request, id, response)
#ALL DEFINITION EOF
module_name = globals().get('__name__')
handlers = sys.modules[module_name]
handlers._all_ = []
for handler_name in dir():
m = getattr(handlers, handler_name)
if type(m) == type(BaseHandler):
handlers._all_.append(handler_name)
|
[
"semarjt@gmail.com"
] |
semarjt@gmail.com
|
43e3f69a4d43e8fd97a6995fa95b1197d002dc0e
|
0315255c749b12216a7c8ac26378d8921466284a
|
/tests/integration/client/standard.py
|
969611b4d0a0800f10b1c10258875138538f5b08
|
[
"Apache-2.0"
] |
permissive
|
jhutchins/salt
|
a32de1362c6787ec96df7ce57bf9b98f20eaf30a
|
22ec0cee6a8a842ec426b7a3e634723ea7ce7256
|
refs/heads/master
| 2021-01-21T00:05:05.782149
| 2012-04-06T22:03:19
| 2012-04-06T22:03:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,254
|
py
|
# Import python libs
import subprocess
# Import salt libs
import integration
class StdTest(integration.ModuleCase):
'''
Test standard client calls
'''
def test_cli(self):
'''
Test cli function
'''
cmd_iter = self.client.cmd_cli(
'minion',
'test.ping',
)
for ret in cmd_iter:
self.assertTrue(ret['minion'])
def test_iter(self):
'''
test cmd_iter
'''
cmd_iter = self.client.cmd_iter(
'minion',
'test.ping',
)
for ret in cmd_iter:
self.assertTrue(ret['minion'])
def test_iter_no_block(self):
'''
test cmd_iter_no_block
'''
cmd_iter = self.client.cmd_iter_no_block(
'minion',
'test.ping',
)
for ret in cmd_iter:
if ret is None:
continue
self.assertTrue(ret['minion'])
def test_full_returns(self):
'''
test cmd_iter
'''
ret = self.client.cmd_full_return(
'minion',
'test.ping',
)
self.assertTrue(ret['minion'])
|
[
"thatch45@gmail.com"
] |
thatch45@gmail.com
|
11b9bf5a469cbefb5d55ecbc166fdf0b95d5e6a5
|
d2bb13cec7faf28e3d268312298f03c99806bd8b
|
/IPTS-16891-Dy2Ti2O7/norm_mesh_symm_All_rwp_100mK_7.py
|
d66eaa8f6dc140ec0ed3f53c2db9c0369b379c0f
|
[] |
no_license
|
rosswhitfield/corelli
|
06a91c26556ea788f20f973a1018a56e82a8c09a
|
d9e47107e3272c4457aa0d2e0732fc0446f54279
|
refs/heads/master
| 2021-08-07T14:04:24.426151
| 2021-08-03T19:19:05
| 2021-08-03T19:19:05
| 51,771,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,488
|
py
|
from mantid.simpleapi import *
from mantid.geometry import SymmetryOperationFactory
import numpy as np
# about information on where the data are and where to save
iptsfolder= "/SNS/CORELLI/IPTS-16891/"
outputdir="/SNS/users/rwp/corelli/IPTS-16891-Dy2Ti2O7/"
nxfiledir=iptsfolder + "nexus/"
ccfiledir = iptsfolder +"shared/autoreduce/"
UBfile = iptsfolder+"shared/DTO_UB_111Vertical.mat"
reducedfile_prefix = "DTO_cc"
LoadNexus(Filename='/SNS/CORELLI/shared/Vanadium/2016B/SolidAngle20160720NoCC.nxs', OutputWorkspace='sa')
LoadNexus(Filename='/SNS/CORELLI/shared/Vanadium/2016B/Spectrum20160720NoCC.nxs', OutputWorkspace='flux')
MaskBTP(Workspace='sa',Bank="1-30,62-91")
MaskBTP(workspace='sa',Pixel='1-16,200-256') #Mask the magnet
MaskBTP(Workspace='sa',Bank="49",Tube="1")
MaskBTP(Workspace='sa',Bank="54",Tube="1")
MaskBTP(Workspace='sa',Bank="58",Tube="13-16",Pixel="80-130")
MaskBTP(Workspace='sa',Bank="59",Tube="1-4",Pixel="80-130")
# Get UBs
LoadEmptyInstrument(Filename='/SNS/CORELLI/shared/Calibration/CORELLI_Definition_cal_20160310.xml', OutputWorkspace='ub')
LoadIsawUB(InputWorkspace='ub', Filename=UBfile)
ub=mtd['ub'].sample().getOrientedLattice().getUB()
print "Starting UB :"
print ub
#DTO Fd-3m (227) general position has 192 symmety operations.
symOps = SymmetryOperationFactory.createSymOps(\
"x,y,z; -x,-y,z; -x,y,-z; x,-y,-z;\
z,x,y; z,-x,-y; -z,-x,y; -z,x,-y;\
y,z,x; -y,z,-x; y,-z,-x; -y,-z,x;\
y,x,-z; -y,-x,-z; y,-x,z; -y,x,z;\
x,z,-y; -x,z,y; -x,-z,-y; x,-z,y;\
z,y,-x; z,-y,x; -z,y,x; -z,-y,-x;\
-x,-y,-z; x,y,-z; x,-y,z; -x,y,z;\
-z,-x,-y; -z,x,y; z,x,-y; z,-x,y;\
-y,-z,-x; y,-z,x; -y,z,x; y,z,-x;\
-y,-x,z; y,x,z; -y,x,-z; y,-x,-z;\
-x,-z,y; x,-z,-y; x,z,y; -x,z,-y;\
-z,-y,x; -z,y,-x; z,-y,-x; z,y,x")
ub_list=[]
for sym in symOps:
UBtrans = np.zeros((3,3))
UBtrans[0] = sym.transformHKL([1,0,0])
UBtrans[1] = sym.transformHKL([0,1,0])
UBtrans[2] = sym.transformHKL([0,0,1])
UBtrans=np.matrix(UBtrans.T)
new_ub = ub*UBtrans
print "Symmetry transform for "+sym.getIdentifier()
print UBtrans
print "New UB:"
print new_ub
ub_list.append(new_ub)
#load in background
#bkg=LoadEventNexus('/SNS/CORELLI/IPTS-15796/nexus/CORELLI_28124.nxs.h5')
#bkg=LoadNexus('/SNS/CORELLI/IPTS-15796/shared/autoreduce/CORELLI_28124_elastic.nxs')
#MaskDetectors(Workspace=bkg,MaskedWorkspace='sa')
#pc_bkg=sum(bkg.getRun()['proton_charge'].value)
#print 'pc_bkg=:'+str(pc_bkg)
#T=1.8 K
runs = range(34599,34635,1)
#T=100 mK
runs = range(34635,34653,1)
totalrun = len(runs)
print "Total number of runs %d" %totalrun
if mtd.doesExist('normMD'):
DeleteWorkspace('normMD')
if mtd.doesExist('dataMD'):
DeleteWorkspace('dataMD')
#for r in runs:
for index, r in enumerate(runs):
print index, ' Processing run : %s' %r
num=0
print 'Loading run number:'+ str(r)
#filename='/SNS/CORELLI/IPTS-15526/nexus/CORELLI_'+str(r)+'.nxs.h5'
#dataR=LoadEventNexus(Filename=filename)
filename=ccfiledir+'CORELLI_'+str(r)+'_elastic.nxs'
dataR=LoadNexus(Filename=filename)
LoadInstrument(Workspace= dataR, Filename='/SNS/CORELLI/shared/Calibration/CORELLI_Definition_cal_20160310.xml',RewriteSpectraMap=False)
MaskDetectors(Workspace=dataR,MaskedWorkspace='sa')
pc_data=sum(dataR.getRun()['proton_charge'].value)
print 'pc_data=:'+str(pc_data)
#dataR=dataR - bkg*pc_data/pc_bkg
# subtract the background if a background file was provided. Please make sure that the data were treated in the same way in terms of proton charge.
if mtd.doesExist('Bkg'):
bkg = mtd['Bkg']
ratio = pc_data/pc_bkg
bkg_c = bkg*ratio
Minus(LHSWorkspace=dataR, RHSWorkspace=bkg_c, OutputWorkspace=dataR)
dataR=ConvertUnits(dataR,Target="Momentum",EMode="Elastic")
dataR=CropWorkspace(dataR,XMin=2.5,XMax=10)
SetGoniometer(dataR,Axis0="BL9:Mot:Sample:Axis2,0,1,0,1")
LoadIsawUB(InputWorkspace=dataR,Filename=UBfile)
for ub in ub_list:
#for index, ub in enumerate(ub_list):
#print "index, using UB ", (index+1), ":"
num += 1
print "Run number"+str(r)+" Using UB:"+str(num)
print ub
SetUB(dataR, UB=ub)
md=ConvertToMD(InputWorkspace=dataR,QDimensions='Q3D',dEAnalysisMode='Elastic', Q3DFrames='HKL',
QConversionScales='HKL',MinValues='-7.1,-7.1,-7.1',MaxValues='7.1,7.1,7.1')
a1,b1=MDNormSCD(InputWorkspace='md',FluxWorkspace='flux',SolidAngleWorkspace='sa',
AlignedDim0="[H,0,0],-7.01,7.01,701",
AlignedDim1="[0,K,0],-7.01,7.01,701",
AlignedDim2="[0,0,L],-7.01,7.01,701")
if mtd.doesExist('dataMD'):
dataMD=dataMD+a1
else:
dataMD=CloneMDWorkspace(a1)
if mtd.doesExist('normMD'):
normMD=normMD+b1
else:
normMD=CloneMDWorkspace(b1)
normData_CC=dataMD/normMD
SaveMD('dataMD',Filename=outputdir+'DTO_datacc_48sym_Temp100mK_7.nxs')
SaveMD('normMD',Filename=outputdir+'DTO_normcc_48sym_Temp100mK_7.nxs')
SaveMD('normData_CC',Filename=outputdir+'DTO_normdatacc_48sym_Temp100mK_7.nxs')
# group the data
#data6K=GroupWorkspaces(datatoMerge)
#md6K=GroupWorkspaces(mdtoMerge)
|
[
"whitfieldre@ornl.gov"
] |
whitfieldre@ornl.gov
|
abd7adc1822c7a3ded2bfbb351e303bc38039614
|
99a310f6bb6c7a6c728f1b3ae78054487372042d
|
/aoc2019/intcode/state_machine.py
|
b68372737c28f105cbb818391176e19138743da5
|
[] |
no_license
|
jepebe/aoc2018
|
46ce6b46479a0faf2c2970413af14a071dcfdb79
|
4bf91b99bec4b59529533ef70f24bf6496bada99
|
refs/heads/master
| 2023-01-11T16:44:42.125394
| 2023-01-06T06:27:14
| 2023-01-06T06:27:14
| 159,912,721
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,926
|
py
|
from collections import defaultdict
def get_address(state_machine, parameter, write_mode=False):
    """Resolve the effective address of operand *parameter* at the current pos.

    Mode 0 (position): the cell at pos holds the address.
    Mode 1 (immediate): the address is pos itself (reads yield the literal).
    Mode 2 (relative): cell at pos plus the machine's relative base.

    :param state_machine: interpreter state dict
    :param parameter: index into state_machine['parameter_modes']
    :param write_mode: True when resolving a write target (immediate writes are suspicious)
    :raises ValueError: for an unrecognized addressing mode
    """
    mode = state_machine['parameter_modes'][parameter]
    pos = state_machine['pos']
    if mode == 0:
        addr = state_machine['instructions'][pos]
    elif mode == 1:
        if write_mode:
            print('Writing in immediate mode?')
        addr = pos
    elif mode == 2:
        addr = state_machine['instructions'][pos]
        relative_pos = state_machine['relative_pos']
        addr = addr + relative_pos
    else:
        # Bug fix: the original raised a plain str, which itself raises
        # "TypeError: exceptions must derive from BaseException".
        raise ValueError('Unknown addressing mode %i for read' % mode)
    return addr
def read(state_machine, parameter):
    """Fetch one operand value and advance the instruction pointer."""
    location = get_address(state_machine, parameter)
    state_machine['pos'] += 1
    program = state_machine['instructions']
    # Addresses inside the program read the program; beyond it, sparse memory.
    if location < len(program):
        return program[location]
    return state_machine['memory'][location]
def write(state_machine, parameter, value):
    """Store *value* at the address named by operand *parameter*."""
    location = get_address(state_machine, parameter, write_mode=True)
    state_machine['pos'] += 1
    program = state_machine['instructions']
    # Writes inside the program mutate it; beyond it, go to sparse memory.
    if location < len(program):
        program[location] = value
    else:
        state_machine['memory'][location] = value
def add(state_machine):
    """Opcode 1: param2 <- param0 + param1."""
    total = read(state_machine, 0) + read(state_machine, 1)
    write(state_machine, 2, total)
def multiply(state_machine):
    """Opcode 2: param2 <- param0 * param1."""
    product = read(state_machine, 0) * read(state_machine, 1)
    write(state_machine, 2, product)
def get_input(state_machine):
    """Opcode 3: consume one queued input value, or park until input arrives."""
    queue = state_machine['input']
    if queue:
        write(state_machine, 0, queue.pop(0))
    else:
        # Nothing to consume: flag the wait and rewind so this same
        # instruction is retried once input has been queued.
        state_machine['wait'] = True
        state_machine['pos'] -= 1
        state_machine['instruction_count'] -= 1
def output(state_machine):
    """Opcode 4: append param0 to the output queue (optionally echo it)."""
    result = read(state_machine, 0)
    state_machine['output'].append(result)
    if state_machine['output_enabled']:
        print('Output from state machine %s' % result)
def jump_if_true(state_machine):
    """Opcode 5: jump to param1 when param0 is non-zero."""
    condition = read(state_machine, 0)
    target = read(state_machine, 1)
    if condition:
        state_machine['pos'] = target
def jump_if_false(state_machine):
    """Opcode 6: jump to param1 when param0 is zero."""
    condition = read(state_machine, 0)
    target = read(state_machine, 1)
    if not condition:
        state_machine['pos'] = target
def less_than(state_machine):
    """Opcode 7: param2 <- 1 if param0 < param1 else 0."""
    lhs = read(state_machine, 0)
    rhs = read(state_machine, 1)
    write(state_machine, 2, int(lhs < rhs))
def equals(state_machine):
    """Opcode 8: param2 <- 1 if param0 == param1 else 0."""
    lhs = read(state_machine, 0)
    rhs = read(state_machine, 1)
    write(state_machine, 2, int(lhs == rhs))
def adjust_relative(state_machine):
    """Opcode 9: shift the relative addressing base by param0."""
    state_machine['relative_pos'] += read(state_machine, 0)
def halt(state_machine):
    """Opcode 99: flag the machine as halted; the run loop then exits."""
    state_machine['halt'] = True
def create_state_machine(instructions):
    """Build a fresh interpreter state dict for *instructions* (list of ints)."""
    dispatch = {
        1: add,
        2: multiply,
        3: get_input,
        4: output,
        5: jump_if_true,
        6: jump_if_false,
        7: less_than,
        8: equals,
        9: adjust_relative,
        99: halt,
    }
    return {
        'instructions': list(instructions),
        'backup_instructions': list(instructions),  # pristine copy for reset
        'memory': defaultdict(int),                 # sparse memory beyond the program
        'operation': 0,
        'parameter_modes': [0],
        'pos': 0,
        'relative_pos': 0,
        'instruction_count': 0,
        'input': [],
        'output': [],
        'last_output': None,
        'output_enabled': False,
        'opcodes': dispatch,
        'halt': False,
        'wait': False,
    }
def reset_state_machine(state_machine):
    """Restore *state_machine* to its just-created condition (program from backup)."""
    state_machine['instructions'] = list(state_machine['backup_instructions'])
    state_machine.update({
        'memory': defaultdict(int),
        'operation': 0,
        'parameter_modes': [0],
        'pos': 0,
        'relative_pos': 0,
        'instruction_count': 0,
        'input': [],
        'output': [],
        'last_output': None,
        'output_enabled': False,
        'halt': False,
        'wait': False,
    })
def parse(state_machine):
    """Decode the opcode at pos into an operation + per-parameter modes.

    An opcode is DCBAA: AA = operation, D/C/B = modes of params 3/2/1.
    """
    pointer = state_machine['pos']
    opcode = state_machine['instructions'][pointer]
    operation_code = opcode % 100
    modes = [(opcode // 100) % 10, (opcode // 1000) % 10, (opcode // 10000) % 10]
    state_machine['operation'] = state_machine['opcodes'][operation_code]
    state_machine['parameter_modes'] = modes
    state_machine['pos'] = pointer + 1
def run_state_machine(state_machine):
    """Fetch/decode/execute until the machine halts or parks waiting for input."""
    while True:
        if state_machine['halt'] or state_machine['wait']:
            break
        parse(state_machine)
        state_machine['operation'](state_machine)
        state_machine['instruction_count'] += 1
def add_input(state_machine, data):
    """Queue *data* for opcode 3 and wake a machine parked on input."""
    state_machine['input'].append(data)
    # Unconditionally clearing the flag is equivalent to clearing it only
    # when set — it is False in every other case.
    state_machine['wait'] = False
def get_output(state_machine):
    """Pop and return the oldest output value, remembering it as last_output.

    :raises UserWarning: when the output queue is empty
    """
    if not has_output(state_machine):
        raise UserWarning('No output available!')
    value = state_machine['output'].pop(0)
    state_machine['last_output'] = value
    return value
def has_output(state_machine):
    """Return True when at least one output value is queued."""
    return bool(state_machine['output'])
def get_last_output(state_machine):
    """Return the most recently popped output value (None before any pop)."""
    return state_machine['last_output']
def flush_output(state_machine):
    """Discard every queued output value (last_output tracks the final one)."""
    for _ in range(len(state_machine['output'])):
        get_output(state_machine)
def load_instructions(filename):
    """Read a comma-separated Intcode program from the first line of *filename*."""
    with open(filename) as source:
        return [int(token) for token in source.readline().split(',')]
def load_state_machine(filename):
    """Load the program in *filename* and wrap it in a fresh state machine."""
    program = load_instructions(filename)
    return create_state_machine(program)
def is_running(state_machine):
    """Return True while the machine has not executed a halt (opcode 99)."""
    halted = state_machine['halt']
    return not halted
def print_output(state_machine):
    """Drain the output queue to stdout, rendering ASCII-range values as chars."""
    import sys
    while has_output(state_machine):
        value = get_output(state_machine)
        text = str(value) if value > 255 else chr(value)
        sys.stdout.write(text)
|
[
"jepebe@users.noreply.github.com"
] |
jepebe@users.noreply.github.com
|
64cddf5250ac60f94ef5c62aedfa3eb120d3e5f8
|
8ca70628ca811e08fb77b8e251fc8e5049486a65
|
/airbyte-integrations/bases/base-python/base_python/cdk/streams/exceptions.py
|
6727216dd5dd50496241a0890070cb87439e8f82
|
[
"MIT"
] |
permissive
|
Tana8M/airbyte
|
a19544d2f7997ec7551793f7077d3e02bfe6ac84
|
49296ef657be272684c7259ed0d6be06e574dbe1
|
refs/heads/master
| 2023-04-15T15:04:22.849307
| 2021-04-23T23:12:55
| 2021-04-23T23:12:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,825
|
py
|
"""
MIT License
Copyright (c) 2020 Airbyte
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Union
import requests
class BaseBackoffException(requests.exceptions.HTTPError):
    """Base class for HTTP errors that should trigger a retry backoff."""

    pass
class UserDefinedBackoffException(BaseBackoffException):
    """Backoff error that also records how long the caller asked to wait."""

    def __init__(self, backoff: Union[int, float], request: requests.PreparedRequest, response: requests.Response):
        """
        :param backoff: how long to backoff in seconds
        :param request: the request that triggered this backoff exception
        :param response: the response that triggered the backoff exception
        """
        super().__init__(request=request, response=response)
        self.backoff = backoff
class DefaultBackoffException(BaseBackoffException):
    """Backoff error using the connector's default (not user-defined) wait policy."""

    pass
|
[
"noreply@github.com"
] |
Tana8M.noreply@github.com
|
3daab6c956e8d126316ecdb6ef6e71d8af6a258d
|
1c8a1b7cfb5c78fe94c4cc62a78dbfff96161924
|
/day05/test04.py
|
7715b05a49b005d9cad71dc19124fa6797945c72
|
[] |
no_license
|
WHUTyuen/PIL_opencv
|
d264858f0eaa4ecc555747efd5f277f48a432b91
|
3ae6e7d878215866c304e64eac05bf1011ecb428
|
refs/heads/main
| 2023-01-01T14:00:33.331676
| 2020-11-01T11:35:18
| 2020-11-01T11:35:18
| 309,072,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
import cv2
import numpy as np

# Laplacian-pyramid blending demo: blends the left half of '3.jpg' with the
# right half of '4.jpg' and compares against a naive direct concatenation.
# Assumes both images exist and have identical dimensions — TODO confirm.
A = cv2.imread('3.jpg')
B = cv2.imread('4.jpg')

# Gaussian pyramid for A: the original plus 6 successive downsamples.
G = A.copy()
gpA = [G]
for i in range(6):
    G = cv2.pyrDown(G)
    gpA.append(G)

# Gaussian pyramid for B.
G = B.copy()
gpB = [G]
for i in range(6):
    G = cv2.pyrDown(G)
    gpB.append(G)

# generate Laplacian Pyramid for A (coarsest level first, then detail bands)
lpA = [gpA[5]]
for i in range(5, 0, -1):
    GE = cv2.pyrUp(gpA[i])
    L = cv2.subtract(gpA[i - 1], GE)
    lpA.append(L)

# generate Laplacian Pyramid for B
lpB = [gpB[5]]
for i in range(5, 0, -1):
    GE = cv2.pyrUp(gpB[i])
    L = cv2.subtract(gpB[i - 1], GE)
    lpB.append(L)

# Now add left and right halves of images in each level
LS = []
for la, lb in zip(lpA, lpB):
    rows, cols, dpt = la.shape
    ls = np.hstack((la[:, 0:cols // 2], lb[:, cols // 2:]))
    LS.append(ls)

# now reconstruct: upsample and add each detail band back in
ls_ = LS[0]
for i in range(1, 6):
    ls_ = cv2.pyrUp(ls_)
    ls_ = cv2.add(ls_, LS[i])

# image with direct connecting each half
# NOTE(review): `cols` leaks out of the loop above; it equals the full-res
# width only because the last zip element is the full-resolution band — verify.
real = np.hstack((A[:, :cols // 2], B[:, cols // 2:]))

cv2.imshow('Pyramid_blending.jpg', ls_)
cv2.imshow('Direct_blending.jpg', real)
cv2.waitKey(0)
|
[
"noreply@github.com"
] |
WHUTyuen.noreply@github.com
|
27c38c01ec059532373e8cd03289ccde4ded2e1d
|
f0f3f8731145e236e8e08dafb4201108d35af488
|
/wish_list_items/migrations/0007_auto_20160414_1317.py
|
8478f0d9cfbb5235617279dac1587637337832db
|
[] |
no_license
|
AaronScruggs/wish_list_project
|
49fdfc9c3a9e72470084bbf283085c15aa659a3e
|
a2a741823e0a570390ce344f3407f6f3b57f2590
|
refs/heads/master
| 2021-01-01T05:18:10.817456
| 2016-04-19T00:36:24
| 2016-04-19T00:36:24
| 56,259,190
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-14 20:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter wishitem.item_url to a nullable URLField."""

    dependencies = [
        ('wish_list_items', '0006_auto_20160414_1312'),
    ]

    operations = [
        migrations.AlterField(
            model_name='wishitem',
            name='item_url',
            # NOTE(review): default=True on a URLField looks unintended (a
            # boolean default for a URL string) — presumably meant default=''
            # or blank=True; confirm before touching a historical migration.
            field=models.URLField(default=True, null=True),
        ),
    ]
|
[
"aarondscruggs@gmail.com"
] |
aarondscruggs@gmail.com
|
4816b6ce56b6ba10760fc6ec50b511666a0ef942
|
c0f5d309576f791f8cc062e2d0cad340eec41d7d
|
/3.py
|
846552142673f67774ae9cc5803b41248ec09248
|
[] |
no_license
|
mjjin1214/algorithm
|
fa91455ab792c38d01fd210c12e53e50f516eb55
|
423119406061443939b4b966c7d9f1513544dd03
|
refs/heads/master
| 2020-04-22T19:31:23.981387
| 2019-04-05T07:58:10
| 2019-04-05T07:58:10
| 170,610,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
import sys
# Redirect stdin so the input() calls below read from the bundled test file
# (typical competitive-programming workflow).
sys.stdin = open('input2.txt')
def subset(n, su):
    """Recursively enumerate subset sums of the global `score` list.

    `visit` is a bitmask of sums already seen; `count` tallies distinct sums.
    At each index we branch on including or excluding score[n].
    """
    global visit, count
    if n == len(score):
        mask = 1 << su
        if not visit & mask:
            visit ^= mask
            count += 1
        return
    subset(n + 1, su + score[n])
    subset(n + 1, su)
T = int(input())
for t in range(T):
    N = int(input())
    # Dedupe the scores; the duplicates removed here are compensated for in
    # the final answer via `count + N - len(score)`.
    score = list(set(map(int, input().split())))
    visit = count = 0
    subset(0, 0)
    print('#{} {}'.format(t+1, count+N-len(score)))
|
[
"moc0etan@gmail.com"
] |
moc0etan@gmail.com
|
cb1e1c4fd0adabebcd87bc33eefe453ec2df48fa
|
942ee5e8d54e8ebe9c5c841fbfdd1da652946944
|
/1001-1500/1443.Minimum Time to Collect All Apples in a Tree.py
|
e8ae7ff0deadce1de133f2d3d5feb31d43fde59a
|
[] |
no_license
|
kaiwensun/leetcode
|
0129c174457f32887fbca078fb448adce46dd89d
|
6b607f4aae3a4603e61f2e2b7480fdfba1d9b947
|
refs/heads/master
| 2023-08-31T07:30:50.459062
| 2023-08-27T07:59:16
| 2023-08-27T07:59:16
| 57,526,914
| 69
| 9
| null | 2023-08-20T06:34:41
| 2016-05-01T05:37:29
|
Python
|
UTF-8
|
Python
| false
| false
| 728
|
py
|
from collections import defaultdict
class Solution(object):
def minTime(self, n, edges, hasApple):
"""
:type n: int
:type edges: List[List[int]]
:type hasApple: List[bool]
:rtype: int
"""
graph = defaultdict(list)
for edge in edges:
graph[edge[0]].append(edge[1])
graph[edge[1]].append(edge[0])
visited = set()
def dfs(root):
res = 0
if root not in visited:
visited.add(root)
for nbr in graph[root]:
res += dfs(nbr)
if res or hasApple[root]:
res += 2
return res
return max(0, dfs(0) - 2)
|
[
"noreply@github.com"
] |
kaiwensun.noreply@github.com
|
c2f2d9873572b84a36f2345329ebd77f92a88cbe
|
98e1716c1c3d071b2fedef0ac029eb410f55762c
|
/part15-statistical-thinking-1/No04-Bee-swarm-plot.py
|
0b503f7631dcaaedd5a7afe2edbda8d651de8a7c
|
[] |
no_license
|
iamashu/Data-Camp-exercise-PythonTrack
|
564531bcf1dff119949cbb75e1fd63d89cb2779f
|
c72a4e806494f0e263ced9594597dc8882c2131c
|
refs/heads/master
| 2020-07-22T00:23:12.024386
| 2019-04-12T09:24:42
| 2019-04-12T09:24:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,212
|
py
|
#Bee swarm plot
'''
Make a bee swarm plot of the iris petal lengths. Your x-axis should contain each of the three species, and the y-axis the petal lengths. A data frame containing the data is in your namespace as df.
For your reference, the code Justin used to create the bee swarm plot in the video is provided below:
_ = sns.swarmplot(x='state', y='dem_share', data=df_swing)
_ = plt.xlabel('state')
_ = plt.ylabel('percent of vote for Obama')
plt.show()
In the IPython Shell, you can use sns.swarmplot? or help(sns.swarmplot) for more details on how to make bee swarm plots using seaborn.
Instructions
In the IPython Shell, inspect the DataFrame df using df.head(). This will let you identify which column names you need to pass as the x and y keyword arguments in your call to sns.swarmplot().
Use sns.swarmplot() to make a bee swarm plot from the DataFrame containing the Fisher iris data set, df. The x-axis should contain each of the three species, and the y-axis should contain the petal lengths.
Label the axes.
Show your plot.
'''
# code
# NOTE(review): this DataCamp snippet assumes `sns` (seaborn), `plt`
# (matplotlib.pyplot) and the iris DataFrame `df` are already in scope,
# as provided by the exercise environment — it will NameError standalone.
sns.swarmplot(x='species', y='petal length (cm)', data=df)

# Label the axes
plt.xlabel('species')
plt.ylabel('petal length (cm)')

# Show the plot
plt.show()
|
[
"beiran@hotmail.com"
] |
beiran@hotmail.com
|
f7bb5b008461cd4f51770163a3cf7e600d784405
|
81c5c07e1144747dc0e98f8dffb287a69be1eba7
|
/score_mcc_bin.py
|
686c4e86fcab42e4f12a69f6f893e59e1cfe31ee
|
[] |
no_license
|
twistedmove/e2e_antispoofing
|
acbb9ec5bc4454c1698fc355d0c0fee3bf70006e
|
686dfb515b2c568a1006136f56bbaad0419f0787
|
refs/heads/master
| 2020-09-07T10:41:12.024794
| 2019-10-06T19:28:19
| 2019-10-06T19:28:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,454
|
py
|
import argparse
import numpy as np
import glob
import torch
import torch.nn.functional as F
import os
from kaldi_io import read_mat_scp
import model as model_
import scipy.io as sio
from utils import compute_eer_labels, set_device, read_trials, get_freer_gpu
def prep_feats(data_):
    """Convert a (frames, coeffs) feature matrix to a (1, 1, coeffs, frames)
    float32 torch tensor, tiling utterances shorter than 50 frames up to
    exactly 50 frames. Longer utterances are passed through at full length.
    """
    # Per-utterance mean/std normalization was deliberately disabled in the
    # original implementation.
    features = data_.T
    n_frames = features.shape[1]
    if n_frames < 50:
        repeats = int(np.ceil(50 / n_frames))
        features = np.tile(features, (1, repeats))[:, :50]
    return torch.from_numpy(features[np.newaxis, np.newaxis, :, :]).float()
if __name__ == '__main__':
    # CLI driver: score every trial utterance with an anti-spoofing model and
    # optionally write the scores to a file and report EER.
    parser = argparse.ArgumentParser(description='Compute scores for mcc model')
    parser.add_argument('--path-to-data', type=str, default='./data/feats.scp', metavar='Path', help='Path to input data')
    parser.add_argument('--trials-path', type=str, default='./data/trials', metavar='Path', help='Path to trials file')
    parser.add_argument('--cp-path', type=str, default=None, metavar='Path', help='Path for file containing model')
    parser.add_argument('--out-path', type=str, default='./out.txt', metavar='Path', help='Path to output hdf file')
    parser.add_argument('--model', choices=['lstm', 'resnet', 'resnet_pca', 'lcnn_9', 'lcnn_29', 'lcnn_9_pca', 'lcnn_29_pca', 'lcnn_9_prodspec', 'lcnn_9_icqspec', 'lcnn_9_CC', 'lcnn_29_CC', 'resnet_CC'], default='lcnn_9', help='Model arch')
    parser.add_argument('--n-classes', type=int, default=-1, metavar='N', help='Number of classes for the mcc case (default: binary classification)')
    parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use')
    parser.add_argument('--no-output-file', action='store_true', default=False, help='Disables writing scores into out file')
    parser.add_argument('--no-eer', action='store_true', default=False, help='Disables computation of EER')
    parser.add_argument('--eval', action='store_true', default=False, help='Enables eval trials reading')
    parser.add_argument('--ncoef', type=int, default=90, metavar='N', help='Number of cepstral coefs (default: 90)')
    parser.add_argument('--init-coef', type=int, default=0, metavar='N', help='First cepstral coefs (default: 0)')
    args = parser.parse_args()
    args.cuda = True if not args.no_cuda and torch.cuda.is_available() else False

    if args.cp_path is None:
        raise ValueError('There is no checkpoint/model path. Use arg --cp-path to indicate the path!')

    # Start from a clean output file.
    if os.path.isfile(args.out_path):
        os.remove(args.out_path)
        print(args.out_path + ' Removed')

    print('Cuda Mode is: {}'.format(args.cuda))
    print('Selected model is: {}'.format(args.model))

    if args.cuda:
        device = get_freer_gpu()

    # Instantiate the architecture selected on the command line.
    if args.model == 'lstm':
        model = model_.cnn_lstm(nclasses=args.n_classes)
    elif args.model == 'resnet':
        model = model_.ResNet(nclasses=args.n_classes)
    elif args.model == 'resnet_pca':
        model = model_.ResNet_pca(nclasses=args.n_classes)
    elif args.model == 'lcnn_9':
        model = model_.lcnn_9layers(nclasses=args.n_classes)
    elif args.model == 'lcnn_29':
        model = model_.lcnn_29layers_v2(nclasses=args.n_classes)
    elif args.model == 'lcnn_9_pca':
        model = model_.lcnn_9layers_pca(nclasses=args.n_classes)
    elif args.model == 'lcnn_29_pca':
        model = model_.lcnn_29layers_v2_pca(nclasses=args.n_classes)
    elif args.model == 'lcnn_9_icqspec':
        model = model_.lcnn_9layers_icqspec(nclasses=args.n_classes)
    elif args.model == 'lcnn_9_prodspec':
        model = model_.lcnn_9layers_prodspec(nclasses=args.n_classes)
    elif args.model == 'lcnn_9_CC':
        model = model_.lcnn_9layers_CC(nclasses=args.n_classes, ncoef=args.ncoef, init_coef=args.init_coef)
    elif args.model == 'lcnn_29_CC':
        model = model_.lcnn_29layers_CC(nclasses=args.n_classes, ncoef=args.ncoef, init_coef=args.init_coef)
    elif args.model == 'resnet_CC':
        model = model_.ResNet_CC(nclasses=args.n_classes, ncoef=args.ncoef, init_coef=args.init_coef)

    print('Loading model')
    ckpt = torch.load(args.cp_path, map_location = lambda storage, loc: storage)
    model.load_state_dict(ckpt['model_state'], strict=False)
    model.eval()
    print('Model loaded')

    print('Loading data')
    # Eval trials carry no labels; dev/train trials also yield attack types
    # and ground-truth labels for EER computation.
    if args.eval:
        test_utts = read_trials(args.trials_path, eval_=args.eval)
    else:
        test_utts, attack_type_list, label_list = read_trials(args.trials_path, eval_=args.eval)
    data = { k:m for k,m in read_mat_scp(args.path_to_data) }
    print('Data loaded')

    print('Start of scores computation')
    score_list = []
    with torch.no_grad():
        for i, utt in enumerate(test_utts):
            print('Computing score for utterance '+ utt)
            feats = prep_feats(data[utt])
            # Score = 1 - P(spoof classes): probability mass of class 0.
            try:
                if args.cuda:
                    feats = feats.to(device)
                    model = model.to(device)
                score = 1.-F.softmax(model.forward(feats), dim=1)[:,1:].sum().item()
            # NOTE(review): bare except used as a blanket GPU-failure fallback
            # (e.g. OOM) that retries on CPU — it also hides unrelated errors;
            # consider narrowing to RuntimeError.
            except:
                feats = feats.cpu()
                model = model.cpu()
                score = 1.-F.softmax(model.forward(feats), dim=1)[:,1:].sum().item()
            score_list.append(score)
            print('Score: {}'.format(score_list[-1]))

    if not args.no_output_file:
        print('Storing scores in output file:')
        print(args.out_path)
        with open(args.out_path, 'w') as f:
            if args.eval:
                for i, utt in enumerate(test_utts):
                    f.write("%s" % ' '.join([utt, str(score_list[i])+'\n']))
            else:
                for i, utt in enumerate(test_utts):
                    f.write("%s" % ' '.join([utt, attack_type_list[i], label_list[i], str(score_list[i])+'\n']))

    if not args.no_eer and not args.eval:
        print('EER: {}'.format(compute_eer_labels(label_list, score_list)))

    print('All done!!')
|
[
"joaomonteirof@gmail.com"
] |
joaomonteirof@gmail.com
|
158c8395e7b37a739bbe7438d2a3fb3853747fb2
|
0b20f4ce14b9ff77c84cedbecbaa29831335920d
|
/tests/cloudformation/file_formats/test_yaml.py
|
76149f86216a57acc3de965d65a22daae34bad5a
|
[
"Apache-2.0"
] |
permissive
|
sergesec488/checkov
|
219c1b3864ab4f70b39a4cd79b041e98f3145364
|
56008e1c531b3626f14716067731be6e673040bc
|
refs/heads/master
| 2023-04-10T12:26:49.749864
| 2021-02-26T18:36:52
| 2021-02-26T18:40:58
| 342,883,133
| 0
| 1
|
Apache-2.0
| 2023-03-30T13:31:25
| 2021-02-27T15:01:08
| null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
import os
import unittest
from checkov.cloudformation.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestYamlFileFormat(unittest.TestCase):
    """Exercise the CloudFormation runner against the bundled YAML fixtures."""

    def test_summary(self):
        fixtures_dir = os.path.dirname(os.path.realpath(__file__)) + "/yaml"
        report = Runner().run(root_folder=fixtures_dir)
        summary = report.get_summary()
        expected = {'passed': 1, 'failed': 0, 'skipped': 0, 'parsing_errors': 0}
        for key, value in expected.items():
            self.assertEqual(summary[key], value)
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
|
[
"noreply@github.com"
] |
sergesec488.noreply@github.com
|
c03967857b3abb3a4db4df537c2c4342ac393b68
|
d41d18d3ea6edd2ec478b500386375a8693f1392
|
/plotly/validators/scatter/marker/line/_width.py
|
108770c589b4e4605b6ff605e20647ef337325b7
|
[
"MIT"
] |
permissive
|
miladrux/plotly.py
|
38921dd6618650d03be9891d6078e771ffccc99a
|
dbb79e43e2cc6c5762251537d24bad1dab930fff
|
refs/heads/master
| 2020-03-27T01:46:57.497871
| 2018-08-20T22:37:38
| 2018-08-20T22:37:38
| 145,742,203
| 1
| 0
|
MIT
| 2018-08-22T17:37:07
| 2018-08-22T17:37:07
| null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for scatter.marker.line.width: a non-negative number,
    arrays accepted (array_ok), styled edits only (edit_type='style')."""

    def __init__(
        self, plotly_name='width', parent_name='scatter.marker.line', **kwargs
    ):
        super(WidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=True,
            edit_type='style',
            min=0,
            role='style',
            **kwargs
        )
[
"adam.kulidjian@gmail.com"
] |
adam.kulidjian@gmail.com
|
f60987e55994a05e1fbf45fa4d8ded677baca05b
|
732374714ffe0e0f2c07a493a2ee71c9271fdce0
|
/mysite/settings.py
|
bcd771fb691401a56d55a3106a4ee650b115e261
|
[] |
no_license
|
aaronahmid/mosunhomesrealtors
|
721fb20d671f1a58c64abc8bdf1209a5ab3236f1
|
561b56fd90179e163f0c861dae1d451cc1cfc662
|
refs/heads/main
| 2023-08-13T02:22:46.005517
| 2021-10-09T05:15:59
| 2021-10-09T05:15:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,269
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
import dj_database_url
import django_heroku
import cloudinary
import cloudinary.uploader
import cloudinary.api
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure--+3$m3fs+h3qdye&74^k@qadoro606d*%%qacpzw=&7g!ruu@l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1', '.herokuapp.com', 'www.mosunhomes-realtors.com', 'mosunhomes-realtors.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
'cloudinary',
'cloudinary_storage',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Lagos'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_URL = '/static/'
STATICFILES_DIRS = os.path.join(BASE_DIR, "blog/static"),
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
cloudinary.config(
cloud_name = "thormiwa",
api_key = "584634363435482",
api_secret = "XGzynridSBzxfDGpkyOMnHAHGrA"
)
DEFAULT_FILE_STORAGE = 'cloudinary_storage.storage.MediaCloudinaryStorage'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = True
# Activate Django-Heroku.
django_heroku.settings(locals())
|
[
"thormiwa04@gmail.com"
] |
thormiwa04@gmail.com
|
93b57b5d8ab7beae315d919322890e775a1998e9
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_188/ch78_2019_04_04_19_41_08_100209.py
|
61335cb39d3ea9f3ad223e32472973ee949e080e
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
from math import sqrt
def calcula_tempo(atletas):
    """Map {name: acceleration} to {name: time}, where time = sqrt(200 / a)
    (time to cover 100 m from rest under constant acceleration: s = a*t^2/2).
    """
    return {nome: sqrt(200 / aceleracao) for nome, aceleracao in atletas.items()}
def atleta_mais_rapido(dicionario):
    """Return the name of the athlete with the smallest finishing time.

    Bug fix: the accumulator previously started at 0 with a `>` comparison,
    so the strictly-positive times could never win and the function always
    returned "". Returns "" only for an empty dict.
    """
    menor_tempo = float('inf')
    melhor_atleta = ""
    for nome in dicionario:
        if dicionario[nome] < menor_tempo:
            menor_tempo = dicionario[nome]
            melhor_atleta = nome
    return melhor_atleta
def tempo_mais_curto(dicionario):
    """Return the smallest time in *dicionario* (0 for an empty dict).

    Bug fix: the accumulator previously started at 0 with a `>` comparison,
    so for positive times the function always returned 0.
    """
    if not dicionario:
        return 0
    return min(dicionario.values())
# Interactive driver: read (name, acceleration) pairs until "sair", then
# report the winner and the best time.
# Bug fix 1: the dict was created as `nomes_aceleracoes_ateltas` (typo) but
# written to as `nomes_aceleracoes_atletas`, causing a NameError.
nomes_aceleracoes_atletas = {}
sair = False
while not sair:
    nome = input("Digite o nome do atleta: ")
    # Bug fix 2: check the "sair" sentinel BEFORE prompting for an
    # acceleration — the original asked for (and int()-converted) an
    # acceleration even for "sair", crashing on non-numeric input.
    if nome == "sair":
        sair = True
    else:
        aceleracao = int(input("Digite a aceleracao do atleta: "))
        nomes_aceleracoes_atletas[nome] = aceleracao
nomes_tempos_atletas = calcula_tempo(nomes_aceleracoes_atletas)
nome = atleta_mais_rapido(nomes_tempos_atletas)
tempo = tempo_mais_curto(nomes_tempos_atletas)
print('O vencedor é {0} com tempo de conclusão de {1} s'.format(nome, tempo))
|
[
"you@example.com"
] |
you@example.com
|
b4cebd6904d477cd8224278ad3c87bbe2000ae9e
|
ccbfc7818c0b75929a1dfae41dc061d5e0b78519
|
/aliyun-openapi-python-sdk-master/aliyun-python-sdk-vpc/aliyunsdkvpc/request/v20160428/CreateRouterInterfaceRequest.py
|
f3794b0030c799277bdbb14c640f9f31c41bee1c
|
[
"Apache-2.0"
] |
permissive
|
P79N6A/dysms_python
|
44b634ffb2856b81d5f79f65889bfd5232a9b546
|
f44877b35817e103eed469a637813efffa1be3e4
|
refs/heads/master
| 2020-04-28T15:25:00.368913
| 2019-03-13T07:52:34
| 2019-03-13T07:52:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,414
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateRouterInterfaceRequest(RpcRequest):
    def __init__(self):
        # RPC-style request against the VPC product API, version 2016-04-28,
        # action 'CreateRouterInterface', product code 'vpc'.
        RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'CreateRouterInterface','vpc')
    # Mechanically generated accessors: each get_<Name> reads query parameter
    # '<Name>' and each set_<Name> writes it via add_query_param.
    def get_AccessPointId(self):
        return self.get_query_params().get('AccessPointId')

    def set_AccessPointId(self,AccessPointId):
        self.add_query_param('AccessPointId',AccessPointId)

    def get_OppositeRouterId(self):
        return self.get_query_params().get('OppositeRouterId')

    def set_OppositeRouterId(self,OppositeRouterId):
        self.add_query_param('OppositeRouterId',OppositeRouterId)

    def get_OppositeAccessPointId(self):
        return self.get_query_params().get('OppositeAccessPointId')

    def set_OppositeAccessPointId(self,OppositeAccessPointId):
        self.add_query_param('OppositeAccessPointId',OppositeAccessPointId)

    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self,ResourceOwnerId):
        self.add_query_param('ResourceOwnerId',ResourceOwnerId)

    def get_Role(self):
        return self.get_query_params().get('Role')

    def set_Role(self,Role):
        self.add_query_param('Role',Role)

    def get_ClientToken(self):
        return self.get_query_params().get('ClientToken')

    def set_ClientToken(self,ClientToken):
        self.add_query_param('ClientToken',ClientToken)

    def get_HealthCheckTargetIp(self):
        return self.get_query_params().get('HealthCheckTargetIp')

    def set_HealthCheckTargetIp(self,HealthCheckTargetIp):
        self.add_query_param('HealthCheckTargetIp',HealthCheckTargetIp)

    def get_Description(self):
        return self.get_query_params().get('Description')

    def set_Description(self,Description):
        self.add_query_param('Description',Description)

    def get_Spec(self):
        return self.get_query_params().get('Spec')

    def set_Spec(self,Spec):
        self.add_query_param('Spec',Spec)

    def get_OppositeInterfaceId(self):
        return self.get_query_params().get('OppositeInterfaceId')

    def set_OppositeInterfaceId(self,OppositeInterfaceId):
        self.add_query_param('OppositeInterfaceId',OppositeInterfaceId)

    def get_InstanceChargeType(self):
        return self.get_query_params().get('InstanceChargeType')

    def set_InstanceChargeType(self,InstanceChargeType):
        self.add_query_param('InstanceChargeType',InstanceChargeType)

    def get_Period(self):
        return self.get_query_params().get('Period')

    def set_Period(self,Period):
        self.add_query_param('Period',Period)

    def get_AutoPay(self):
        return self.get_query_params().get('AutoPay')

    def set_AutoPay(self,AutoPay):
        self.add_query_param('AutoPay',AutoPay)

    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)

    def get_OppositeRegionId(self):
        return self.get_query_params().get('OppositeRegionId')

    def set_OppositeRegionId(self,OppositeRegionId):
        self.add_query_param('OppositeRegionId',OppositeRegionId)

    def get_OwnerAccount(self):
        return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_OppositeInterfaceOwnerId(self):
return self.get_query_params().get('OppositeInterfaceOwnerId')
def set_OppositeInterfaceOwnerId(self,OppositeInterfaceOwnerId):
self.add_query_param('OppositeInterfaceOwnerId',OppositeInterfaceOwnerId)
def get_RouterType(self):
return self.get_query_params().get('RouterType')
def set_RouterType(self,RouterType):
self.add_query_param('RouterType',RouterType)
def get_HealthCheckSourceIp(self):
return self.get_query_params().get('HealthCheckSourceIp')
def set_HealthCheckSourceIp(self,HealthCheckSourceIp):
self.add_query_param('HealthCheckSourceIp',HealthCheckSourceIp)
def get_RouterId(self):
return self.get_query_params().get('RouterId')
def set_RouterId(self,RouterId):
self.add_query_param('RouterId',RouterId)
def get_OppositeRouterType(self):
return self.get_query_params().get('OppositeRouterType')
def set_OppositeRouterType(self,OppositeRouterType):
self.add_query_param('OppositeRouterType',OppositeRouterType)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_PricingCycle(self):
return self.get_query_params().get('PricingCycle')
def set_PricingCycle(self,PricingCycle):
self.add_query_param('PricingCycle',PricingCycle)
|
[
"1478458905@qq.com"
] |
1478458905@qq.com
|
620b6dda3cf88205a7c9f1e46efff99abe37eb7d
|
256728286889a60e5d8896efc6869483daba3280
|
/cinemanio/sites/imdb/migrations/0001_initial.py
|
1f14d2c4b9e565d481ff8bf0acc5215f8e05d89a
|
[
"MIT"
] |
permissive
|
cinemanio/backend
|
5236be94d08ec79b9fc8d8973aee93ec8fad9b1b
|
c393dc8c2d59dc99aa2c3314d3372b6e2bf5497f
|
refs/heads/master
| 2021-05-01T13:02:08.102705
| 2019-11-10T14:33:37
| 2019-11-10T14:33:37
| 121,069,149
| 4
| 0
|
MIT
| 2020-02-12T00:09:03
| 2018-02-11T01:00:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,273
|
py
|
# Generated by Django 2.0.1 on 2018-01-26 01:06
import cinemanio.sites.imdb.models
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Initial schema for the IMDb integration app.

    Creates ImdbMovie and ImdbPerson: each attaches an IMDb identifier
    (and, for movies, a rating) one-to-one to the core Movie/Person models.
    """

    # This is the first migration of the app.
    initial = True

    dependencies = [
        # core.Movie / core.Person must exist for the OneToOneFields below.
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='ImdbMovie',
            fields=[
                # The IMDb numeric id doubles as the primary key.
                ('id', models.PositiveIntegerField(primary_key=True, serialize=False, verbose_name='IMDb ID')),
                ('rating', models.FloatField(blank=True, db_index=True, null=True, verbose_name='IMDb rating')),
                ('movie', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='imdb', to='core.Movie')),
            ],
            # UrlMixin presumably adds URL helpers for the IMDb page — defined in the app's models module.
            bases=(models.Model, cinemanio.sites.imdb.models.UrlMixin),
        ),
        migrations.CreateModel(
            name='ImdbPerson',
            fields=[
                ('id', models.PositiveIntegerField(primary_key=True, serialize=False, verbose_name='IMDb ID')),
                ('person', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='imdb', to='core.Person')),
            ],
            bases=(models.Model, cinemanio.sites.imdb.models.UrlMixin),
        ),
    ]
|
[
"ramusus@gmail.com"
] |
ramusus@gmail.com
|
edf977c8ee2771f059d611fdf4b49337c5b6119e
|
a4174a9d51577d9b72b4e5dcf1be56bc9b0d242b
|
/retinanet/model/head/builder.py
|
b4153ffafb41099f951afdc540259b1454c0ab31
|
[
"Apache-2.0"
] |
permissive
|
lchen-wyze/retinanet-tensorflow2.x
|
996396724c858fdc954880f3c20db7865d930a87
|
86404a2da6ec636d4b1aef768ac52f018c127798
|
refs/heads/master
| 2023-08-23T06:12:39.629288
| 2021-10-18T15:52:23
| 2021-10-18T15:52:23
| 418,040,957
| 0
| 0
|
Apache-2.0
| 2021-10-17T06:26:21
| 2021-10-17T06:26:21
| null |
UTF-8
|
Python
| false
| false
| 2,157
|
py
|
import numpy as np
import tensorflow as tf
from retinanet.model.head.detection_head import DetectionHead
def build_detection_heads(
        params,
        min_level,
        max_level,
        conv_2d_op_params=None,
        normalization_op_params=None,
        activation_fn=None):
    """Build the box-regression and classification heads for the detector.

    Returns a ``(box_head, class_head)`` pair of ``DetectionHead`` layers
    covering pyramid levels ``min_level``..``max_level``.

    Raises:
        ValueError: if ``activation_fn`` is None.
    """
    if activation_fn is None:
        raise ValueError('`activation_fn` cannot be None')

    # Constructor options shared by both heads.
    shared_kwargs = {
        'num_convs': params.num_convs,
        'filters': params.filters,
        'min_level': min_level,
        'max_level': max_level,
        'conv_2d_op_params': conv_2d_op_params,
        'normalization_op_params': normalization_op_params,
        'activation_fn': activation_fn,
    }

    # 4 regression targets per anchor; zero-initialized prediction bias.
    box_head = DetectionHead(
        output_filters=params.num_anchors * 4,
        prediction_bias_initializer='zeros',
        name='box-head',
        **shared_kwargs)

    # Bias so every class starts near 1% predicted probability.
    prior_prob_init = tf.constant_initializer(-np.log((1 - 0.01) / 0.01))

    class_head = DetectionHead(
        output_filters=params.num_anchors * params.num_classes,
        prediction_bias_initializer=prior_prob_init,
        name='class-head',
        **shared_kwargs)

    return box_head, class_head
def build_auxillary_head(
        num_convs,
        filters,
        num_anchors,
        min_level,
        max_level,
        conv_2d_op_params=None,
        normalization_op_params=None,
        activation_fn=None):
    """Build the auxiliary detection head (one output channel per anchor).

    Raises:
        ValueError: if ``activation_fn`` is None.
    """
    if activation_fn is None:
        raise ValueError('`activation_fn` cannot be None')

    # -log((1 - 0.5) / 0.5) == 0: a neutral 50% starting probability.
    prior_prob_init = tf.constant_initializer(-np.log((1 - 0.5) / 0.5))

    return DetectionHead(
        num_convs=num_convs,
        filters=filters,
        output_filters=num_anchors,
        min_level=min_level,
        max_level=max_level,
        prediction_bias_initializer=prior_prob_init,
        conv_2d_op_params=conv_2d_op_params,
        normalization_op_params=normalization_op_params,
        activation_fn=activation_fn,
        name='auxillary-head')
|
[
"sriharihumbarwadi97@gmail.com"
] |
sriharihumbarwadi97@gmail.com
|
1f0aab49aa5a6590e8778e8b8366970e2e0a08f6
|
62babb33b9bede95aac217db04636956279bb2e2
|
/bit operation/1395C Boboniu and Bit Operations.py
|
90ae03a3fd60423b3df792021485ced2af7a8c6a
|
[] |
no_license
|
tycyd/codeforces
|
0322e31daf18544944c769fd2a50c6d006015e34
|
e0773f069c6c5793f9d9a07b61878a589e375a5f
|
refs/heads/master
| 2023-08-12T05:00:39.467404
| 2021-09-30T16:39:21
| 2021-09-30T16:39:21
| 266,847,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
from sys import stdin, stdout
# 1 1 1 => 1 0 0, 0 1 1
# 1 1 0 0 => 1 0 0
#
def boboniu_and_bit_operations(n, m, a_a, b_a):
    """Return the smallest k in [0, 512] such that every element of a_a
    has some partner in b_a with (a & b) | k == k (i.e. a & b is a subset
    of k's bits); return -1 if no such k exists."""
    for candidate in range(513):
        # candidate works iff, for each a, some b makes a & b a bit-subset of candidate.
        if all(any(((a & b) | candidate) == candidate for b in b_a) for a in a_a):
            return candidate
    return -1
# Problem input: first line "n m", then the two integer arrays, one per line.
n, m = map(int, stdin.readline().split())
a_a = list(map(int, stdin.readline().split()))
b_a = list(map(int, stdin.readline().split()))
stdout.write(str(boboniu_and_bit_operations(n, m, a_a, b_a)) + '\n')
|
[
"tycyd@hotmail.com"
] |
tycyd@hotmail.com
|
4286d6e8f7466f4a7c7b415049764bd995510e58
|
272cf6bd5f56812e14c2ed0df60d626859ec2c96
|
/imdb_scrapy/spiders/script.py
|
e4449b1818474a1e4a37f9c3fa7e6064e5dd476e
|
[] |
no_license
|
abhinavjha98/scrapy_simple_hired
|
a1b5933be5a401585f6cdfef48299b765cf25303
|
a0dbf812d1d4a5e16d8bf46633bdc95b747f2fd3
|
refs/heads/master
| 2023-01-24T05:46:24.639774
| 2020-11-30T17:17:09
| 2020-11-30T17:17:09
| 298,634,627
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,352
|
py
|
# -*- coding: utf-8 -*-
import scrapy
import urllib
import requests
# item class included here
class DmozItem(scrapy.Item):
# define the fields for your item here like:
ApplyLink = scrapy.Field()
Title = scrapy.Field()
Company = scrapy.Field()
Location = scrapy.Field()
salary = scrapy.Field()
Logo = scrapy.Field()
Description = scrapy.Field()
class DmozSpider(scrapy.Spider):
name = "dmoz"
page_number = 2
start_urls = [
'https://www.simplyhired.com/search?q=java&l=Philadelphia%2C+PA&job=fYxbZPaOvxUi_StIPQGdAhmm__9ReBI5jbVy7amchpkhgoG5xdkwUA'
]
BASE_URL = 'https://www.simplyhired.com'
def parse(self, response):
links = response.css('a.card-link').xpath("@href").extract()
for link in links:
absolute_url = self.BASE_URL + link
yield scrapy.Request(absolute_url, callback=self.parse_attr)
next_page = "https://www.simplyhired.com/search?q=java&l=Philadelphia%2C+PA&pn="+str(DmozSpider.page_number)+"&job=fYxbZPaOvxUi_StIPQGdAhmm__9ReBI5jbVy7amchpkhgoG5xdkwUA"
if DmozSpider.page_number<=91:
DmozSpider.page_number +=1
yield response.follow(next_page,callback=self.parse)
def parse_attr(self, response):
item = DmozItem()
logo = response.css('img.viewjob-company-logoImg').xpath("@src").extract()
try:
item["Logo"] = DmozSpider.BASE_URL+""+logo[0]
except:
item["Logo"] = 'none'
item["Title"] = response.css("div.viewjob-jobTitle::text").extract()
item["Location"] = response.css("div.viewjob-labelWithIcon::text")[1].extract()
item["Company"] = response.css("div.viewjob-labelWithIcon::text")[0].extract()
aa=response.css("div.p::text").extract()
text_list=""
for text in aa:
text = text.rstrip("\n")
text_list=text_list+text
item["Description"] = text_list
links = response.css('a.btn-apply').xpath("@href").extract()
# final_url = urllib.request.urlopen("https://www.simplyhired.com"+links[0],None,1).geturl()
final_url = requests.get("https://www.simplyhired.com"+links[0])
item["ApplyLink"] = final_url.url
item["salary"]=response.css("span.viewjob-labelWithIcon::text").extract()
return item
|
[
"abhinavjha98ald@gmail.com"
] |
abhinavjha98ald@gmail.com
|
8f3b6dd785a104a1985f13ba77bbd4751286ee03
|
7fd8a09fd94d09d568d67afcb4ecf3b60a936fe2
|
/Tests/TestEnvironment/test_config.py
|
ad9fcccfe8d638613e2087450489742dbd85bc2a
|
[
"MIT"
] |
permissive
|
dev-11/eigen-technical-task
|
4c2ac82c02f2cbd6b7020d2cbfc33beca20db37f
|
c0b041fc2bd27d2706ccdab94f6eb618f17098bd
|
refs/heads/master
| 2021-05-20T22:14:32.015768
| 2021-03-28T12:02:50
| 2021-03-28T12:02:50
| 252,434,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
# Configuration constants for the test environment.

# Directories (relative paths) scanned for input documents.
DIRECTORIES_TO_SCAN = ['test_docs/']
# File extension treated as plain-text input.
TXT_FILE_EXTENSION = 'txt'
# Weight given to an "interesting" word — presumably when no explicit weight
# is configured; confirm against the scoring code that consumes this.
DEFAULT_INTERESTING_WEIGHT = 1
# Rating threshold above which an item counts as interesting.
INTERESTING_RATING_THRESHOLD = 5
|
[
"otto@masterbranch.io"
] |
otto@masterbranch.io
|
19838a190c48902a9799ae5a54116786d9d5576b
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2901/58744/247697.py
|
300e82a6c695d61e1fd561bfba7acad1b071cf0a
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
# Read the integer to test from stdin.
num = int(input())
def isAlternatingBits(num):
    """Return True if the binary representation of num has no two equal
    adjacent bits (e.g. 101, 1010); any num <= 0 trivially returns True."""
    # Seed with the opposite of the lowest bit so the first comparison passes.
    prev = 1 - (num & 1)
    while num > 0:
        bit = num & 1
        if bit == prev:
            return False  # two identical adjacent bits
        prev = bit
        num >>= 1
    return True
# Print "true"/"false" in lowercase — presumably the judge's expected format.
print(str(isAlternatingBits(num)).lower())
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
79a8e4c562139987c47fe34f81f4bc9c48703f36
|
3db7b5409f2f9c57ab3f98bda50f8b548d98063d
|
/samples/tests/test_model_samples.py
|
ed82dd678c2f104779586f523aeefb3e7b00a9f1
|
[
"Apache-2.0"
] |
permissive
|
googleapis/python-bigquery
|
66db156b52e97565f6211b2fab5aac4e519fa798
|
3645e32aeebefe9d5a4bc71a6513942741f0f196
|
refs/heads/main
| 2023-09-01T07:41:24.893598
| 2023-08-23T19:04:13
| 2023-08-23T19:04:13
| 226,992,475
| 622
| 287
|
Apache-2.0
| 2023-09-12T04:31:26
| 2019-12-10T00:09:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,507
|
py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from .. import delete_model
from .. import get_model
from .. import list_models
from .. import update_model
if typing.TYPE_CHECKING:
import pytest
def test_model_samples(
    capsys: "pytest.CaptureFixture[str]", dataset_id: str, model_id: str
) -> None:
    """Since creating a model is a long operation, test all model samples in
    the same test, following a typical end-to-end flow.
    """
    # Fetch the model; the sample prints the model id on success.
    get_model.get_model(model_id)
    out, err = capsys.readouterr()
    assert model_id in out

    # List models in the dataset; the sample prints a dataset header line.
    list_models.list_models(dataset_id)
    out, err = capsys.readouterr()
    assert "Models contained in '{}':".format(dataset_id) in out

    # Update the model; the sample prints the new description text.
    update_model.update_model(model_id)
    out, err = capsys.readouterr()
    assert "This model was modified from a Python program." in out

    # Finally delete the model and confirm the deletion message.
    delete_model.delete_model(model_id)
    out, err = capsys.readouterr()
    assert "Deleted model '{}'.".format(model_id) in out
|
[
"noreply@github.com"
] |
googleapis.noreply@github.com
|
a43a6ca183fe13cab45ff1ffe654cb22df55bdd3
|
b3f6daa5d6c987eb8a61d5fe125bf2a98997e259
|
/8kyu/Simple multiplication/index.py
|
0853411208f8f60cc3ab604295bcd6f49ea44358
|
[] |
no_license
|
krnets/codewars-practice
|
53a0a6c9d2d8c2b94d6799a12f48dd588179a5ce
|
5f8e1cc1aebd900b9e5a276884419fc3e1ddef24
|
refs/heads/master
| 2022-12-20T19:33:43.337581
| 2022-12-16T05:32:39
| 2022-12-16T05:32:39
| 217,464,785
| 1
| 0
| null | 2020-07-20T08:36:31
| 2019-10-25T06:20:41
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 668
|
py
|
# 8kyu - Simple multiplication
""" This kata is about multiplying a given number by eight if it is an even number and by nine otherwise. """
# def simple_multiplication(number):
# return 8 * number if number % 2 == 0 else 9 * number
# def simple_multiplication(number):
# return number * (8 if number % 2 == 0 else 9)
# def simple_multiplication(number):
# return number * [8, 9][number % 2]
def simple_multiplication(number):
    """Multiply `number` by 8 when it is even and by 9 when it is odd."""
    multiplier = 8 + number % 2  # 8 for even input, 9 for odd
    return multiplier * number
# Manual REPL-style checks: the bare `q` lines only echo a value in an
# interactive session / notebook and have no effect when run as a script.
q = simple_multiplication(2) # 16
q
q = simple_multiplication(1) # 9
q
q = simple_multiplication(8) # 64
q
q = simple_multiplication(4) # 32
q
q = simple_multiplication(5) # 45
q
|
[
"cmantheo@gmail.com"
] |
cmantheo@gmail.com
|
c4629c6296276f6dd000ac6acc97097972160f92
|
4755dabdcff6a45b9c15bf9ea814c6b8037874bd
|
/build/laser_proc/catkin_generated/pkg.installspace.context.pc.py
|
8aa2d2e231584bb4c6aa2e425d2a5cc3e336be50
|
[] |
no_license
|
Rallstad/RobotSnake
|
676a97bdfde0699736d613e73d539929a0c2b492
|
37ee6d5af0458b855acf7c2b83e0ee17833dbfd1
|
refs/heads/master
| 2023-01-03T05:46:46.268422
| 2018-05-27T16:01:47
| 2018-05-27T16:01:47
| 308,665,980
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: auto-generated by the catkin build — edit the template, not this file.
CATKIN_PACKAGE_PREFIX = ""
# The ternaries guard against an empty template substitution producing [''].
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/snake/Documents/catkin_ws/install/include".split(';') if "/home/snake/Documents/catkin_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;sensor_msgs;rosconsole;nodelet".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-llaser_proc_library;-llaser_publisher;-llaser_transport;-llaser_proc_ROS;-lLaserProcNodelet".split(';') if "-llaser_proc_library;-llaser_publisher;-llaser_transport;-llaser_proc_ROS;-lLaserProcNodelet" != "" else []
PROJECT_NAME = "laser_proc"
PROJECT_SPACE_DIR = "/home/snake/Documents/catkin_ws/install"
PROJECT_VERSION = "0.1.4"
|
[
"vetle.fredriksen@gmail.com"
] |
vetle.fredriksen@gmail.com
|
0d9c589064bdfa802bbc69912c2b119c8b1a3167
|
5b3d8b5c612c802fd846de63f86b57652d33f672
|
/Python/seven_kyu/to_jaden_case.py
|
6f1011c1120d950fcc87a4462cab4f25505b6208
|
[
"Apache-2.0"
] |
permissive
|
Brokenshire/codewars-projects
|
1e591b57ed910a567f6c0423beb194fa7f8f693e
|
db9cd09618b8a7085b0d53ad76f73f9e249b9396
|
refs/heads/master
| 2021-07-22T18:50:25.847592
| 2021-01-25T23:27:17
| 2021-01-25T23:27:17
| 228,114,677
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 942
|
py
|
# Python solution for 'Jaden Casing Strings' codewars question.
# Level: 7 kyu
# Tags: Fundamentals, Strings, and Arrays.
# Author: Jack Brokenshire
# Date: 17/02/2020
import unittest
def to_jaden_case(string):
    """
    Your task is to convert strings to how they would be written by Jaden Smith. The strings are actual quotes from
    Jaden Smith, but they are not capitalized in the same way he originally typed them.
    :param string: A string value input.
    :return: A new string with each word in the sentence capitalized.
    """
    capitalized_words = (word.capitalize() for word in string.split())
    return " ".join(capitalized_words)
class TestToJadenCase(unittest.TestCase):
    """Unit tests for the 'to_jaden_case' function."""

    def test_name_list(self):
        source = "How can mirrors be real if our eyes aren't real"
        expected = "How Can Mirrors Be Real If Our Eyes Aren't Real"
        self.assertEqual(to_jaden_case(source), expected)
# Run the unit tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
[
"29889878+Brokenshire@users.noreply.github.com"
] |
29889878+Brokenshire@users.noreply.github.com
|
b34d5bebd57109d20aee7fec341878dfb3c9875c
|
31eaed64b0caeda5c5fe3603609402034e6eb7be
|
/python_zumbi/py_web/test_scraping_2.py
|
8504ae20c38d531160f7f991a12e83e59ccd487b
|
[] |
no_license
|
RaphaelfsOliveira/workspace_python
|
93657b581043176ecffb5783de208c0a00924832
|
90959697687b9398cc48146461750942802933b3
|
refs/heads/master
| 2021-01-11T17:39:49.574875
| 2017-06-28T20:55:43
| 2017-06-28T20:55:43
| 79,814,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
import urllib.request  # module that lets us talk to the internet

# Fetch the coffee-bean price page and decode it as UTF-8 text.
pagina = urllib.request.urlopen(
    'http://beans.itcarlow.ie/prices-loyalty.html')
text = pagina.read().decode('utf8')
print(text)

# Locate the price: it is the 4 characters following the '>$' marker in the HTML.
i = text.find('>$')
preco = float(text[i+2:i+6])
# Portuguese output: "Em Promoção" = on sale, "Está Caro" = too expensive.
if preco < 4.74:
    print('Em Promoção: ', preco)
else:
    print('Está Caro!!: ', preco)
|
[
"raphaelbrf@gmail.com"
] |
raphaelbrf@gmail.com
|
d807a7d1a649fac018c6da8614952df89a7cdc5e
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_snowboard.py
|
5100be7c8c861988ab39e3be570cce2fce7b2eba
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
#calss header
class _SNOWBOARD():
def __init__(self,):
self.name = "SNOWBOARD"
self.definitions = [u'to slide on the snow by standing on a specially shaped board: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
751a74264a973fe1ab989c874cc4a9a039bd45e4
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_55/373.py
|
65e1a8ea83c63527538a4d324820da9d12a0a74e
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,581
|
py
|
'''
Created on May 9, 2010

@author: indra

Google Code Jam solver (Python 2): reads C-large.in next to this script and
writes the answers to C-large.out.  Each case simulates R roller-coaster runs
over N groups with seat capacity k, using cycle detection on the queue's start
position to avoid simulating all R runs.
'''
import sys, os
filename = "C-large"
# Input/output files live in the same directory as this script.
path = os.path.normpath(os.path.join(os.path.dirname(__file__), filename+".in"))
reader = open(path, "rb")
path = os.path.normpath(os.path.join(os.path.dirname(__file__), filename+".out"))
writer = open(path,"w")
ncases = int(reader.readline().rstrip())
caseno = 0
while caseno<ncases:
    caseno+=1
    # Case header: R runs, capacity k, N groups; then the group sizes.
    case = reader.readline().rstrip()
    R,k,N = [int(x) for x in case.split(' ')]
    case = reader.readline().rstrip()
    gps = [int(x) for x in case.split(' ')]
    totp = 0
    for gp in gps:
        totp+=gp
    # Debug output (Python 2 print statements).
    print (R,k,N)
    print gps
    print totp
    # If everyone fits in one run, every run earns totp.
    if totp<=k:
        writer.write("Case #%s: %d\n" % (str(caseno),R*totp))
        continue
    # rides[pos] = run index at which the queue first started at pos;
    # money[pos] = earnings accumulated up to that run.  Used for cycle detection.
    rides = [-1]*N
    money = [0]*N
    retmon = 0
    curloc = 0
    curride = 0
    curmon = 0
    # Simulate runs until the start position repeats (a cycle) or R runs are done.
    while rides[curloc]==-1 and curride<R:
        rides[curloc] = curride
        money[curloc] = curmon
        curride+=1
        # Board whole groups while they fit, wrapping around the queue.
        tem=0
        while tem+gps[curloc]<=k:
            tem+=gps[curloc]
            curloc+=1
            if curloc>=N:
                curloc-=N
        curmon+=tem
    if curride==R:
        # Finished all runs before a cycle appeared.
        writer.write("Case #%s: %d\n" % (str(caseno),curmon))
        continue
    # A cycle: its length in runs and the money earned per cycle.
    cycrides = curride - rides[curloc]
    cycmoney = curmon - money[curloc]
    # Account for the pre-cycle prefix, then whole cycles (py2 integer '/').
    R-=rides[curloc]
    retmon+=money[curloc]
    rleft = R%cycrides
    retmon += cycmoney*((R-rleft)/cycrides)
    # Simulate the leftover partial cycle run by run.
    lastrides = 0
    while lastrides<rleft:
        lastrides+=1
        tem=0
        while tem+gps[curloc]<=k:
            tem+=gps[curloc]
            curloc+=1
            if curloc>=N:
                curloc-=N
        retmon+=tem
    writer.write("Case #%s: %d\n" % (str(caseno),retmon))
writer.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
aba1b20ca910395e8e556c928a2bf6e5d53cdac8
|
2d8da5cacd21dd425688d67e1a92faa50aefc6bc
|
/excel-sheet-column-number.py
|
c90dd1c70703b45a9911aa35628d96708bba7730
|
[] |
no_license
|
stella-shen/Leetcode
|
970857edb74ae3ccf4bcce0c40e972ab8bcc5348
|
16ad99a6511543f0286559c483206c43ed655ddd
|
refs/heads/master
| 2021-01-19T02:48:49.918054
| 2018-11-29T10:36:43
| 2018-11-29T10:36:43
| 47,523,042
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
class Solution(object):
    """LeetCode 171 — Excel Sheet Column Number."""

    def titleToNumber(self, s):
        """Convert an Excel column title (e.g. 'A', 'AB') to its number.

        Treats the title as a base-26 numeral with digits A=1 .. Z=26.
        Fix: the original iterated with the Python-2-only ``xrange`` and
        indexed into the string; iterating the characters directly gives
        identical results and also runs on Python 3.
        :type s: str
        :rtype: int
        """
        ret = 0
        for ch in s:
            ret = ret * 26 + (ord(ch) - ord('A') + 1)
        return ret
# Ad-hoc smoke test (note: Python 2 print statement).
if __name__ == '__main__':
    sol = Solution()
    s = "AB"
    # Expected output: 28.
    print sol.titleToNumber(s)
|
[
"szsxt818@gmail.com"
] |
szsxt818@gmail.com
|
d0686bbf88f5f164a24afb5e2449f189d6ba2b4b
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/abc008/B/4886377.py
|
4b2e8bb008ccb62443ac42cbdabfef1b5a1468e8
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
import collections

# Read N names from stdin and print the most frequent one.  On ties,
# most_common presumably yields the first-inserted name (CPython 3.7+
# insertion-order behavior) — verify if tie-breaking matters.
N = int(input())
names = [input() for i in range(N)]
max_ele = collections.Counter(names)
print(max_ele.most_common()[0][0])
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
2c7332530c6106c9f596a55673e138596fa175ad
|
be7a0aa49a9b4fdad1b8b21c6f1eb6bd508be109
|
/ex027vs1.py
|
8f5869722797ed74a9a1bd50c65b05a9267c8f63
|
[] |
no_license
|
tlima1011/python3-curso-em-video
|
29a60ee3355d6cb3ba8d1f48c6a3ecd7bc6e60dd
|
f6454f4d636a2bf73c151e67710f732e2d8e738c
|
refs/heads/master
| 2021-02-04T01:13:35.313590
| 2020-04-14T12:51:19
| 2020-04-14T12:51:19
| 243,593,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
# Ask for the user's full name, then print the first and last names
# capitalized (prompt and output messages are intentionally in Portuguese).
nomeCompleto = str(input('Informe seu nome completo.: ')).strip()
nomeCompleto = nomeCompleto.split()
print(f'Primeiro nome é {nomeCompleto[0].capitalize()} e o último é {nomeCompleto[-1].capitalize()}')
|
[
"noreply@github.com"
] |
tlima1011.noreply@github.com
|
be9c106d741c93b8522ff5e49ea7ff2e5f2b74fe
|
aeeba89591b787bbe6b93ffb4889be9a8fca521e
|
/cfg.py
|
cf7791d7a7b0fe3603dac542a0bbc59c1ee3d3aa
|
[
"MIT"
] |
permissive
|
wistic/python-web-crawler
|
efa7968f66ecd7396797390f253d0ff68f3623a1
|
e3738fd49d77bdff4c43a0ec31ed36cc381d26b8
|
refs/heads/master
| 2022-12-10T05:38:40.030202
| 2020-08-28T14:24:38
| 2020-08-28T14:24:38
| 288,676,553
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
# Crawler settings read by the spider at startup.
config = {
    "root_url": "https://flinkhub.com",        # seed URL for the crawl
    "sleep_timer": 5,                          # delay between requests (seconds, presumably)
    "Max_links_limit": 5000,                   # hard cap on links stored
    "Recrawl_time_limit_hours": 24,            # re-visit a link only after this many hours
    "user_agent": "Python Spiderbot",          # User-Agent header sent with requests
    "No_of_threads": 5,                        # worker thread count
    "database_name": "python-web-crawler",     # MongoDB database name
    "collection_name": "Links",                # MongoDB collection holding links
    "connection_uri": "mongodb://localhost:27017/",
    "download_dir_path": "/home/wistic/github/python-web-crawler/html-files"  # where fetched HTML is saved
}
|
[
"prathameshkatkar11@gmail.com"
] |
prathameshkatkar11@gmail.com
|
6433092cbee060d537b5cb9919a76a1ec7c5ab85
|
683b73e0c95c755a08e019529aed3ff1a8eb30f8
|
/machina/apps/forum_conversation/forum_attachments/admin.py
|
de1995638c922ddee9447fdc8ec8937ae0ebd484
|
[
"BSD-3-Clause"
] |
permissive
|
DrJackilD/django-machina
|
b3a7be9da22afd457162e0f5a147a7ed5802ade4
|
76858921f2cd247f3c1faf4dc0d9a85ea99be3e1
|
refs/heads/master
| 2020-12-26T08:19:09.838794
| 2016-03-11T03:55:25
| 2016-03-11T03:55:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
# -*- coding: utf-8 -*-

# Standard library imports
# Third party imports
from django.contrib import admin

# Local application / specific library imports
from machina.core.db.models import get_model

# Resolve the (swappable) Attachment model at import time.
Attachment = get_model('forum_attachments', 'Attachment')


class AttachmentAdmin(admin.ModelAdmin):
    """Django admin options for forum post attachments."""
    # Columns shown in the attachment changelist; the first three link to the change form.
    list_display = ('id', 'post', 'comment', 'file', )
    list_display_links = ('id', 'post', 'comment', )
    # Raw-ID widget for the post FK avoids rendering a select of every post.
    raw_id_fields = ('post', )


admin.site.register(Attachment, AttachmentAdmin)
|
[
"morgan.aubert@zoho.com"
] |
morgan.aubert@zoho.com
|
0e13ea228a661ee0d8e2c5bfce784e4d705a8f66
|
09b0075f56455d1b54d8bf3e60ca3535b8083bdc
|
/WideResnet.py
|
595e4f69f1baa13a9f27f80fdb61e54773195de4
|
[] |
no_license
|
YanYan0716/MPL
|
e02c1ddf036d6019c3596fd932c51c3a14187f5e
|
6ad82b050ec1ed81987c779df2dddff95dc1cde5
|
refs/heads/master
| 2023-04-17T23:05:54.164840
| 2021-05-07T01:14:49
| 2021-05-07T01:14:49
| 331,491,485
| 11
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,157
|
py
|
import os
from abc import ABC  # NOTE(review): ABC appears unused in this module

# Silence TensorFlow's C++ INFO/WARNING logs; must be set before importing tf.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers

import config
class BasicBlock(layers.Layer):
    """Pre-activation residual block: BN -> LeakyReLU -> Conv, applied twice.

    When ``stride != 1`` or the channel count changes, the shortcut branch is
    a 1x1 strided convolution applied to the batch-normalized input;
    otherwise the raw input is added back unchanged.
    """

    def __init__(self, in_channels, out_channels, stride, dropout, name, trainable):
        super(BasicBlock, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        # NOTE(review): self.dropout first holds the rate, then is rebound to
        # the Dropout layer below; the layer itself is never used in call().
        self.dropout = dropout
        # name = name
        self.trainable = trainable
        self.bn1 = layers.BatchNormalization(
            # momentum=0.999,
            epsilon=config.BATCH_NORM_EPSILON,
            trainable=self.trainable,
            name=name+'_bn1'
        )
        self.relu1 = layers.LeakyReLU(alpha=0.2)
        # First 3x3 conv carries the block's stride.
        self.conv1 = layers.Conv2D(
            filters=self.out_channels,
            kernel_size=3,
            strides=self.stride,
            padding='same',
            use_bias=False,
            kernel_initializer=keras.initializers.HeNormal(),
            kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
            trainable=self.trainable,
            name=name+'_conv1',
        )
        self.bn2 = layers.BatchNormalization(
            # momentum=0.999,
            epsilon=config.BATCH_NORM_EPSILON,
            trainable=self.trainable,
            name=name+'_bn2'
        )
        self.relu2 = layers.LeakyReLU(alpha=0.2)
        self.dropout = layers.Dropout(
            rate=self.dropout,
            trainable=self.trainable,
            name=name+'_dropout',
        )
        # Second 3x3 conv always has stride 1.
        self.conv2 = layers.Conv2D(
            filters=self.out_channels,
            kernel_size=3,
            strides=1,
            padding='same',
            use_bias=False,
            kernel_initializer=keras.initializers.HeNormal(),
            kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
            trainable=self.trainable,
            name=name+'_conv2',
        )
        # Projection shortcut only when the spatial size or channels change.
        if self.stride != 1 or self.in_channels != self.out_channels:
            self.short_cut_relu = layers.LeakyReLU(alpha=0.2)
            self.short_cut = layers.Conv2D(
                filters=self.out_channels,
                kernel_size=1,
                strides=self.stride,
                padding='same',
                use_bias=False,
                kernel_initializer=keras.initializers.HeNormal(),
                kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
                trainable=self.trainable,
                name=name+'_shortcut'
            )
        self.add = layers.Add(name=name+'_add')

    def call(self, inputs, **kwargs):
        residual = inputs
        out = self.bn1(inputs)
        # For projection blocks the shortcut starts from the post-BN tensor.
        if self.stride != 1 or self.in_channels != self.out_channels:
            residual = out
        out = self.relu1(out)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.relu2(out)
        out = self.conv2(out)
        if self.stride != 1 or self.in_channels != self.out_channels:
            residual = self.short_cut_relu(residual)
            residual = self.short_cut(residual)
        # else:
        #     shortcut = out
        out = self.add([residual, out])
        return out
class WideResnet(keras.Model):
    """Wide-ResNet-style classifier built from 12 BasicBlocks.

    Widths come from ``k``: three stages of four blocks each at widths
    k[1], k[2], k[3]; stages 2 and 3 downsample with stride 2.  The final
    Dense layer has no activation, so the model outputs raw logits.
    """

    def __init__(self, k=[16, 32, 64, 128], name='wider'):
        # NOTE(review): mutable default argument `k` — safe only while k is
        # never mutated; consider a tuple.
        super(WideResnet, self).__init__(name=name)
        self.k = k
        self.dropout = config.DROPOUT
        # Dropout applied between global pooling and the classifier head.
        self.drop = layers.Dropout(
            rate=config.DROPOUT,
            trainable=self.trainable,
            name=name+'_dropout',
        )
        # Stem: 3x3 conv to k[0] channels, stride 1.
        self.conv1 = layers.Conv2D(
            filters=k[0],
            kernel_size=3,
            strides=1,
            padding='same',
            use_bias=False,
            kernel_initializer=keras.initializers.HeNormal(),
            kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
            trainable=self.trainable,
            name=name + '_conv1',
        )
        # Stage 1: width k[1], stride 1 throughout.
        self.Basic1 = BasicBlock(in_channels=k[0], out_channels=k[1], stride=1, dropout=self.dropout, name=name+'_Basic1', trainable=True)
        self.Basic2 = BasicBlock(in_channels=k[1], out_channels=k[1], stride=1, dropout=self.dropout, name=name+'_Basic2', trainable=True)
        self.Basic3 = BasicBlock(in_channels=k[1], out_channels=k[1], stride=1, dropout=self.dropout, name=name+'_Basic3', trainable=True)
        self.Basic4 = BasicBlock(in_channels=k[1], out_channels=k[1], stride=1, dropout=self.dropout, name=name+'_Basic4', trainable=True)
        # Stage 2: width k[2]; first block downsamples (stride 2).
        self.Basic5 = BasicBlock(in_channels=k[1], out_channels=k[2], stride=2, dropout=self.dropout, name=name+'_Basic5', trainable=True)
        self.Basic6 = BasicBlock(in_channels=k[2], out_channels=k[2], stride=1, dropout=self.dropout, name=name+'_Basic6', trainable=True)
        self.Basic7 = BasicBlock(in_channels=k[2], out_channels=k[2], stride=1, dropout=self.dropout, name=name+'_Basic7', trainable=True)
        self.Basic8 = BasicBlock(in_channels=k[2], out_channels=k[2], stride=1, dropout=self.dropout, name=name+'_Basic8', trainable=True)
        # Stage 3: width k[3]; first block downsamples (stride 2).
        self.Basic9 = BasicBlock(in_channels=k[2], out_channels=k[3], stride=2, dropout=self.dropout, name=name+'_Basic9', trainable=True)
        self.Basic10 = BasicBlock(in_channels=k[3], out_channels=k[3], stride=1, dropout=self.dropout, name=name+'_Basic10', trainable=True)
        self.Basic11 = BasicBlock(in_channels=k[3], out_channels=k[3], stride=1, dropout=self.dropout, name=name+'_Basic11', trainable=True)
        self.Basic12 = BasicBlock(in_channels=k[3], out_channels=k[3], stride=1, dropout=self.dropout, name=name+'_Basic12', trainable=True)
        self.bn1 = layers.BatchNormalization(
            # momentum=0.999,
            epsilon=config.BATCH_NORM_EPSILON,
            trainable=self.trainable,
            name=name+'_bn1'
        )
        self.relu1 = layers.LeakyReLU(alpha=0.2)
        self.avgpool = layers.GlobalAveragePooling2D(name=name+'_avgpool')
        # Classifier head; no softmax — downstream loss is expected to work on logits.
        self.dense = layers.Dense(
            units=config.NUM_CLASS,
            # kernel_initializer=keras.initializers.RandomNormal(mean=0., stddev=1.),
            # activation='softmax',
            kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
            name=name+'_dense',
        )

    def call(self, inputs, training=None, mask=None):
        x = self.conv1(inputs)
        x = self.Basic1(x)
        x = self.Basic2(x)
        x = self.Basic3(x)
        x = self.Basic4(x)
        x = self.Basic5(x)
        x = self.Basic6(x)
        x = self.Basic7(x)
        x = self.Basic8(x)
        x = self.Basic9(x)
        x = self.Basic10(x)
        x = self.Basic11(x)
        x = self.Basic12(x)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.avgpool(x)
        x = self.drop(x)
        out = self.dense(x)
        return out

    def model(self):
        # Wrap the subclassed model in a functional Model with a fixed
        # 32x32x3 input so .summary() can report shapes.
        input = keras.Input(shape=(32, 32, 3), dtype=tf.float32)
        return keras.Model(inputs=input, outputs=self.call(input))
if __name__ == '__main__':
    # Smoke test: build the model and print its layer summary.
    # (The previous `img = tf.random.normal(...)` was never used and is removed.)
    model = WideResnet().model()
    model.summary()
|
[
"yanqian0716@gmail.com"
] |
yanqian0716@gmail.com
|
b5d716b2740e66732492a580f7db8280232f261e
|
d3d8acc788bd3a8d7e5f861ad87c4d802723062b
|
/test/step3_descope200MCHF_HLT.py
|
c2272355f19530f27df01562b14bf70d1dee3ae4
|
[] |
no_license
|
calabria/L1IntegratedMuonTrigger
|
27ff0bde46208f84595423ec375080979fbe4c62
|
05a368b8d04f84b675d40445555f2cacfd135e4e
|
refs/heads/master
| 2021-01-24T21:57:42.232290
| 2015-08-11T11:52:35
| 2015-08-11T11:52:35
| 38,485,204
| 0
| 2
| null | 2015-08-11T11:52:35
| 2015-07-03T09:40:57
|
Python
|
UTF-8
|
Python
| false
| false
| 4,607
|
py
|
# Auto generated configuration file
# using:
# Revision: 1.20
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: step3_descope200MCHF --fileout file:out_hlt_descope200MCHF.root --mc --eventcontent RECOSIM --step HLT --customise RecoParticleFlow/PandoraTranslator/customizeHGCalPandora_cff.cust_2023HGCalPandoraMuon,Configuration/DataProcessing/Utils.addMonitoring,L1Trigger/L1IntegratedMuonTrigger/phase2DescopingScenarios.descope200MCHF --datatier GEN-SIM-RECO --conditions PH2_1K_FB_V6::All --magField 38T_PostLS1 --filein file:/afs/cern.ch/work/d/dildick/public/GEM/MuonPhaseIIScopeDoc/CMSSW_6_2_0_SLHC26_patch3/src/001B71CC-0F38-E511-BEE2-002618943918.root --geometry Extended2023HGCalMuon,Extended2023HGCalMuonReco --no_exec -n 10
# NOTE: this file is ConfigBuilder output; regenerate with cmsDriver rather than
# hand-editing the sequences below.
import FWCore.ParameterSet.Config as cms
process = cms.Process('HLT')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.Geometry.GeometryExtended2023HGCalMuonReco_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')
process.load('HLTrigger.Configuration.HLT_GRun_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# Process only 10 events (from the -n 10 cmsDriver option).
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(10)
)
# Input source
process.source = cms.Source("PoolSource",
    secondaryFileNames = cms.untracked.vstring(),
    fileNames = cms.untracked.vstring('file:/afs/cern.ch/work/d/dildick/public/GEM/MuonPhaseIIScopeDoc/CMSSW_6_2_0_SLHC26_patch3/src/001B71CC-0F38-E511-BEE2-002618943918.root')
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
    version = cms.untracked.string('$Revision: 1.20 $'),
    annotation = cms.untracked.string('step3_descope200MCHF nevts:10'),
    name = cms.untracked.string('Applications')
)
# Output definition
process.RECOSIMoutput = cms.OutputModule("PoolOutputModule",
    splitLevel = cms.untracked.int32(0),
    eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
    outputCommands = process.RECOSIMEventContent.outputCommands,
    fileName = cms.untracked.string('file:out_hlt_descope200MCHF.root'),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string(''),
        dataTier = cms.untracked.string('GEN-SIM-RECO')
    )
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'PH2_1K_FB_V6::All', '')
# Path and EndPath definitions
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RECOSIMoutput_step = cms.EndPath(process.RECOSIMoutput)
# Schedule definition
process.schedule = cms.Schedule()
process.schedule.extend(process.HLTSchedule)
process.schedule.extend([process.endjob_step,process.RECOSIMoutput_step])
# customisation of the process.
# Automatic addition of the customisation function from RecoParticleFlow.PandoraTranslator.customizeHGCalPandora_cff
from RecoParticleFlow.PandoraTranslator.customizeHGCalPandora_cff import cust_2023HGCalPandoraMuon
#call to customisation function cust_2023HGCalPandoraMuon imported from RecoParticleFlow.PandoraTranslator.customizeHGCalPandora_cff
process = cust_2023HGCalPandoraMuon(process)
# Automatic addition of the customisation function from HLTrigger.Configuration.customizeHLTforMC
from HLTrigger.Configuration.customizeHLTforMC import customizeHLTforMC
#call to customisation function customizeHLTforMC imported from HLTrigger.Configuration.customizeHLTforMC
process = customizeHLTforMC(process)
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
# Automatic addition of the customisation function from L1Trigger.L1IntegratedMuonTrigger.phase2DescopingScenarios
from L1Trigger.L1IntegratedMuonTrigger.phase2DescopingScenarios import descope200MCHF
#call to customisation function descope200MCHF imported from L1Trigger.L1IntegratedMuonTrigger.phase2DescopingScenarios
process = descope200MCHF(process)
# End of customisation functions
|
[
"sven.dildick@cern.ch"
] |
sven.dildick@cern.ch
|
6537118072122509e9adad7738eee5616a1b24dd
|
fc83fc10fcc509316e612d73bd40a81d3ca0a2e6
|
/tests/nd_gaussian_multiprocessing.py
|
1f8c698393e3a088d991eb3484785a391dc3c783
|
[
"MIT"
] |
permissive
|
DimitriMisiak/mcmc-red
|
47dfb7e0664205da55fa463df77851722082e3c3
|
caae0ce39d082e578176a5078a9184980b0851c3
|
refs/heads/main
| 2023-06-19T04:10:42.385862
| 2019-07-05T07:45:01
| 2019-07-05T07:45:01
| 387,757,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,928
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Handy MCMC scripts.
Test for the different fit method (mcmc, ptmcmc, minimizer).
Author:
Dimitri Misiak (misiak@ipnl.in2p3.fr)
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import scipy.signal as sgl
from os import path
import scipy.optimize as op
import mcmc_red as mcr
# close all plots
plt.close('all')
# Size of the mock data set and dimensionality of the Gaussian blob.
nsample = 1000
ndim = 4
# 'linear' or 'log': controls how the true parameters and bounds are drawn.
SCALE = 'log'
### LINEAR SCALE
if SCALE == 'linear':
    mu = np.random.uniform(-10, 10, ndim)
    sigma = np.random.uniform(0, 10, ndim)
    bounds = ((-20, 20),) * ndim
### LOG SCALE
elif SCALE == 'log':
    mu_generator = np.random.uniform(-6, 0, ndim)
    mu = 10**mu_generator
    # 10% relative spread around each true mean.
    sigma = mu/10
    bounds = ((1e-7, 1e1),) * ndim
else:
    raise Exception('SCALE not set properly!')
print("Generating blob at mu={0} and sigma={1}".format(mu, sigma))
blob = np.random.normal(mu, sigma, (nsample, ndim))
print("Checking")
print("mean =", np.mean(blob, axis=0))
print("std =", np.std(blob, axis=0))
def chi2(param):
    # Chi-square of `param` against every sample in `blob`, with per-dimension
    # uncertainties `sigma` (delegated to the mcmc_red helper).
    return mcr.chi2_simple(blob, param, sigma)
#def chi2(param):
#    x2 = np.sum( (blob - np.array(param))**2 / np.array(sigma)**2 )
#    return x2
condi = None
# XXX MCMC
# save directory
sampler_path = 'mcmc_sampler/autosave'
# Split the (inf, sup) bound pairs into two parallel arrays so the
# log-likelihood can test all dimensions with one vectorised comparison.
bounds = list(bounds)
binf = list()
bsup = list()
for b in bounds:
    inf, sup = b
    binf.append(inf)
    bsup.append(sup)
binf = np.array(binf)
bsup = np.array(bsup)
# additional constraint as a function of the parameters
# (idiom fix: compare to None with `is`, not `==`)
if condi is None:
    condi = lambda p: True
# Log-likelihood function taking the bounds into account
def loglike(x):
    """Log-likelihood, -chi2/2, clipped to -inf outside the box bounds.

    Returns -np.inf when any component of ``x`` falls outside
    [binf, bsup] or the extra constraint ``condi`` rejects the point.
    """
    cinf = np.sum(x < binf)
    csup = np.sum(x > bsup)
    # Idiom fix: rely on truthiness instead of comparing `== True`.
    if cinf == 0 and csup == 0 and condi(x):
        # return -0.5*aux(np.power(10,x))
        return -0.5 * chi2(x)
    return -np.inf
# Run the MCMC analysis (2 worker processes); results are autosaved under sampler_path.
sampler = mcr.mcmc_sampler_multi(loglike, bounds, nsteps=1000, path=sampler_path, threads=2, scale=SCALE)
#nwalkers=None
#nsteps=10000
#threads=4
##############################################################################
## extracts the sup bounds and the inf bounds
#bounds = list(bounds)
#binf = list()
#bsup = list()
#for b in bounds:
# inf, sup = b
# binf.append(inf)
# bsup.append(sup)
#binf = np.array(binf)
#bsup = np.array(bsup)
#
#condi = None
## additionnal constrain as function of the parameters
#if condi == None:
# condi = lambda p: True
#
## Loglikelihood function taking into accounts the bounds
#def loglike(x):
# """ Loglikelihood being -chi2/2.
# Take into account the bounds.
# """
# cinf = np.sum(x<binf)
# csup = np.sum(x>bsup)
# if cinf == 0 and csup == 0 and condi(x) == True:
## return -0.5*aux(np.power(10,x))
# return -0.5*chi2(x)
#    else:
# return -np.inf
#
## number of parameters/dimensions
#ndim = len(bounds)
#
## default nwalkers
#if nwalkers == None:
# nwalkers = 10 * ndim
#
## walkers are uniformly spread in the parameter space
#pos = list()
#for n in xrange(nwalkers):
# accept = False
# while not accept:
# new_pos = [
# np.random.uniform(low=l, high=h) for l,h in zip(binf, bsup)
# ]
# accept = condi(new_pos)
# pos.append(new_pos)
#
## MCMC analysis
#sampler = emcee.EnsembleSampler(nwalkers, ndim, loglike, threads=threads)
#sampler.run_mcmc(pos, nsteps, rstate0=np.random.get_state())
#############################################################################
# # loading the mcmc results
logd, chain, lnprob, acc = mcr.get_mcmc_sampler(sampler_path)
# Axis labels for the result plots, one per dimension.
lab = tuple(['$\mu${}'.format(i) for i in range(ndim)])
dim = int(logd['dim'])
# Best-fit point plus lower/upper credible bounds for each parameter.
xopt, inf, sup = mcr.mcmc_results(dim, chain, lnprob, acc, lab,
                                  scale=SCALE, savedir=sampler_path)
print(xopt, inf, sup)
|
[
"dimitrimisiak@gmail.com"
] |
dimitrimisiak@gmail.com
|
05c06ff5850ee1f5cbab0d42f5704ce5b0f4acb3
|
57d1580fd540b4819abb67f9db43fdfbba63725f
|
/hydrogen_notebooks/option_pricing/binomial_european_call_delta_hedging.py
|
29f3ca209e1b50cb4571fff0cac52d807c607296
|
[] |
no_license
|
glyfish/alpaca
|
49edfcb9d80551825dfa4cf071f21aeb95a3502f
|
2b5b69bcf50ed081a526742658be503706af94b4
|
refs/heads/master
| 2023-02-22T00:24:19.293502
| 2022-09-05T17:20:23
| 2022-09-05T17:20:23
| 186,169,438
| 1
| 3
| null | 2023-02-11T00:52:12
| 2019-05-11T18:38:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,302
|
py
|
# %%
%load_ext autoreload
%autoreload 2
import os
import sys
import numpy
from matplotlib import pyplot
from lib import config
from scipy.stats import binom
wd = os.getcwd()
yahoo_root = os.path.join(wd, 'data', 'yahoo')
pyplot.style.use(config.glyfish_style)
# %%
def qrn(U, D, R):
    """Risk-neutral up-move probability for a one-step binomial model."""
    spread = U - D
    return (R - D) / spread
def qrn1(q, U, R):
    """Probability q re-weighted to the stock measure: q*(1+U)/(1+R)."""
    growth_ratio = (1.0 + U) / (1.0 + R)
    return q * growth_ratio
def binomial_tail_cdf(l, n, p):
    """Upper tail P(X > l) for X ~ Binomial(n, p)."""
    lower_tail = binom.cdf(l, n, p)
    return 1.0 - lower_tail
def cutoff(S0, U, D, K, n):
    """Smallest number of up-moves for which the terminal payoff is positive.

    Returns n + 1 when no terminal node ends in the money.
    """
    for ups in range(n + 1):
        terminal_price = S0 * (1.0 + U) ** ups * (1.0 + D) ** (n - ups)
        if terminal_price - K > 0:
            return ups
    return n + 1
def european_call_payoff(U, D, R, S0, K, n):
    """CRR binomial price of a European call via two binomial tail probabilities."""
    threshold = cutoff(S0, U, D, K, n)
    q = qrn(U, D, R)
    q_stock = qrn1(q, U, R)
    tail_q = binomial_tail_cdf(threshold - 1, n, q)
    tail_q_stock = binomial_tail_cdf(threshold - 1, n, q_stock)
    discount = (1 + R) ** (-n)
    return S0 * tail_q_stock - K * discount * tail_q
def delta(CU, CD, SU, SD):
    """Hedge ratio: change in option value per unit change in stock value."""
    option_move = CU - CD
    stock_move = SU - SD
    return option_move / stock_move
def init_borrow(S0, C0, x):
    """Initial cash position: call premium received minus cost of x shares."""
    return C0 - x * S0
def borrow(y, R, x1, x2, S):
    """Cash after growing y at rate R and rebalancing the hedge from x1 to x2 shares at price S."""
    grown_cash = y * (1 + R)
    rebalance_proceeds = (x1 - x2) * S
    return grown_cash + rebalance_proceeds
def portfolio_value(x, S, y):
    """Mark-to-market value of x shares at price S plus cash position y."""
    return y + x * S
# %%
# Model parameters: n steps, up/down returns U/D, per-step risk-free rate R,
# spot S0 and strike K.
n = 3
U = 0.2
D = -0.1
R = 0.1
S0 = 100.0
K = 105.0
# %%
q = qrn(U, D, R)
q1 = qrn1(q, U, R)
l = cutoff(S0, U, D, K, n)
Ψq = binomial_tail_cdf(l - 1, n, q)
Ψq1 = binomial_tail_cdf(l - 1, n, q1)
q, q1, l, Ψq, Ψq1
binom.cdf(l, n, q)
# %
# t = 0
C0 = european_call_payoff(U, D, R, S0, K, n)
# %%
# Delta hedge: rebalance the replicating portfolio at each step; the
# portfolio_value calls should track the option value along the path.
# t = 0
S1U = S0*(1.0 + U)
S1D = S0*(1.0 + D)
C1U = european_call_payoff(U, D, R, S1U, K, n - 1)
C1D = european_call_payoff(U, D, R, S1D, K, n - 1)
x1 = delta(C1U, C1D, S1U, S1D)
y1 = init_borrow(S0, C0, x1)
portfolio_value(x1, S0, y1)
# t = 1
# The price goes up S1 = S0*(1+U)
S1 = S0 * (1 + U)
S2U = S1*(1.0 + U)
S2D = S1*(1.0 + D)
C2U = european_call_payoff(U, D, R, S2U, K, n - 2)
C2D = european_call_payoff(U, D, R, S2D, K, n - 2)
x2 = delta(C2U, C2D, S2U, S2D)
y2 = borrow(y1, R, x1, x2, S1)
portfolio_value(x2, S1, y2)
# t = 2
# The price goes down S1 = S0*(1+U)*(1+D)
S2 = S0 * (1 + U) * (1 + D)
S3U = S2*(1.0 + U)
S3D = S2*(1.0 + D)
C3U = european_call_payoff(U, D, R, S3U, K, n - 3)
C3D = european_call_payoff(U, D, R, S3D, K, n - 3)
x3 = delta(C3U, C3D, S3U, S3D)
y3 = borrow(y2, R, x2, x3, S2)
portfolio_value(x3, S2, y3)
|
[
"troy.stribling@gmail.com"
] |
troy.stribling@gmail.com
|
ce5dade7d36a431e3ec81dade64648f6c22eca35
|
7832e7dc8f1583471af9c08806ce7f1117cd228a
|
/aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/RunClusterServiceActionRequest.py
|
eb1c959505c70fd4e06aa43388665c4d9f9b06a3
|
[
"Apache-2.0"
] |
permissive
|
dianplus/aliyun-openapi-python-sdk
|
d6494850ddf0e66aaf04607322f353df32959725
|
6edf1ed02994245dae1d1b89edc6cce7caa51622
|
refs/heads/master
| 2023-04-08T11:35:36.216404
| 2017-11-02T12:01:15
| 2017-11-02T12:01:15
| 109,257,597
| 0
| 0
|
NOASSERTION
| 2023-03-23T17:59:30
| 2017-11-02T11:44:27
|
Python
|
UTF-8
|
Python
| false
| false
| 3,508
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class RunClusterServiceActionRequest(RpcRequest):
    """RPC request wrapper for the EMR ``RunClusterServiceAction`` API (version 2016-04-08).

    Each query parameter is exposed through a get_/set_ accessor pair, as in
    the other generated aliyun-python-sdk request classes.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Emr', '2016-04-08', 'RunClusterServiceAction')

    def get_ResourceOwnerId(self):
        params = self.get_query_params()
        return params.get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_ClusterId(self):
        params = self.get_query_params()
        return params.get('ClusterId')

    def set_ClusterId(self, ClusterId):
        self.add_query_param('ClusterId', ClusterId)

    def get_HostIdList(self):
        params = self.get_query_params()
        return params.get('HostIdList')

    def set_HostIdList(self, HostIdList):
        self.add_query_param('HostIdList', HostIdList)

    def get_ServiceName(self):
        params = self.get_query_params()
        return params.get('ServiceName')

    def set_ServiceName(self, ServiceName):
        self.add_query_param('ServiceName', ServiceName)

    def get_ServiceActionName(self):
        params = self.get_query_params()
        return params.get('ServiceActionName')

    def set_ServiceActionName(self, ServiceActionName):
        self.add_query_param('ServiceActionName', ServiceActionName)

    def get_CustomCommand(self):
        params = self.get_query_params()
        return params.get('CustomCommand')

    def set_CustomCommand(self, CustomCommand):
        self.add_query_param('CustomCommand', CustomCommand)

    def get_ComponentNameList(self):
        params = self.get_query_params()
        return params.get('ComponentNameList')

    def set_ComponentNameList(self, ComponentNameList):
        self.add_query_param('ComponentNameList', ComponentNameList)

    def get_Comment(self):
        params = self.get_query_params()
        return params.get('Comment')

    def set_Comment(self, Comment):
        self.add_query_param('Comment', Comment)

    def get_IsRolling(self):
        params = self.get_query_params()
        return params.get('IsRolling')

    def set_IsRolling(self, IsRolling):
        self.add_query_param('IsRolling', IsRolling)

    def get_NodeCountPerBatch(self):
        params = self.get_query_params()
        return params.get('NodeCountPerBatch')

    def set_NodeCountPerBatch(self, NodeCountPerBatch):
        self.add_query_param('NodeCountPerBatch', NodeCountPerBatch)

    def get_TotlerateFailCount(self):
        params = self.get_query_params()
        return params.get('TotlerateFailCount')

    def set_TotlerateFailCount(self, TotlerateFailCount):
        self.add_query_param('TotlerateFailCount', TotlerateFailCount)

    def get_OnlyRestartStaleConfigNodes(self):
        params = self.get_query_params()
        return params.get('OnlyRestartStaleConfigNodes')

    def set_OnlyRestartStaleConfigNodes(self, OnlyRestartStaleConfigNodes):
        self.add_query_param('OnlyRestartStaleConfigNodes', OnlyRestartStaleConfigNodes)

    def get_TurnOnMaintenanceMode(self):
        params = self.get_query_params()
        return params.get('TurnOnMaintenanceMode')

    def set_TurnOnMaintenanceMode(self, TurnOnMaintenanceMode):
        self.add_query_param('TurnOnMaintenanceMode', TurnOnMaintenanceMode)
|
[
"haowei.yao@alibaba-inc.com"
] |
haowei.yao@alibaba-inc.com
|
fa26cbfd0a0af998227fd24745c6f1b50a85ae34
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03046/s367901013.py
|
bd60026b909a76c85e533b517ac364ab9dac011a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,127
|
py
|
from sys import stdout
printn = lambda x: stdout.write(str(x))  # print with no trailing newline
inn = lambda : int(input())
inl = lambda: list(map(int, input().split()))
inm = lambda: map(int, input().split())
ins = lambda : input().strip()
DBG = True # and False
BIG = 999999999
R = 10**9 + 7
def ddprint(x):
    # Debug print, enabled while DBG is truthy.
    if DBG:
        print(x)
# m: bit width, k: target XOR. Build a sequence using each of 0..2**m-1 exactly
# twice such that the XOR condition of the task holds, or print -1.
# NOTE(review): exact task statement inferred from the construction — confirm.
m,k = inm()
# Small/degenerate cases handled explicitly.
if m==0 and k==0:
    print('0 0')
    exit()
if m==0 and k>0:
    print('-1')
    exit()
if m==1 and k==0:
    print('0 0 1 1')
    exit()
if m==1 and k>0:
    print('-1')
    exit()
# k must fit in m bits.
if k>=2**m:
    print('-1')
    exit()
if k==0:
    # Trivial construction: each value twice, adjacently.
    printn('0 0')
    for i in range(1,2**m):
        printn(' {} {}'.format(i,i))
    print('')
    exit()
# Pair each value i with its partner i^k; keep one representative per pair
# (u marks values already claimed by a chosen representative).
u = [False]*(2**m)
u[k] = True
a = []
cnt = 0
for i in range(1,2**m):
    j = i^k
    if not u[i] and not u[j]:
        a.append(i)
        u[j] = True
        cnt += 1
        if cnt==2**(m-1)-1:
            break
# Assemble: representatives, [0, k, 0], representatives reversed, then the
# partners (a^k) mirrored around a central k.
s = [x for x in a]
t = [x for x in a]
t.reverse()
s.extend([0,k,0])
s.extend(t)
v = [x^k for x in a]
t = [x for x in v]
t.reverse()
s.extend(v)
s.append(k)
s.extend(t)
printn(s[0])
for i in range(1,len(s)):
    printn(' ' + str(s[i]))
print("")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
354f4e8b11fc7deaae648a37d207d137f827d66e
|
0aa87ee2e544f56c17c2dde28a3b3feed08daa14
|
/apps/users/urls.py
|
6dda1d1373eadae3c77476250c17308642600204
|
[] |
no_license
|
yanshigou/mxonline
|
f2cc44724c1511418953e7e06d04661244b29455
|
cebc3295734713846828246fc54dd33f8df14f86
|
refs/heads/master
| 2022-12-09T12:11:05.734326
| 2022-08-17T10:38:13
| 2022-08-17T10:38:13
| 148,120,737
| 0
| 2
| null | 2022-12-08T02:58:15
| 2018-09-10T08:06:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,309
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'dzt'
__date__ = '2018/12/21 23:48'
from django.conf.urls import url
from .views import UserInfoView, UploadImageView, UpdatePwdView, SendEmailCodeView, UpdateEmailView, MyCourses
from .views import MyFavOrgView, MyFavTeacherView, MyFavCourseView, MyMessageView
urlpatterns = [
    # User profile info
    url(r'^info/$', UserInfoView.as_view(), name='user_info'),
    # User avatar upload
    url(r'^image/upload/$', UploadImageView.as_view(), name='image_upload'),
    # Change password from the personal center
    url(r'^update/pwd/$', UpdatePwdView.as_view(), name='update_pwd'),
    # Send email verification code
    url(r'^sendemail_code/$', SendEmailCodeView.as_view(), name='sendemail_code'),
    # Change email address
    url(r'^update_email/$', UpdateEmailView.as_view(), name='update_email'),
    # My courses
    url(r'^mycourses/$', MyCourses.as_view(), name='mycourses'),
    # My favorites: live-stream organizations
    url(r'^myfav/org/$', MyFavOrgView.as_view(), name='myfav_org'),
    # My favorites: streamers
    url(r'^myfav/teacher/$', MyFavTeacherView.as_view(), name='myfav_teacher'),
    # My favorites: courses
    url(r'^myfav/course/$', MyFavCourseView.as_view(), name='myfav_course'),
    # My messages
    url(r'^mymessage/$', MyMessageView.as_view(), name='mymessage'),
]
|
[
"569578851@qq.com"
] |
569578851@qq.com
|
506ab3ede97c112af86c4a23956ee39a25c9aecd
|
83b1a267809c08a57a3bb16c103d71539502a650
|
/job/migrations/0011_apply_created_at.py
|
c9ebca4b68d4fe3dc9d8d3052bdac004ee5816f8
|
[] |
no_license
|
rimatechcampus/django-jobboard-project-
|
c66933295b4692c7d3cb055dcf0cbaef80424b38
|
8823e1e7db011a4fbaa0fc87f1810bcd5dab08c6
|
refs/heads/master
| 2022-11-20T16:40:56.495550
| 2020-07-19T16:52:13
| 2020-07-19T16:52:13
| 279,794,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
# Generated by Django 3.0.8 on 2020-07-18 08:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a ``created_at`` timestamp field to the ``apply`` model."""

    dependencies = [
        ('job', '0010_apply_job'),
    ]

    operations = [
        migrations.AddField(
            model_name='apply',
            name='created_at',
            # NOTE(review): auto_now updates the field on *every* save; for a
            # pure creation timestamp auto_now_add would be expected — confirm.
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
[
"riyamtechcampus@gmail.com"
] |
riyamtechcampus@gmail.com
|
47220864385f35b099736c3ef297a7ae7f1cbe54
|
ca08100b33a78c01bf49f097f4e80ed10e4ee9ad
|
/intrepidboats/apps/owners_portal/utils.py
|
605fe7065629b6a2f9983f3de5ed580162b6c11a
|
[] |
no_license
|
elite0401/intrepidpowerboats
|
347eae14b584d1be9a61ca14c014135ab0d14ad0
|
d2a475b60d17aa078bf0feb5e0298c927e7362e7
|
refs/heads/master
| 2021-09-11T01:51:47.615117
| 2018-04-06T02:20:02
| 2018-04-06T02:20:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,654
|
py
|
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import gettext as _
def send_report_email(user_boat):
    """Email the boat's owner that a new build report is available.

    Sends both text and HTML renderings of the report template to the
    owner's address, from the no-reply reports sender.
    """
    context = {
        'user': user_boat.user,
        'user_boat': user_boat,
        'boat': user_boat.boat,
        # Current site domain so the templates can build absolute links.
        'site': Site.objects.get_current().domain,
        'dashboard_url': reverse("owners_portal:owners_portal"),
    }
    send_mail(
        subject=_("New boat report - Intrepid Powerboats"),
        message=render_to_string('owners_portal/emails/report_email.txt', context),
        from_email=settings.BUILD_A_BOAT['NO_REPLY_EMAIL_REPORTS'],
        recipient_list=[user_boat.user.email],
        html_message=render_to_string('owners_portal/emails/report_email.html', context),
    )
def send_step_feedback_email(step_feedback):
    """Forward a user's feedback on a build step to the configured staff inboxes.

    Recipients come from settings.TO_EMAIL['OWNERS_PORTAL_FEEDBACK_FORM'];
    both text and HTML templates are rendered with the same context.
    """
    context = {
        'comments': step_feedback.comments,
        'user': step_feedback.user,
        'step': '{title} (phase: {phase})'.format(title=step_feedback.step.title, phase=step_feedback.step.phase),
        'boat': '{boat} (model: {model})'.format(boat=step_feedback.step.user_boat,
                                                 model=step_feedback.step.user_boat.boat)
    }
    send_mail(
        subject=_("{user} has sent feedback on {step} in Owner's portal - Intrepid Powerboats".format(
            user=context['user'],
            step=context['step'],
        )),
        message=render_to_string('owners_portal/emails/step_feedback_email.txt', context),
        from_email=settings.NO_REPLY_EMAIL,
        recipient_list=settings.TO_EMAIL['OWNERS_PORTAL_FEEDBACK_FORM'],
        html_message=render_to_string('owners_portal/emails/step_feedback_email.html', context),
    )
def send_new_shared_video_uploaded_email(shared_video):
    """Notify all superusers that a new shared video was uploaded to Vimeo.

    The email links to the video's entry in the Django admin.
    """
    # Local import keeps auth models out of module import time.
    from django.contrib.auth.models import User
    admins = User.objects.filter(is_superuser=True)

    subject = _("New uploaded video to vimeo")
    to = admins.values_list('email', flat=True)
    from_email = settings.NO_REPLY_EMAIL
    site = Site.objects.get_current()
    ctx = {
        'user': shared_video.uploader,
        'site': site.domain,
        'admin_url': reverse("admin:owners_portal_sharedvideo_change", args=[shared_video.pk]),
    }
    message = render_to_string('owners_portal/emails/new_shared_video_email.txt', ctx)
    html_message = render_to_string('owners_portal/emails/new_shared_video_email.html', ctx)
    send_mail(subject=subject, message=message, from_email=from_email, recipient_list=to, html_message=html_message)
|
[
"elite.wisdom@gmx.com"
] |
elite.wisdom@gmx.com
|
2c4cfe1cd667b7a708c96b4978b00325826dfb19
|
0987f31e64bcacb41ba3a1e20054d7b8ac0d7346
|
/contests/panasonic2020/a.py
|
3c85e5a3a0a4b6b5ab170b052566849aab8ae7bf
|
[] |
no_license
|
masakiaota/kyoupuro
|
81ae52ab3014fb2b1e10472994afa4caa9ea463b
|
74915a40ac157f89fe400e3f98e9bf3c10012cd7
|
refs/heads/master
| 2021-06-27T04:13:52.152582
| 2020-09-20T03:21:17
| 2020-09-20T03:21:17
| 147,049,195
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,905
|
py
|
import sys
sys.setrecursionlimit(1 << 25)
read = sys.stdin.readline
def read_ints():
    # One line of whitespace-separated ints.
    return list(map(int, read().split()))
def read_a_int():
    return int(read())
def read_tuple(H):
    '''
    H is number of rows
    '''
    ret = []
    for _ in range(H):
        ret.append(tuple(map(int, read().split())))
    return ret
def read_col(H, n_cols):
    '''
    H is number of rows
    n_cols is number of cols
    Use when the input is laid out as columns (e.g. column A, column B).
    '''
    ret = [[] for _ in range(n_cols)]
    for _ in range(H):
        tmp = list(map(int, read().split()))
        for col in range(n_cols):
            ret[col].append(tmp[col])
    return ret
def read_matrix(H):
    '''
    H is number of rows
    '''
    ret = []
    for _ in range(H):
        ret.append(list(map(int, read().split())))
    return ret
    # return [list(map(int, read().split())) for _ in range(H)] # comprehensions are slow in PyPy
def read_map(H):
    '''
    H is number of rows
    Reads a board given as lines of characters.
    '''
    return [read()[:-1] for _ in range(H)]
def read_map_as_int(H):
    '''
    Reads the board mapping '#' -> 1 and '.' -> 0.
    '''
    ret = []
    for _ in range(H):
        ret.append([1 if s == '#' else 0 for s in read()[:-1]])
        # note: comprehensions are slightly slow in PyPy,
        # but numpy will likely consume this, so keep the list form
    return ret
# default import
from collections import defaultdict, Counter, deque
from operator import itemgetter
from itertools import product, permutations, combinations
from bisect import bisect_left, bisect_right # , insort_left, insort_right
from fractions import gcd
def lcm(a, b):
    """Least common multiple of a and b (the old comment wrongly said GCD)."""
    # fractions.gcd was deprecated in 3.5 and removed in Python 3.9;
    # use math.gcd so this works on modern interpreters too.
    from math import gcd as _gcd
    g = _gcd(a, b)
    return a * b // g
# Lookup table of precomputed answers for inputs 1..32; the derivation is not
# shown here (presumably computed offline for this task — confirm source).
a = [1, 1, 1, 2, 1, 2, 1, 5, 2, 2, 1, 5, 1, 2, 1, 14,
     1, 5, 1, 5, 2, 2, 1, 15, 2, 2, 5, 4, 1, 4, 1, 51]
print(a[int(input()) - 1])
|
[
"aotamasakimail@gmail.com"
] |
aotamasakimail@gmail.com
|
b1c5a6fe4a11aa713099d0337893a6259fa2e086
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02973/s301790930.py
|
280647a2fd8669a6345ecf3a1ac6c75ef906c3dc
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
from sys import stdin
from bisect import bisect

N = int(stdin.readline().rstrip())
A = []
for i in range(N):
    A.append(int(input()))
# Patience-sorting pass over the reversed input; the number of piles at the
# end is the answer (longest-non-increasing-subsequence style argument).
dp = []
for a in A[::-1]:
    # bisect == bisect_right: leftmost pile whose top is strictly greater than a.
    i = bisect(dp, a)
    if i < len(dp):
        dp[i] = a
    else:
        dp.append(a)
print(len(dp))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
1d1dfcd44cf71fa592df181189c7efe1af6af40d
|
7a8560742946bfb95f4a252693264c34d4d0473d
|
/k2/centroid.py
|
e09491c999915180b3830fd138110d6e2140551a
|
[
"MIT"
] |
permissive
|
benmontet/K2-noise
|
3781e475ed6d5e2748a7ac3ddd878b8eec334254
|
a4b682cdf33f85d2dffc4cef115dcedacfccb4b4
|
refs/heads/master
| 2016-09-05T13:02:09.051080
| 2014-10-25T14:36:22
| 2014-10-25T14:36:22
| 22,899,258
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["centroid"]
import numpy as np
from functools import partial
from itertools import izip, imap
from .c3k import find_centroid
def centroid(tpf, **kwargs):
    """Compute per-frame centroids for a target pixel file.

    ``kwargs`` are forwarded to ``find_centroid``. Returns a list whose first
    entry is the time array and whose remaining entries are arrays built from
    whatever ``find_centroid`` yields per image. (Python 2 code: izip/imap.)
    """
    # Load the data.
    data = tpf.read()
    times = data["TIME"]
    images = data["FLUX"]
    quality = data["QUALITY"]
    # Get rid of the bad times based on quality flags.
    # NaN-ing the flagged frames rather than dropping them keeps array shapes aligned.
    m = np.isfinite(times) * (quality == 0)
    images[~m, :] = np.nan
    f = partial(find_centroid, **kwargs)
    return [times] + list(imap(np.array, izip(*(imap(f, images)))))
|
[
"danfm@nyu.edu"
] |
danfm@nyu.edu
|
02af91d9a068eb13b6123c2f26b025668f5bb79f
|
6eaf69ffd454ed6933e3395516246d878cb09781
|
/repozeldapapp/tests/functional/test_authentication.py
|
f998f67ccdc2ccc018c17f9cecb7cb08697d7a58
|
[] |
no_license
|
ralphbean/repoze-ldap-app
|
0d6658ef13b153736aaed6aa07fbdcaf65cbe1d9
|
cc00fe59bcc286fd44d1e22a14c40cfc8419e21d
|
refs/heads/master
| 2021-01-01T05:35:25.069715
| 2011-07-19T15:30:31
| 2011-07-19T15:30:31
| 2,072,811
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,583
|
py
|
# -*- coding: utf-8 -*-
"""
Integration tests for the :mod:`repoze.who`-powered authentication sub-system.
As repoze-ldap-app grows and the authentication method changes, only these tests
should be updated.
"""
from repozeldapapp.tests import TestController
class TestAuthentication(TestController):
    """Tests for the default authentication setup.
    By default in TurboGears 2, :mod:`repoze.who` is configured with the same
    plugins specified by repoze.what-quickstart (which are listed in
    http://code.gustavonarea.net/repoze.what-quickstart/#repoze.what.plugins.quickstart.setup_sql_auth).
    As the settings for those plugins change, or the plugins are replaced,
    these tests should be updated.
    """
    # App config name loaded by the TestController fixture.
    application_under_test = 'main'
    def test_forced_login(self):
        """Anonymous users are forced to login
        Test that anonymous users are automatically redirected to the login
        form when authorization is denied. Next, upon successful login they
        should be redirected to the initially requested page.
        """
        # Requesting a protected area
        resp = self.app.get('/secc/', status=302)
        assert resp.location.startswith('http://localhost/login')
        # Getting the login form:
        resp = resp.follow(status=200)
        form = resp.form
        # Submitting the login form:
        form['login'] = u'manager'
        form['password'] = 'managepass'
        post_login = form.submit(status=302)
        # Being redirected to the initially requested page:
        assert post_login.location.startswith('http://localhost/post_login')
        initial_page = post_login.follow(status=302)
        # The auth ticket cookie proves the login stuck.
        assert 'authtkt' in initial_page.request.cookies, \
            "Session cookie wasn't defined: %s" % initial_page.request.cookies
        assert initial_page.location.startswith('http://localhost/secc/'), \
            initial_page.location
    def test_voluntary_login(self):
        """Voluntary logins must work correctly"""
        # Going to the login form voluntarily:
        resp = self.app.get('/login', status=200)
        form = resp.form
        # Submitting the login form:
        form['login'] = u'manager'
        form['password'] = 'managepass'
        post_login = form.submit(status=302)
        # Being redirected to the home page:
        assert post_login.location.startswith('http://localhost/post_login')
        home_page = post_login.follow(status=302)
        assert 'authtkt' in home_page.request.cookies, \
            'Session cookie was not defined: %s' % home_page.request.cookies
        assert home_page.location == 'http://localhost/'
    def test_logout(self):
        """Logouts must work correctly"""
        # Logging in voluntarily the quick way:
        resp = self.app.get('/login_handler?login=manager&password=managepass',
                            status=302)
        resp = resp.follow(status=302)
        assert 'authtkt' in resp.request.cookies, \
            'Session cookie was not defined: %s' % resp.request.cookies
        # Logging out:
        resp = self.app.get('/logout_handler', status=302)
        assert resp.location.startswith('http://localhost/post_logout')
        # Finally, redirected to the home page:
        home_page = resp.follow(status=302)
        # Cookie may be either removed or invalidated on logout.
        authtkt = home_page.request.cookies.get('authtkt')
        assert not authtkt or authtkt == 'INVALID', \
            'Session cookie was not deleted: %s' % home_page.request.cookies
        assert home_page.location == 'http://localhost/', home_page.location
|
[
"ralph.bean@gmail.com"
] |
ralph.bean@gmail.com
|
1b20703b930ae2d775880d83cd617d40c9cdfa18
|
ea867a1db2b730964b471e5f198ac74988417fa5
|
/steemtools/helpers.py
|
5c4e3a5d73bff0aa5310093de2799d44d516835b
|
[
"MIT"
] |
permissive
|
Denis007138/steemtools
|
0b58fa4bb2608c0134752b0855a36464cff9073a
|
c7f7ad9f482ff1b56e1218ceffbf574c95cf0c1f
|
refs/heads/master
| 2021-01-11T01:34:36.721177
| 2016-10-10T13:58:44
| 2016-10-10T13:58:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,494
|
py
|
import datetime
import re
import time
import dateutil
from dateutil import parser
from funcy import contextmanager, decorator
from werkzeug.contrib.cache import SimpleCache
@contextmanager
def timeit():
    """Context manager that prints the wall-clock time spent inside its body."""
    t1 = time.time()
    yield
    print("Time Elapsed: %.2f" % (time.time() - t1))
@decorator
def simple_cache(func, cache_obj, timeout=3600):
    """Memoize ``func`` in a werkzeug SimpleCache for ``timeout`` seconds.

    funcy-style decorator: ``func`` is a call object exposing ``._func``,
    ``._args`` and ``._kwargs``. Falls through to a plain call when
    ``cache_obj`` is not a SimpleCache.
    """
    if type(cache_obj) is not SimpleCache:
        return func()
    # Cache key combines the function name with the call's arguments.
    name = "%s_%s_%s" % (func._func.__name__, func._args, func._kwargs)
    cache_value = cache_obj.get(name)
    # NOTE(review): falsy results (0, '', None) are never treated as cache
    # hits and get recomputed — confirm that is acceptable.
    if cache_value:
        return cache_value
    else:
        out = func()
        cache_obj.set(name, out, timeout=timeout)
        return out
def read_asset(asset_string):
    """Split an asset string such as ``"3.000 STEEM"`` into its parts.

    Returns a dict with ``'value'`` (float amount) and ``'symbol'``
    (currency unit). Raises AttributeError when the string does not match.
    """
    number, unit = re.match(r'(\d*\.?\d+)\s?([a-zA-Z]+)', asset_string).groups()
    return {'value': float(number), 'symbol': unit}
def parse_payout(payout):
    """Numeric amount of a payout asset string (e.g. ``"1.23 SBD"`` -> 1.23)."""
    parsed = read_asset(payout)
    return parsed['value']
def time_diff(time1, time2):
    """Seconds between two timestamp strings (``time2 - time1``).

    Appending "UTC" makes dateutil read the otherwise-naive strings as UTC.
    """
    time1 = parser.parse(time1 + "UTC").timestamp()
    time2 = parser.parse(time2 + "UTC").timestamp()
    return time2 - time1
def is_comment(item):
    """Return True when the item's permlink marks a reply (starts with "re-")."""
    return item['permlink'].startswith("re-")
def time_elapsed(time1):
    """Seconds elapsed since the UTC timestamp string *time1*."""
    created_at = parser.parse(time1 + "UTC").timestamp()
    now_adjusted = time.time()
    return now_adjusted - created_at
def parse_time(block_time):
    """Parse a block timestamp string into a timezone-aware UTC datetime."""
    return dateutil.parser.parse(block_time + "UTC").astimezone(datetime.timezone.utc)
|
[
"_@furion.me"
] |
_@furion.me
|
be1ca56a4c8e33d679fe761dc4faa412b354bfa3
|
61e68e3a4d6cc841da4350dc193315822ca4e354
|
/lecture/4_정렬/4_퀵정렬.py
|
45420f20a5eaaae9aafb31ff3bea12843c0068c4
|
[] |
no_license
|
sswwd95/Algorithm
|
34360cd333019d6ded60f967c19aa70f1655e12a
|
a70bdf02580a39b9a5c282a04b0b2f8c2cb41636
|
refs/heads/master
| 2023-04-16T21:05:07.293929
| 2021-05-08T10:58:05
| 2021-05-08T10:58:05
| 362,651,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
array = [5, 7, 9, 0, 3, 1, 6, 2, 4, 8]


def quick_sort(array, start, end):
    """Sort ``array[start:end + 1]`` in place, first element as pivot."""
    if start >= end:  # zero or one element: nothing to sort
        return
    pivot = start
    lo, hi = start + 1, end
    while lo <= hi:
        # advance lo until it rests on a value greater than the pivot
        while lo <= end and array[lo] <= array[pivot]:
            lo += 1
        # retreat hi until it rests on a value smaller than the pivot
        while hi > start and array[hi] >= array[pivot]:
            hi -= 1
        if lo > hi:
            # pointers crossed: drop the pivot into its final slot
            array[hi], array[pivot] = array[pivot], array[hi]
        else:
            # out-of-place pair found: exchange and keep scanning
            array[lo], array[hi] = array[hi], array[lo]
    # recurse on the two partitions around the settled pivot
    quick_sort(array, start, hi - 1)
    quick_sort(array, hi + 1, end)


quick_sort(array, 0, len(array) - 1)
print(array)
# [0,1,2,3,4,5,6,7,8,9]
|
[
"sswwd95@gmail.com"
] |
sswwd95@gmail.com
|
b4ebea591ef98eba50becc2628f71215e816a37f
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_84/306.py
|
0561a547b612e83a36f4cf677430a4ecdf3d37f6
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,233
|
py
|
import sys, math
from multiprocessing import Pool
def main(data):
    """Tile every '#' cell of an R x C grid with 2x2 "/\\ \\/" squares.

    :param data: tuple ``(R, C, s)`` where ``s`` is a list of rows, each a
        list of single characters ('#' = must be covered).
    :return: the tiled grid joined with newlines, or ``"Impossible"``.
    Mutates ``s`` in place while tiling.
    """
    R, C, s = data
    for i in range(R):
        for j in range(C):
            if s[i][j] != "#":
                continue
            # The first uncovered '#' (in scan order) must be the top-left
            # corner of a full 2x2 block of '#'. The original code detected
            # out-of-range corners via a bare except; test bounds explicitly
            # instead so real errors are not swallowed.
            if i + 1 >= R or j + 1 >= C:
                return "Impossible"
            if s[i][j + 1] == "#" and s[i + 1][j] == "#" and s[i + 1][j + 1] == "#":
                s[i][j] = "/"
                s[i][j + 1] = "\\"
                s[i + 1][j] = "\\"
                s[i + 1][j + 1] = "/"
            else:
                return "Impossible"
    return "\n".join("".join(row) for row in s)
if __name__ == "__main__":
    # NOTE: Python 2 script (print statement; Pool.map returns a list).
    # mode 1: an input file was given on the command line -> run in parallel.
    mode = 0
    if len(sys.argv) > 1:
        f = open(sys.argv[1])
        mode = 1
    else:
        f = open("test.txt")
    T = int(f.readline())
    data = []
    # Parse all T grids up front so they can be mapped over workers.
    for i in range(T):
        R,C = map(int, f.readline().strip().split())
        s = list()
        for j in range(R):
            s.append(list(f.readline().strip()))
        data.append((R, C, s))
    if mode == 1:
        pool = Pool()
        r = pool.map(main, data)
    else:
        r = map(main, data)
    for i in range(T):
        print "Case #%d: \n%s" % (i+1, r[i])
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
327203d439300f410de4e56199b07bcb7a5b1cb1
|
3ca67d69abd4e74b7145b340cdda65532f90053b
|
/programmers/난이도별/level01.제일_작은_수_제거하기/Jaewon0702.py
|
9574b875696e370e939054a0279eb98293b8defd
|
[] |
no_license
|
DKU-STUDY/Algorithm
|
19549516984b52a1c5cd73e1ed1e58f774d6d30e
|
6f78efdbefd8eedab24e43d74c7dae7f95c2893b
|
refs/heads/master
| 2023-02-18T06:48:39.309641
| 2023-02-09T07:16:14
| 2023-02-09T07:16:14
| 258,455,710
| 175
| 49
| null | 2023-02-09T07:16:16
| 2020-04-24T08:42:27
|
Python
|
UTF-8
|
Python
| false
| false
| 156
|
py
|
def solution(arr):
    """Remove the first occurrence of the smallest element, in place.

    Returns the remaining list, or ``[-1]`` when nothing is left.
    """
    arr.remove(min(arr))
    return arr or [-1]


print(solution([4, 3, 2, 1]) == [4, 3, 2])
print(solution([10]) == [-1])
|
[
"45033215+sangmandu@users.noreply.github.com"
] |
45033215+sangmandu@users.noreply.github.com
|
bd9a420a7684d527bcd274c32086f85330ec970b
|
2704ad14c83050ac28f403371daa8e3148440e00
|
/chiadoge/wallet/did_wallet/did_info.py
|
2294be358c05f883b729c58c3c37a27b0b590ce5
|
[
"Apache-2.0"
] |
permissive
|
Bgihe/chiadoge-blockchain
|
d5e01a53c8e15fa17c47b44d9c95e6511aa98b7f
|
befb179c65ffe42aebbc47c211f78e193a095d2b
|
refs/heads/main
| 2023-06-01T05:31:51.503755
| 2021-07-05T20:47:32
| 2021-07-05T20:47:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 919
|
py
|
from dataclasses import dataclass
from typing import List, Optional, Tuple
from chiadoge.types.blockchain_format.sized_bytes import bytes32
from chiadoge.util.ints import uint64
from chiadoge.util.streamable import streamable, Streamable
from chiadoge.wallet.cc_wallet.ccparent import CCParent
from chiadoge.types.blockchain_format.program import Program
from chiadoge.types.blockchain_format.coin import Coin
@dataclass(frozen=True)
@streamable
class DIDInfo(Streamable):
    """Serializable state of a DID wallet (immutable record)."""
    origin_coin: Optional[Coin]  # puzzlehash of this coin is our DID
    backup_ids: List[bytes]  # identities that can help recover the wallet
    num_of_backup_ids_needed: uint64  # recovery threshold
    parent_info: List[Tuple[bytes32, Optional[CCParent]]]  # {coin.name(): CCParent}
    current_inner: Optional[Program]  # represents a Program as bytes
    temp_coin: Optional[Coin]  # partially recovered wallet uses these to hold info
    temp_puzhash: Optional[bytes32]
    temp_pubkey: Optional[bytes]
|
[
"83430349+lionethan@users.noreply.github.com"
] |
83430349+lionethan@users.noreply.github.com
|
093c9c5f1b37d499d6bb6486317cbdcbb89a838e
|
17b63416cf2f66246e1cf655ccfa2eb9a108da3c
|
/abupy/AlphaBu/ABuPickStockExecute.py
|
f344c2ed857ae0f8c94dc194d151f49cddb60f57
|
[] |
no_license
|
cmy00cmy/qtLearning
|
58aec5cf9fccf9d8f14adf1793306b8b8b5ecb7f
|
2b5fee7b9bbd832b20ba4e1b508be16b606249e0
|
refs/heads/master
| 2020-03-20T01:42:19.882639
| 2018-06-12T14:52:00
| 2018-06-12T14:52:00
| 137,085,926
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,835
|
py
|
# -*- encoding:utf-8 -*-
"""
包装选股worker进行,完善前后工作
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from .ABuPickStockWorker import AbuPickStockWorker
from ..CoreBu.ABuEnvProcess import add_process_env_sig
from ..MarketBu.ABuMarket import split_k_market
from ..TradeBu.ABuKLManager import AbuKLManager
from ..CoreBu.ABuFixes import ThreadPoolExecutor
__author__ = '阿布'
__weixin__ = 'abu_quant'
@add_process_env_sig
def do_pick_stock_work(choice_symbols, benchmark, capital, stock_pickers):
    """
    Wrap AbuPickStockWorker to run stock picking over the given candidates.
    :param choice_symbols: initial candidate trading symbols
    :param benchmark: trading benchmark, an AbuBenchmark instance
    :param capital: an AbuCapital instance
    :param stock_pickers: sequence of stock-picking factors
    :return: the symbols that passed all pickers
    """
    kl_pd_manager = AbuKLManager(benchmark, capital)
    stock_pick = AbuPickStockWorker(capital, benchmark, kl_pd_manager, choice_symbols=choice_symbols,
                                    stock_pickers=stock_pickers)
    stock_pick.fit()
    return stock_pick.choice_symbols
@add_process_env_sig
def do_pick_stock_thread_work(choice_symbols, benchmark, capital, stock_pickers, n_thread):
    """Run AbuPickStockWorker across *n_thread* threads and merge the results."""
    result = []

    def when_thread_done(r):
        # Done-callback: fold each thread's picked symbols into the shared list.
        result.extend(r.result())

    with ThreadPoolExecutor(max_workers=n_thread) as pool:
        # Split the candidate symbols into one slice per worker thread.
        thread_symbols = split_k_market(n_thread, market_symbols=choice_symbols)
        for symbols in thread_symbols:
            future_result = pool.submit(do_pick_stock_work, symbols, benchmark, capital, stock_pickers)
            future_result.add_done_callback(when_thread_done)
    # The with-block waits for all workers, so result is complete here.
    return result
|
[
"chenmyuan@163.com"
] |
chenmyuan@163.com
|
c08a05fcca3a38d83fa5e5c0f599e925d0a2c97b
|
56a4d0d73c349aeaca7580ca248caf0cf893a8c5
|
/w2/using_find.py
|
af6a320679d645b836416da8a37d141b0a0c269d
|
[] |
no_license
|
alejo8591/m101
|
79e62e0110bcc3e6ca82ac02ae3cdcbe13d51c67
|
d93d34a161ecede77defb9a6a3db389d4a9b0de8
|
refs/heads/master
| 2020-05-18T21:42:46.651036
| 2012-12-17T23:36:49
| 2012-12-17T23:36:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
#!/usr/bin/env python
import pymongo
import sys
# Python 2 demo script: querying the local MongoDB "school" database.
connect = pymongo.Connection("mongodb://127.0.0.1", safe=True)
db = connect.school
scores = db.scores


def find():
    """Print up to 11 documents of type 'exam' from the scores collection."""
    print "Find, reporting for duty"
    query = {'type':'exam'}
    try:
        iter = scores.find(query)
    except:
        # NOTE(review): bare except hides real errors; narrow if touched again.
        print "Unexpected error:",sys.exc_info()[0]
    sanity = 0
    # sanity cap so a huge collection does not flood the console
    for doc in iter:
        print doc
        sanity+=1
        if (sanity > 10):
            break


def find_one():
    """Print the single document for student_id 10 (or None)."""
    print "find one, reporting for duty"
    query = {'student_id':10}
    try:
        iter = scores.find_one(query)
    except:
        print "Unexpected error:",sys.exc_info()[0]
    print iter


find_one()
find()
|
[
"alejo8591@gmail.com"
] |
alejo8591@gmail.com
|
b6a2760e083ef2662b8cb1a29ee20d3d09c6f19b
|
e76aa4de68988abcfceb7f90ea680505a9159995
|
/outrigger/__init__.py
|
358e6751f654522e24e8680c88312573f25843fb
|
[
"BSD-3-Clause"
] |
permissive
|
ggraham/outrigger
|
3ab1798fbeb3c871cae4d2a12bcd721032c3a96c
|
135388192bd8b15fc248653ee50943448ff19160
|
refs/heads/master
| 2021-05-26T09:58:02.547479
| 2020-04-29T19:32:34
| 2020-04-29T19:32:34
| 254,086,816
| 0
| 0
|
BSD-3-Clause
| 2020-04-29T19:32:35
| 2020-04-08T12:52:08
| null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
# -*- coding: utf-8 -*-
# Package metadata and the declared public submodules of outrigger.
__author__ = 'Olga Botvinnik'
__email__ = 'olga.botvinnik@gmail.com'
__version__ = '1.1.1'
__all__ = ['psi', 'region', 'util', 'io', 'validate', 'index',
           'common']
|
[
"olga.botvinnik@gmail.com"
] |
olga.botvinnik@gmail.com
|
a7c60b78f32abc44f71b77a5227cb86f6803806d
|
659d41f0c737dffc2a6ebd5e773a6513da32e5ba
|
/scripts/experiments/Experiments729/dephasing_scan_duration.py
|
adf770c56bb5fd14721f410bb6a9d3b6978b1e37
|
[] |
no_license
|
HaeffnerLab/sqip
|
b3d4d570becb1022083ea01fea9472115a183ace
|
5d18f167bd9a5344dcae3c13cc5a84213fb7c199
|
refs/heads/master
| 2020-05-21T23:11:10.448549
| 2019-11-21T02:00:58
| 2019-11-21T02:00:58
| 19,164,232
| 0
| 0
| null | 2019-11-04T04:39:37
| 2014-04-25T23:54:47
|
Python
|
UTF-8
|
Python
| false
| false
| 7,104
|
py
|
from common.abstractdevices.script_scanner.scan_methods import experiment
from excitations import excitation_dephase
from sqip.scripts.scriptLibrary.common_methods_729 import common_methods_729 as cm
from sqip.scripts.scriptLibrary import dvParameters
import time
import labrad
from labrad.units import WithUnit
from numpy import linspace
#The following command brinfgs the sequence plotter.
#from common.okfpgaservers.pulser.pulse_sequences.plot_sequence import SequencePlotter
class dephase_scan_duration(experiment):
    """Scan the interaction duration of a dephasing pulse sequence.

    For each scan point the evolution pulse length is capped at the configured
    maximum; any remainder of the requested interaction duration becomes a
    Ramsey wait time. Excitation results are streamed to the LabRAD data vault.
    """

    name = 'Dephase Scan Duration'
    # Parameters this scan needs on top of the excitation sequence's own.
    dephasing_required_parameters = [
        ('Dephasing_Pulses', 'preparation_line_selection'),
        ('Dephasing_Pulses', 'evolution_line_selection'),
        ('Dephasing_Pulses','preparation_sideband_selection'),
        ('Dephasing_Pulses','evolution_sideband_selection'),
        ('Dephasing_Pulses', 'scan_interaction_duration'),
        ('TrapFrequencies','axial_frequency'),
        ('TrapFrequencies','radial_frequency_1'),
        ('TrapFrequencies','radial_frequency_2'),
        ('TrapFrequencies','rf_drive_frequency'),
    ]

    @classmethod
    def all_required_parameters(cls):
        """Union of this scan's parameters and the excitation sequence's."""
        parameters = set(cls.dephasing_required_parameters)
        parameters = parameters.union(set(excitation_dephase.all_required_parameters()))
        parameters = list(parameters)
        #removing parameters we'll be overwriting, and they do not need to be loaded
        parameters.remove(('Dephasing_Pulses','evolution_ramsey_time'))
        parameters.remove(('Dephasing_Pulses','evolution_pulses_frequency'))
        parameters.remove(('Dephasing_Pulses','preparation_pulse_frequency'))
        return parameters

    def initialize(self, cxn, context, ident):
        # Set up the sub-experiment, LabRAD connections and the data vault.
        self.ident = ident
        self.excite = self.make_experiment(excitation_dephase)
        self.excite.initialize(cxn, context, ident)
        self.scan = []
        self.cxnlab = labrad.connect('192.168.169.49') #connection to labwide network
        self.drift_tracker = cxn.sd_tracker
        self.dv = cxn.data_vault
        self.data_save_context = cxn.context()
        self.setup_data_vault()

    def setup_sequence_parameters(self):
        """Compute preparation/evolution frequencies and build the scan grid."""
        p = self.parameters.Dephasing_Pulses
        trap = self.parameters.TrapFrequencies
        prep_line_frequency = cm.frequency_from_line_selection('auto', None, p.preparation_line_selection, self.drift_tracker)
        frequency_preparation = cm.add_sidebands(prep_line_frequency, p.preparation_sideband_selection, trap)
        #if same line is selected, match the frequency exactly
        same_line = p.preparation_line_selection == p.evolution_line_selection
        same_sideband = p.preparation_sideband_selection.aslist == p.evolution_sideband_selection.aslist
        print 'same line', same_line
        print 'same sideband', same_sideband
        if same_line and same_sideband:
            frequency_evolution = frequency_preparation
        else:
            evo_line_frequency = cm.frequency_from_line_selection('auto', None, p.evolution_line_selection, self.drift_tracker)
            frequency_evolution = cm.add_sidebands(evo_line_frequency, p.evolution_sideband_selection, trap)
        self.parameters['Dephasing_Pulses.preparation_pulse_frequency'] = frequency_preparation
        self.parameters['Dephasing_Pulses.evolution_pulses_frequency'] = frequency_evolution
        self.max_second_pulse = p.evolution_pulses_duration
        minim,maxim,steps = self.parameters.Dephasing_Pulses.scan_interaction_duration
        minim = minim['us']; maxim = maxim['us']
        # Scan points carry explicit microsecond units for the pulser.
        self.scan = linspace(minim,maxim, steps)
        self.scan = [WithUnit(pt, 'us') for pt in self.scan]

    def setup_data_vault(self):
        # Create a dated directory for this run inside the data vault.
        localtime = time.localtime()
        dirappend = [time.strftime("%Y%b%d",localtime) ,time.strftime("%H%M_%S", localtime)]
        directory = ['','Experiments']
        directory.extend([self.name])
        directory.extend(dirappend)
        self.dv.cd(directory, True,context = self.data_save_context)

    def data_vault_new_trace(self):
        # Open a fresh dataset with one excitation column per ion.
        localtime = time.localtime()
        datasetNameAppend = time.strftime("%Y%b%d_%H%M_%S",localtime)
        output_size = self.excite.output_size
        dependants = [('Excitation','Ion {}'.format(ion),'Probability') for ion in range(output_size)]
        self.dv.new('{0} {1}'.format(self.name, datasetNameAppend),[('Excitation', 'us')], dependants , context = self.data_save_context)
        window_name = ['Dephasing, Scan Duration']
        self.dv.add_parameter('Window', window_name, context = self.data_save_context)
        self.dv.add_parameter('plotLive', True, context = self.data_save_context)

    def run(self, cxn, context):
        """Execute the scan; returns False if stopped early, True when done."""
        p = self.parameters.Dephasing_Pulses
        self.data_vault_new_trace()
        self.setup_sequence_parameters()
        for i,interaction_duration in enumerate(self.scan):
            should_stop = self.pause_or_stop()
            if should_stop:
                return False
            # Cap the evolution pulse; the remainder becomes a Ramsey wait.
            second_pulse_dur = min(self.max_second_pulse, interaction_duration)
            ramsey_time = max(WithUnit(0,'us'), interaction_duration - self.max_second_pulse)
            #ramsey_time = WithUnit(0,'us')
            p.evolution_ramsey_time = ramsey_time
            p.evolution_pulses_duration = second_pulse_dur
            self.excite.set_parameters(self.parameters)
            excitation, readout = self.excite.run(cxn, context)
            submission = [interaction_duration['us']]
            submission.extend(excitation)
            self.dv.add(submission, context = self.data_save_context)
            self.update_progress(i)
        self.save_parameters(self.dv, cxn, self.cxnlab, self.data_save_context)
        ####### FROM DYLAN -- PULSE SEQUENCE PLOTTING #########
        #ttl = self.cxn.pulser.human_readable_ttl()
        #dds = self.cxn.pulser.human_readable_dds()
        #channels = self.cxn.pulser.get_channels().asarray
        #sp = SequencePlotter(ttl.asarray, dds.aslist, channels)
        #sp.makePlot()
        ############################################3
        return True

    def finalize(self, cxn, context):
        pass

    def update_progress(self, iteration):
        # Linear progress between the scanner-assigned min and max bounds.
        progress = self.min_progress + (self.max_progress - self.min_progress) * float(iteration + 1.0) / len(self.scan)
        self.sc.script_set_progress(self.ident, progress)

    def save_parameters(self, dv, cxn, cxnlab, context):
        # Store measured and configured parameters alongside the dataset.
        measuredDict = dvParameters.measureParameters(cxn, cxnlab)
        dvParameters.saveParameters(dv, measuredDict, context)
        dvParameters.saveParameters(dv, dict(self.parameters), context)
dvParameters.saveParameters(dv, dict(self.parameters), context)
if __name__ == '__main__':
    # Stand-alone launch: register with the script scanner and run once.
    cxn = labrad.connect()
    scanner = cxn.scriptscanner
    exprt = dephase_scan_duration(cxn = cxn)
    ident = scanner.register_external_launch(exprt.name)
    exprt.execute(ident)
|
[
"haeffnerlab@gmail.com"
] |
haeffnerlab@gmail.com
|
f5fb13e993e1f670fb944b04d958c11f4c9235e0
|
4a63c8e2545c6968547d7aa36c2dca85b9b84301
|
/workscheduler/src/backend/utils/datetime.py
|
88eb649edb561f5fec06a44475f4020eda3ac2b3
|
[] |
no_license
|
epirevolve/workscheduler
|
458b8da84da94862c91de6544c5aaaefc1520d47
|
6c89e7264c5b66f4eb91b1989da6324695449703
|
refs/heads/develop
| 2023-01-23T02:01:29.356940
| 2019-12-30T01:16:32
| 2019-12-30T01:16:32
| 147,050,241
| 5
| 2
| null | 2023-01-04T11:42:19
| 2018-09-02T03:10:19
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 207
|
py
|
# -*- coding: utf-8 -*-
from datetime import datetime
def is_overlap(a_from: datetime, a_to: datetime, b_from: datetime, b_to: datetime) -> bool:
    """Return True when the closed intervals [a_from, a_to] and [b_from, b_to] intersect.

    Bug fix: the previous check only tested whether one of a's endpoints fell
    inside b, so it returned False when a fully contained b. Two intervals
    overlap iff each one starts no later than the other ends.
    """
    return a_from <= b_to and b_from <= a_to
|
[
"epirevolve@gmail.com"
] |
epirevolve@gmail.com
|
2f74ae3f7caac57b707a98584b6bdd4a40ded6f8
|
fd1dba8223ad1938916369b5eb721305ef197b30
|
/AtCoder/ABC/abc110/abc110c.py
|
b19744afbe63b3698d7e3487b7f15813a0167d39
|
[] |
no_license
|
genkinanodesu/competitive
|
a3befd2f4127e2d41736655c8d0acfa9dc99c150
|
47003d545bcea848b409d60443655edb543d6ebb
|
refs/heads/master
| 2020-03-30T07:41:08.803867
| 2019-06-10T05:22:17
| 2019-06-10T05:22:17
| 150,958,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
# ABC110 C: can S be turned into T by consistently swapping letters?
# Equivalent to: the multiset of per-letter position patterns must match.
S = input()
T = input()
n = len(S)
# X[c] / Y[c]: indices where letter c (0..25) occurs in S / T.
X = [[] for _ in range(26)]
Y = [[] for _ in range(26)]
for i in range(n):
    s = ord(S[i]) - 97  # map 'a'..'z' to 0..25
    t = ord(T[i]) - 97
    X[s].append(i)
    Y[t].append(i)
# Tuples are hashable, so the position patterns can be compared as sets.
P = [tuple(x) for x in X]
Q = [tuple(y) for y in Y]
if set(P) == set(Q):
    print('Yes')
else:
    print('No')
|
[
"s.genki0605@gmail.com"
] |
s.genki0605@gmail.com
|
be63e415ecf5e1d3a8f53e768d4c23c1d1643511
|
cca21b0ddca23665f886632a39a212d6b83b87c1
|
/virtual/classroom/views.py
|
07712f42f10a68880ba8e8500e4a6784453a72e1
|
[] |
no_license
|
siumhossain/classroom
|
a8926621456d1e7ed77387fb8a5851825771a9d9
|
4afe9cdee2c58b71bd3711b042eae3f86172eaea
|
refs/heads/master
| 2023-02-02T08:28:14.958761
| 2020-12-24T14:58:59
| 2020-12-24T14:58:59
| 323,007,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,300
|
py
|
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView,DeleteView
from .models import Course
from django.contrib.auth.mixins import LoginRequiredMixin,PermissionRequiredMixin
from django.shortcuts import redirect, get_object_or_404
from django.views.generic.base import TemplateResponseMixin,View
from .forms import ModuleFormSet
from django.forms.models import modelform_factory
from django.apps import apps
from .models import Module, Content
from braces.views import CsrfExemptMixin, JsonRequestResponseMixin
from django.db.models import Count
from .models import Subject
from django.views.generic.detail import DetailView
from students.forms import CourseEnrollForm
# Create your views here.
from django.views.generic.list import ListView
from .models import Course
# NOTE(review): several classes below are defined more than once (tutorial-style
# layering). Python keeps only the LAST definition of each name, so only the
# final ManageCourseListView / OwnerCourseMixin / CourseCreateView /
# CourseUpdateView / CourseDeleteView in this module are actually in effect.
class ManageCourseListView(ListView):
    # Superseded by the later definitions of the same name below.
    model = Course
    template_name = 'courses/manage/course/list.html'

    def get_queryset(self):
        # Only courses owned by the requesting user.
        qs = super().get_queryset()
        return qs.filter(owner=self.request.user)


class OwnerMixin(object):
    """Restrict any queryset to objects owned by the requesting user."""
    def get_queryset(self):
        qs = super().get_queryset()
        return qs.filter(owner=self.request.user)


class OwnerEditMixin(object):
    """Stamp the current user as owner when a valid form is saved."""
    def form_valid(self, form):
        form.instance.owner = self.request.user
        return super().form_valid(form)


class OwnerCourseMixin(OwnerMixin):
    # Superseded by the permission-aware OwnerCourseMixin further below.
    model = Course
    fields = ['subject', 'title', 'slug', 'overview']
    success_url = reverse_lazy('manage_course_list')


class OwnerCourseEditMixin(OwnerCourseMixin, OwnerEditMixin):
    template_name = 'courses/manage/course/form.html'


class ManageCourseListView(OwnerCourseMixin, ListView):
    # Superseded by the permission-aware version below.
    template_name = 'courses/manage/course/list.html'


class CourseCreateView(OwnerCourseEditMixin, CreateView):
    pass


class CourseUpdateView(OwnerCourseEditMixin, UpdateView):
    pass


class CourseDeleteView(OwnerCourseMixin, DeleteView):
    template_name = 'courses/manage/course/delete.html'


class OwnerCourseMixin(OwnerMixin,LoginRequiredMixin,PermissionRequiredMixin):
    """Effective version: ownership filter plus login and model permissions."""
    model = Course
    fields = ['subject', 'title', 'slug', 'overview']
    success_url = reverse_lazy('manage_course_list')


class ManageCourseListView(OwnerCourseMixin, ListView):
    # Effective version used at runtime.
    template_name = 'courses/manage/course/list.html'
    permission_required = 'courses.view_course'


class CourseCreateView(OwnerCourseEditMixin, CreateView):
    permission_required = 'courses.add_course'


class CourseUpdateView(OwnerCourseEditMixin, UpdateView):
    permission_required = 'courses.change_course'


class CourseDeleteView(OwnerCourseMixin, DeleteView):
    template_name = 'courses/manage/course/delete.html'
    permission_required = 'courses.delete_course'
class CourseModuleUpdateView(TemplateResponseMixin, View):
    """Add, edit, or delete the modules of one course via an inline formset."""
    template_name = 'courses/manage/module/formset.html'
    course = None

    def get_formset(self, data=None):
        # Bound formset when POST data is supplied, unbound otherwise.
        return ModuleFormSet(instance=self.course,data=data)

    def dispatch(self, request, pk):
        # 404 unless the course exists and belongs to the requesting user.
        self.course = get_object_or_404(Course,id=pk,owner=request.user)
        return super().dispatch(request, pk)

    def get(self, request, *args, **kwargs):
        formset = self.get_formset()
        return self.render_to_response({'course': self.course,'formset': formset})

    def post(self, request, *args, **kwargs):
        formset = self.get_formset(data=request.POST)
        if formset.is_valid():
            formset.save()
            return redirect('manage_course_list')
        # Invalid data: redisplay the formset with validation errors.
        return self.render_to_response({'course': self.course,'formset': formset})
class ContentCreateUpdateView(TemplateResponseMixin, View):
    """Create or update a content item (text/video/image/file) inside a module."""
    module = None
    model = None
    obj = None
    template_name = 'courses/manage/content/form.html'

    def get_model(self, model_name):
        # Only the four known content models may be edited; anything else -> None.
        if model_name in ['text', 'video', 'image', 'file']:
            return apps.get_model(app_label='courses', model_name=model_name)
        return None

    def get_form(self, model, *args, **kwargs):
        # Build a ModelForm on the fly, hiding bookkeeping fields from the user.
        Form = modelform_factory(model, exclude=['owner', 'order', 'created', 'updated'])
        return Form(*args, **kwargs)

    def dispatch(self, request, module_id, model_name, id=None):
        self.module = get_object_or_404(Module, id=module_id, course__owner=request.user)
        # Bug fix: was self.get_mode(model_name) — that method does not exist,
        # so every request raised AttributeError. The class defines get_model.
        self.model = self.get_model(model_name)
        if id:
            self.obj = get_object_or_404(self.model, id=id, owner=request.user)
        return super().dispatch(request, module_id, model_name, id)

    def get(self, request, module_id, model_name, id=None):
        form = self.get_form(self.model, instance=self.obj)
        return self.render_to_response({'form': form, 'object': self.obj})

    def post(self, request, module_id, model_name, id=None):
        form = self.get_form(self.model, instance=self.obj, data=request.POST, files=request.FILES)
        if form.is_valid():
            obj = form.save(commit=False)
            obj.owner = request.user
            obj.save()
            if not id:
                # First save of a brand-new item: attach it to the module.
                Content.objects.create(module=self.module, item=obj)
            return redirect('module_content_list', self.module.id)
        return self.render_to_response({'form': form, 'object': self.obj})
class ContentDeleteView(View):
    """Delete a content item and its underlying object, then return to the list."""
    def post(self, request, id):
        content = get_object_or_404(Content,id=id,module__course__owner=request.user)
        module = content.module
        # Delete both the concrete item (text/video/...) and the Content row.
        content.item.delete()
        content.delete()
        return redirect('module_content_list', module.id)


class ModuleContentListView(TemplateResponseMixin, View):
    """Render the contents of one module (owner only)."""
    template_name = 'courses/manage/module/content_list.html'

    def get(self, request, module_id):
        module = get_object_or_404(Module,id=module_id,course__owner=request.user)
        return self.render_to_response({'module': module})


class ModuleOrderView(CsrfExemptMixin,JsonRequestResponseMixin,View):
    """AJAX endpoint: persist drag-and-drop ordering of a course's modules."""
    def post(self, request):
        for id, order in self.request_json.items():
            Module.objects.filter(id=id,course__owner=request.user).update(order=order)
        return self.render_json_response({'saved': 'OK'})


class ContentOrderView(CsrfExemptMixin,JsonRequestResponseMixin,View):
    """AJAX endpoint: persist drag-and-drop ordering of a module's contents."""
    def post(self, request):
        for id, order in self.request_json.items():
            Content.objects.filter(id=id,module__course__owner=request.user).update(order=order)
        return self.render_json_response({'saved': 'OK'})
class CourseListView(TemplateResponseMixin, View):
    """Public course catalogue, optionally filtered by subject slug."""
    model = Course
    template_name = 'courses/course/list.html'

    def get(self, request, subject=None):
        # Annotated counts are used for display in the template.
        subjects = Subject.objects.annotate(total_courses=Count('courses'))
        courses = Course.objects.annotate(total_modules=Count('modules'))
        if subject:
            subject = get_object_or_404(Subject, slug=subject)
            courses = courses.filter(subject=subject)
        return self.render_to_response({'subjects': subjects,'subject': subject,'courses': courses})
class CourseDetailView(DetailView):
    """Public course detail page with an enrollment form pre-bound to the course."""
    model = Course
    template_name = 'courses/course/detail.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Pre-fill the hidden course field so enrolling needs one click.
        context['enroll_form'] = CourseEnrollForm(initial={'course':self.object})
        return context
|
[
"sium.hossain@yahoo.com"
] |
sium.hossain@yahoo.com
|
d9c01472e3a355d2c744a3b72a0896f067997726
|
5fb9f29964268223869944508798d6c21d9e5298
|
/sub_test/sub_test.py
|
ea78eeb031a733544b22f4926dc7ead63ea94ff4
|
[] |
no_license
|
CodedQuen/Python-Pocket-Reference-
|
56459ce1509f74bc253af027be91935e62922948
|
8f7c69edb8ad4ac3ef7f70bab15ffe24eb162325
|
refs/heads/master
| 2022-06-14T20:57:13.799676
| 2020-05-05T08:27:17
| 2020-05-05T08:27:17
| 261,398,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
from subprocess import call, Popen, PIPE, check_output
print(call("ls -l", shell=True))
print(check_output("ls -l", shell=True).decode())
pipe1 = Popen("ls -l", stdout=PIPE, shell=True)
pipe2 = Popen("wc -l", stdin=pipe1.stdout, stdout=PIPE, shell=True)
print(pipe2.stdout.read().decode())
|
[
"noreply@github.com"
] |
CodedQuen.noreply@github.com
|
2b63046ccd7b852daa7ce8a78c6345d746f667f9
|
6c137e70bb6b1b618fbbceddaeb74416d387520f
|
/spyre/testing/cavity.py
|
1d95f5fa22fb580cf87be1fa538c49f3fa4ba85b
|
[
"BSD-2-Clause"
] |
permissive
|
zhong-lab/code
|
fe497c75662f8c3b7ab3c01e7e351bff6d5e8d15
|
b810362e06b44387f0768353c602ec5d29b551a2
|
refs/heads/master
| 2023-01-28T09:46:01.448833
| 2022-06-12T22:53:47
| 2022-06-12T22:53:47
| 184,670,765
| 2
| 7
|
BSD-2-Clause
| 2022-12-08T21:46:15
| 2019-05-02T23:37:39
|
Python
|
UTF-8
|
Python
| false
| false
| 361
|
py
|
##Config file for lifetime_spyrelet.py in spyre/spyre/spyrelet/
# Device List
# Maps a local device name to [lantz driver path, [VISA resource address], kwargs].
devices = {
    'vna':[
        'lantz.drivers.VNA.P9371A',
        ['TCPIP0::DESKTOP-ER250Q8::hislip0,4880::INSTR'],
        {}
    ]
}
# Experiment List
# Maps a spyrelet name to [spyrelet class path, device-alias mapping, kwargs].
spyrelets = {
    'freqSweep':[
        'spyre.spyrelets.cavity_spyrelet.Record',
        {'vna': 'vna'},
        {}
    ],
}
|
[
"none"
] |
none
|
992cbbcc8751d9aa132eea71a9c34ba42f5b03b4
|
4754226625d4a6b9680a22fd39166f502034aeb5
|
/samsung/[cutz]lab1.py
|
971e71a34d9cdfed878116d35cf9fd619e85ef26
|
[
"MIT"
] |
permissive
|
cutz-j/AlgorithmStudy
|
298cc7d6fa92345629623a9bd8d186f0608cdf7c
|
de0f81220e29bd5e109d174800f507b12a3bee36
|
refs/heads/master
| 2021-07-01T03:15:51.627208
| 2021-02-24T01:24:44
| 2021-02-24T01:24:44
| 222,935,322
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,913
|
py
|
import sys
from itertools import combinations
class Queue():
    """Minimal FIFO queue backed by a plain list.

    Consumed items are never physically removed: ``front`` advances past
    them and ``pop_count`` records how many were taken, so ``empty()`` is
    simply "everything appended has been popped".
    """

    def __init__(self):
        self.front = 0      # index of the next item to hand out
        self.rear = 0       # number of items ever appended
        self.list = []      # backing storage (consumed slots kept)
        self.pop_count = 0  # number of items handed out so far

    def append(self, x):
        # Enqueue at the tail.
        self.list.append(x)
        self.rear += 1

    def pop(self):
        # Dequeue from the head; raises IndexError when nothing is left.
        item = self.list[self.front]
        self.front += 1
        self.pop_count += 1
        return item

    def empty(self):
        return self.pop_count == len(self.list)
# Samsung SW test "lab": place exactly 3 walls so the virus (2) infects as few
# cells as possible; print the maximum achievable count of safe cells (0).
res = 0
rl = lambda: sys.stdin.readline()
N, M = map(int, rl().split())
all_map = []
virus = []   # starting virus cell coordinates
zero = []    # empty cells = candidate wall positions
virus_num = sys.maxsize  # smallest infected-cell count seen so far (for pruning)
for i in range(N):
    tmp = list(map(int, rl().split()))
    for j, v in enumerate(tmp):
        if v == 2:
            virus.append((i, j))
        elif v == 0:
            zero.append((i, j))
    all_map.append(tmp)
row_dir, col_dir = [1, 0, -1, 0], [0, 1, 0, -1]
# Try every way of placing 3 walls on empty cells.
wall_comb = combinations(zero, 3)
for wall in wall_comb:
    # visited = copy.deepcopy(all_map)
    # Manual grid copy (cheaper than deepcopy for a small 2-D int list).
    visited = []
    for i in range(N):
        tmp = []
        for j in range(M):
            tmp.append(all_map[i][j])
        visited.append(tmp)
    for w in wall:
        visited[w[0]][w[1]] = 1
    v_num = 0
    queue = Queue()
    for v in virus:
        queue.append(v)
    # BFS flood-fill of the virus through empty cells.
    while queue.empty() == False:
        r, c = queue.pop()
        v_num += 1
        if v_num > virus_num:
            # Already infecting more cells than the current best: prune.
            break
        for i in range(4):
            new_r, new_c = r + row_dir[i], c + col_dir[i]
            if (0 <= new_r < N) and (0 <= new_c < M):
                if visited[new_r][new_c] == 0:
                    queue.append((new_r, new_c))
                    visited[new_r][new_c] = 2
    # Count safe (0) and infected (2) cells for this wall layout.
    cnt, v_cnt = 0, 0
    for i in range(N):
        for j in range(M):
            if visited[i][j] == 0:
                cnt += 1
            if visited[i][j] == 2:
                v_cnt += 1
    if cnt > res:
        res = cnt
        virus_num = v_cnt
print(res)
|
[
"cutz-j@naver.com"
] |
cutz-j@naver.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.