blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
32c3139ec5e133b7b35001ec1912f6c53b0955b7 | 298c86756b741b4c0b706f5178fd26d6d3b63541 | /src/901_1000/0914_x-of-a-kind-in-a-deck-of-cards/x-of-a-kind-in-a-deck-of-cards.py | 27be2e51ef338dd61d63914194d3e4a84532a682 | [
"Apache-2.0"
] | permissive | himichael/LeetCode | c1bd6afd55479440c21906bf1a0b79a658bb662f | 4c19fa86b5fa91b1c76d2c6d19d1d2ef14bdff97 | refs/heads/master | 2023-02-12T07:25:22.693175 | 2023-01-28T10:41:31 | 2023-01-28T10:41:31 | 185,511,218 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | class Solution(object):
def hasGroupsSizeX(self, deck):
if not deck or len(deck)<2:
return False
N = len(deck)
count = [0 for _ in xrange(10000)]
for i in deck:
count[i] += 1
for x in xrange(2,N+1):
if N%x==0:
if all(v%x==0 for v in count):
return True
return False
# 最大公约数实现
def hasGroupsSizeX(self, deck):
if not deck or len(deck)<2:
return False
def gdc(a,b):
return a if not b else gdc(b,a%b)
N = len(deck)
count = [0 for _ in xrange(10000)]
res = -1
for i in deck:
count[i] += 1
for i in xrange(10000):
if count[i]>0:
if res==-1:
res = count[i]
else:
res = gdc(res,count[i])
return res>=2
# 用哈希表+GCD算法
def hasGroupsSizeX(self, deck):
"""
:type deck: List[int]
:rtype: bool
"""
if not deck or len(deck)<2:
return False
d = dict()
for i in deck:
d[i] = d.setdefault(i,0)+1
def gcd(a,b):
return a if not b else gcd(b,a%b)
x = d[deck[0]]
for k in d.values():
if k==1:
return False
x = gcd(x,k)
return x>=2
| [
"michaelwangg@qq.com"
] | michaelwangg@qq.com |
70e7402ac27a83b2882ab610cb92d0a9c8ae175d | 2cc3a920f0d21e92410dbd5d38bae43481f5cb87 | /utility/decorations.py | 0b8a2b7d3100a18e2f961c772a22547a771c63ac | [
"MIT"
] | permissive | DiegoDigo/ApiSchool | 998399cdf1b009cf2a530e37638d9e3d0c33ed6e | ce34e674e4154c41e91320956a578a0ec965e470 | refs/heads/master | 2020-03-21T18:20:01.080416 | 2018-07-01T16:03:13 | 2018-07-01T16:03:13 | 138,885,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.DEBUG)
def log(method):
def call(func):
def give(*args):
logging.info(f'{method.capitalize()} a escola : {args[1].capitalize()}')
return func(*args)
return give
return call
| [
"di3g0d0ming05@gmail.com"
] | di3g0d0ming05@gmail.com |
cb2da70b9e808a01f452ea0fe996a9b7481b30a4 | 613fdf39f444cfd62a60adbea8e493e7bae85ec4 | /SDS/geometry/geometry_C_2162.py | cab393a0322e7aa6674e222724e6eb23ba928a37 | [] | no_license | Parkhyunseo/PS | bf854ff01ecbdf7ee866faf4178f988cf2ddf1ca | e2c4839c9ce6f217baafd204efbe3d9ea8ad1330 | refs/heads/master | 2020-04-04T01:46:57.907655 | 2019-06-21T08:36:25 | 2019-06-21T08:36:25 | 155,677,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,016 | py | from sys import stdin
from collections import namedtuple
N = int(stdin.readline())
Line2D = namedtuple('Line2D', ['v1','v2'])
Vector2D = namedtuple('Vector2D', ['x', 'y'])
def ccw(p1, p2, p3):
temp = p1.x*p2.y + p2.x*p3.y + p3.x*p1.y
temp = temp - (p1.y*p2.x + p2.y*p3.x + p3.y*p1.x)
if temp > 0:
return 1
elif temp < 0:
return -1
else:
return 0
def is_cross(l1, l2):
# l1과 l2의 v1 l1과 l2의 v2의 부호가 서로다른가.
# l2과 l1의 v1 l2과 l1의 v2의 부호가 서로다른가.
# 만약 모두 0이라면
# y값이 모두 같다면 y범위 안에 포함되는지
# x값이 모두 같다면 x범위 안에 포함되는지
cross = False
ccw1 = ccw(l1.v1, l1.v2, l2.v1)
ccw2 = ccw(l1.v1, l1.v2, l2.v2)
ccw3 = ccw(l2.v1, l2.v2, l1.v1)
ccw4 = ccw(l2.v1, l2.v2, l1.v2)
if ccw1 * ccw2 <= 0:
if ccw3 * ccw4 <= 0:
cross = True
if ccw1 == 0 and ccw2 == 0 and ccw3 == 0 and ccw4 == 0:
mn = min(l1.v1.x, l1.v2.x, l2.v1.x, l2.v2.x)
mx = max(l1.v1.x, l1.v2.x, l2.v1.x, l2.v2.x)
if mn != mx: # x좌표가 모두 같지 않다면
l1_x_mn = min(l1.v1.x, l1.v2.x)
l1_x_mx = max(l1.v1.x, l1.v2.x)
l2_x_mn = min(l2.v1.x, l2.v2.x)
l2_x_mx = max(l2.v1.x, l2.v2.x)
if l1_x_mx < l2_x_mn or l2_x_mx < l1_x_mn:
cross = False
else:
l1_y_mn = min(l1.v1.y, l1.v2.y)
l1_y_mx = max(l1.v1.y, l1.v2.y)
l2_y_mn = min(l2.v1.y, l2.v2.y)
l2_y_mx = max(l2.v1.y, l2.v2.y)
if l1_y_mx < l2_y_mn or l2_y_mx < l1_y_mn:
cross = False
return cross
def find(v):
if v == parent[v]:
return v
parent[v] = find(parent[v])
return parent[v]
def merge(v, u):
u = find(u)
v = find(v)
if u == v:
return
if rank[u] > rank[v]:
u, v = v, u
parent[u] = v
rank[v] += rank[u]
if rank[u] == rank[v]:
rank[v] += 1
parent = [ i for i in range(3001)]
rank = [ 1 for _ in range(3001) ]
lines = []
for i in range(N):
x1, y1, x2, y2 = map(int, stdin.readline().split())
lines.append(Line2D(Vector2D(x1,y1), Vector2D(x2,y2)))
for i in range(N-1):
for j in range(i+1, N):
l1 = lines[i]
l2 = lines[j]
if is_cross(l1, l2):
#print('is cross', i, j)
merge(i, j)
#print(parent)
#print(rank)
group_count = 0
rank_max = 0
for i in range(N):
if parent[i] == i:
group_count += 1
rank_max = max(rank_max, rank[i])
print(group_count)
print(rank_max)
| [
"hyeonseo9669@hanmail.net"
] | hyeonseo9669@hanmail.net |
998baf043d0bb44d4c8c8ad17d5e50c4c8773fcd | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/cctbx_project/fast_linalg/SConscript | 1fd157e5714386a76f6e638d7e14f238be76fbd6 | [
"BSD-3-Clause",
"BSD-3-Clause-LBNL"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 1,835 | import libtbx.load_env
import sys
import os
from os import path
Import("env_etc")
env_etc.fast_linalg_dist = libtbx.env.dist_path("fast_linalg")
env_etc.fast_linalg_include = libtbx.env.under_dist("fast_linalg", "..")
env_etc.fast_linalg_common_includes = [
env_etc.libtbx_include,
env_etc.fast_linalg_include,
env_etc.boost_include,
]
def enable_fast_linalg(env):
if not libtbx.env.has_module('fast_linalg'):
return
env_etc.include_registry.append(
env=env,
paths=env_etc.fast_linalg_common_includes)
env.Append(LIBS=['boost_filesystem', 'fast_linalg'])
for flag in ("USE_FAST_LINALG",):
flag = "-D" + flag
env.Append(CXXFLAGS=flag)
env.Append(SHCXXFLAGS=flag)
env_etc.enable_fast_linalg = enable_fast_linalg
Import("env_base")
envs = [env_base]
if not env_etc.no_boost_python:
Import("env_no_includes_boost_python_ext")
envs.append(env_no_includes_boost_python_ext)
fast_linalg_envs = []
for env, extra_inc in zip(envs, ([], [env_etc.python_include])):
e = env.Clone()
env_etc.enable_fast_linalg(e)
env_etc.include_registry.append(env=e, paths=extra_inc)
fast_linalg_envs.append(e)
env_fast_linalg_base = fast_linalg_envs[0]
Export("env_fast_linalg_base")
if not env_etc.no_boost_python:
env_fast_linalg_boost_python_ext = fast_linalg_envs[1]
Export("env_fast_linalg_boost_python_ext")
env = env_fast_linalg_base.Clone()
if 'fast_linalg' in env['LIBS']:
env['LIBS'].remove('fast_linalg')
if sys.platform[:3] != 'win':
env['LIBS'].append("dl")
fast_linalg = env.SharedLibrary(target='#lib/fast_linalg', source=['np_ext.cpp'])
if not env_etc.no_boost_python:
env = env_fast_linalg_boost_python_ext.Clone()
fast_linalg_ext = env.SharedLibrary(target='#lib/fast_linalg_ext', source=['ext.cpp'])
Depends(fast_linalg_ext, fast_linalg)
SConscript("tests/SConscript")
| [
"jorge7soccer@gmail.com"
] | jorge7soccer@gmail.com | |
a5536ab89b0ca426610494c44f382a33064672d8 | 61e40900e69f73438bd903d8447f1625a80d6603 | /fuzzers/074-dump_all/node_names.py | 64bca5b847d5f4e254a411e1e40770d11b5f87aa | [
"ISC",
"LicenseRef-scancode-dco-1.1"
] | permissive | mithro/prjxray | b4249f5ef54ae2eff9f4c663cb837d2a5080bb8f | 77e8b24c883bd527b511413f1939c3a34a237c1c | refs/heads/master | 2022-06-20T13:21:00.687619 | 2020-12-26T22:39:29 | 2020-12-26T22:39:29 | 114,992,619 | 1 | 1 | null | 2017-12-21T10:16:50 | 2017-12-21T10:16:50 | null | UTF-8 | Python | false | false | 5,787 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
""" This script creates node_wires.json, which describes how nodes are named.
This script consumes the raw node data from root_dir and outputs
node_wires.json to the output_dir.
The class prjxray.node_model.NodeModel can be used to reconstruct node names
and node <-> wire mapping.
The contents of node_wires.json is:
- The set of tile type wires that are always nodes, key "node_pattern_wires"
- The set of tile wires that are nodes within the graph, key
"specific_node_wires".
"""
import argparse
import datetime
import json
import multiprocessing
import progressbar
import pyjson5 as json5
import os.path
from prjxray import util, lib
from prjxray.grid import Grid
def read_json5(fname):
with open(fname, 'r') as f:
return json5.load(f)
def main():
parser = argparse.ArgumentParser(
description="Reduce node names for wire connections.")
parser.add_argument('--root_dir', required=True)
parser.add_argument('--output_dir', required=True)
parser.add_argument('--max_cpu', type=int, default=10)
args = parser.parse_args()
_, nodes = lib.read_root_csv(args.root_dir)
processes = min(multiprocessing.cpu_count(), args.max_cpu)
pool = multiprocessing.Pool(processes=processes)
# Read tile grid and raw node data.
print('{} Reading tilegrid'.format(datetime.datetime.now()))
with open(os.path.join(util.get_db_root(), util.get_part(),
'tilegrid.json')) as f:
grid = Grid(db=None, tilegrid=json.load(f))
raw_node_data = []
with progressbar.ProgressBar(max_value=len(nodes)) as bar:
for idx, node in enumerate(pool.imap_unordered(
read_json5,
nodes,
chunksize=20,
)):
bar.update(idx)
raw_node_data.append(node)
bar.update(idx + 1)
node_wires = set()
remove_node_wires = set()
specific_node_wires = set()
# Create initial node wire pattern
for node in progressbar.progressbar(raw_node_data):
if len(node['wires']) <= 1:
continue
node_tile, node_wire = node['node'].split('/')
for wire in node['wires']:
wire_tile, wire_name = wire['wire'].split('/')
if node['node'] == wire['wire']:
assert node_tile == wire_tile
assert node_wire == wire_name
gridinfo = grid.gridinfo_at_tilename(node_tile)
node_wires.add((gridinfo.tile_type, wire_name))
print(
'Initial number of wires that are node drivers: {}'.format(
len(node_wires)))
# Remove exceptional node wire names, create specific_node_wires set,
# which is simply the list of wires that are nodes in the graph.
for node in progressbar.progressbar(raw_node_data):
if len(node['wires']) <= 1:
continue
for wire in node['wires']:
wire_tile, wire_name = wire['wire'].split('/')
gridinfo = grid.gridinfo_at_tilename(wire_tile)
key = gridinfo.tile_type, wire_name
if node['node'] == wire['wire']:
assert key in node_wires
else:
if key in node_wires:
specific_node_wires.add(node['node'])
remove_node_wires.add(key)
# Complete the specific_node_wires list after the pruning of the
# node_pattern_wires sets.
for node in progressbar.progressbar(raw_node_data):
if len(node['wires']) <= 1:
continue
for wire in node['wires']:
wire_tile, wire_name = wire['wire'].split('/')
gridinfo = grid.gridinfo_at_tilename(wire_tile)
key = gridinfo.tile_type, wire_name
if key in remove_node_wires and node['node'] == wire['wire']:
specific_node_wires.add(node['node'])
node_wires -= remove_node_wires
print(
'Final number of wires that are node drivers: {}'.format(
len(node_wires)))
print(
'Number of wires that are node drivers: {}'.format(
len(specific_node_wires)))
# Verify the node wire data.
for node in progressbar.progressbar(raw_node_data):
if len(node['wires']) <= 1:
continue
found_node_wire = False
for wire in node['wires']:
if wire['wire'] in specific_node_wires:
assert wire['wire'] == node['node']
found_node_wire = True
break
if not found_node_wire:
for wire in node['wires']:
wire_tile, wire_name = wire['wire'].split('/')
gridinfo = grid.gridinfo_at_tilename(wire_tile)
key = gridinfo.tile_type, wire_name
if key in node_wires:
assert node['node'] == wire['wire']
else:
assert node['node'] != wire['wire']
# Normalize output.
tile_types = {}
for tile_type, tile_wire in node_wires:
if tile_type not in tile_types:
tile_types[tile_type] = []
tile_types[tile_type].append(tile_wire)
for tile_type in tile_types:
tile_types[tile_type].sort()
out = {
'node_pattern_wires': tile_types,
'specific_node_wires': sorted(specific_node_wires),
}
with open(os.path.join(args.output_dir, 'node_wires.json'), 'w') as f:
json.dump(out, f, indent=2, sort_keys=True)
if __name__ == '__main__':
main()
| [
"537074+litghost@users.noreply.github.com"
] | 537074+litghost@users.noreply.github.com |
666ef7ae8454cf444e77e6c07f2807fef215d9ed | e174e13114fe96ad2a4eeb596a3d1c564ae212a8 | /Python for Finance Analyze Big Financial Data by Y. Hilpisch/Code of Python For Finance/4375OS_09_Code/4375_09_13_multiple_IRRs.py | efdba1da9ec9447d9063e834a25dab13a42060e3 | [] | no_license | Kevinqian0501/python_books | c1a7632d66dceb46db439f7cbed86d85370aab42 | 0691e4685af03a296aafb02447e3585db55ce461 | refs/heads/master | 2021-08-30T19:27:03.985464 | 2017-12-19T05:56:31 | 2017-12-19T05:56:31 | 104,145,012 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | """
Name : 4375OS_09_13_multiple_IRRs.py
Book : Python for Finance
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 12/26/2013
email : yany@canisius.edu
paulyxy@hotmail.com
"""
def IRRs_f(cash_flows):
"""
Objective: find mutiple IRRs
e.g,
>>>x=[55,-50,-50,-50,100]
>>>IRRs_f(x)
[0.072, 0.337]
"""
n=1000
r=range(1,n)
n_cash_flow=len(cash_flows)
epsilon=abs(mean(cash_flows)*0.01)
irr=[-99.00]
j=1
npv=[]
for i in r: npv.append(0)
lag_sign=sign(npv_f(float(r[0]*1.0/n*1.0),cash_flows))
for i in range(1,n-1):
#print("r[i]",r[i])
interest=float(r[i]*1.0/n*1.0)
npv[i]=npv_f(interest,cash_flows)
s=sign(npv[i])
if s*lag_sign<0:
lag_sign=s
if j==1:
irr=[interest]
j=2
else:
irr.append(interest)
return irr
| [
"kevin@Qians-MacBook-Pro.local"
] | kevin@Qians-MacBook-Pro.local |
a8f3bf74062b00234952bf941310bc099acc2beb | 009df7ad499b19a4df066160cf0c7d8b20355dfb | /src/the_tale/the_tale/game/chronicle/tests/test_general.py | b4bdbdab14970d216e7e3ff723cbae0b0d11ba05 | [
"BSD-3-Clause"
] | permissive | devapromix/the-tale | c0804c7475e877f12f29444ddbbba025561d3412 | 2a10efd3270734f8cf482b4cfbc5353ef8f0494c | refs/heads/develop | 2020-03-28T20:26:30.492292 | 2018-10-07T17:32:46 | 2018-10-07T17:32:46 | 149,070,887 | 1 | 0 | BSD-3-Clause | 2018-10-07T17:32:47 | 2018-09-17T04:57:50 | Python | UTF-8 | Python | false | false | 355 | py |
import smart_imports
smart_imports.all()
class GeneralTests(utils_testcase.TestCase):
def setUp(self):
super(GeneralTests, self).setUp()
def test_every_bill_has_argument_getter(self):
self.assertCountEqual(list(signal_processors.BILL_ARGUMENT_GETTERS.keys()),
bills_relations.BILL_TYPE.records)
| [
"a.eletsky@gmail.com"
] | a.eletsky@gmail.com |
7afbc89d190fdbbb0bebbd5ed46b9452624255e6 | 43a78f0bcd94f617d2c55e5019f3f3475580165d | /GeeksForGeeks/Data Types/Strings/Old Style Formatting.py | 2a2c91a664ddbf200cde31eb63416b602e2c795c | [] | no_license | ctramm/Python_Training | 2c35bd36b7cd1ea6598f915fafcf37ca048cf8ed | a0864a82bd6fb002c5f1a9aa7fb5d0b18341e6b0 | refs/heads/master | 2022-12-04T14:18:30.477562 | 2022-11-12T09:03:25 | 2022-11-12T09:03:25 | 171,736,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | # Python program for old style formatting of integers
Integer1 = 12.3456789
print("Formating in 3.2f format:")
print('The value of Integer1 is %3.2f' %Integer1)
print('\nFormatting in 3.4f format: ')
print('The value of Integer1 is %3.4f' %Integer1)
| [
"ctramm@wiley.com"
] | ctramm@wiley.com |
94f56802087e38330a83fc564735713c40bcb7f3 | cf945fb7c961376bfcff37c80fe50312d4f32290 | /Python3.5/DataStructure/C5_搜索/E3_DeepthSearch.py | e7473fde6dd14c0c83e81fb62769c083a7126596 | [] | no_license | lizhenQAZ/code_manage | faa1e805326cc8da8463e0f8820c9d092a04dddb | f98977d58a9febb8212652846314418bba37bfc7 | refs/heads/master | 2020-12-03T00:00:52.205238 | 2018-12-19T16:00:48 | 2018-12-19T16:00:48 | 95,968,266 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,246 | py | class Node:
def __init__(self, elem=-1, lchild=None, rchild=None):
self.elem = elem
self.lchild = lchild
self.rchild = rchild
class Tree:
def __init__(self, root=None):
self.root = root
def add(self, elem):
node = Node(elem)
if self.root is None:
self.root = node
else:
queue = []
queue.append(self.root)
while queue:
root_node = queue.pop(0)
if root_node.lchild is None:
root_node.lchild = node
return
if root_node.rchild is None:
root_node.rchild = node
return
queue.append(root_node.lchild)
queue.append(root_node.rchild)
def preorder(self, root):
if root is None:
return
print(root.elem, end=' ')
self.preorder(root.lchild)
self.preorder(root.rchild)
def preorder_nonrecursive(self, root):
if root is None:
return
self.root = root
queue = []
while self.root or queue:
while self.root:
print(self.root.elem, end=' ')
queue.append(self.root)
self.root = self.root.lchild
node = queue.pop()
self.root = node.rchild
def inorder(self, root):
if root is None:
return
self.inorder(root.lchild)
print(root.elem, end=' ')
self.inorder(root.rchild)
def inorder_nonrecursive(self, root):
if root is None:
return
self.root = root
queue = []
while self.root or queue:
while self.root:
queue.append(self.root)
self.root = self.root.lchild
node = queue.pop()
print(node.elem, end=" ")
self.root = node.rchild
def postorder(self, root):
if root is None:
return
self.postorder(root.lchild)
self.postorder(root.rchild)
print(root.elem, end=' ')
def postorder_nonrecursive(self, root):
if root is None:
return
self.root = root
queue1 = []
queue2 = []
queue1.append(self.root)
while queue1:
node = queue1.pop()
if node.lchild:
queue1.append(node.lchild)
if node.rchild:
queue1.append(node.rchild)
queue2.append(node)
while queue2:
print(queue2.pop().elem, end=" ")
if __name__ == '__main__':
tree = Tree()
tree.add(11)
tree.add(22)
tree.add(3)
tree.add(77)
tree.add(66)
tree.add(88) # 11 22 3 77 66 88
# 11
# 22 3
# 77 66 88
tree.preorder(tree.root) # 11 22 77 66 3 88
print(' ')
tree.inorder(tree.root) # 77 22 66 11 88 3
print(' ')
tree.postorder(tree.root) # 77 66 22 88 3 11
print(' ')
# tree.preorder_nonrecursive(tree.root) # 11 22 77 66 3 88
# print(' ')
# tree.inorder_nonrecursive(tree.root) # 77 22 66 11 88 3
# print(' ')
tree.postorder_nonrecursive(tree.root) # 77 66 22 88 3 11
print(' ')
| [
"www.516960831@qq.com"
] | www.516960831@qq.com |
c04f91b1c63ec124253ad6cf3aea089d6b30ef8f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_53/360.py | 028e89a9be267984012ec51a845faf5831e4673e | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | #!/usr/bin/python
pot2 = []
for i in range(31):
pot2.append((2**i)-1)
for i in range(input()):
N, K = map(int, raw_input().split())
if ((K & pot2[N]) == pot2[N]):
print 'Case #%s: ON' % (i + 1)
else:
print 'Case #%s: OFF' % (i + 1)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
40d5e296721b28056adeaef87327595a9f91f5b5 | be0e0488a46b57bf6aff46c687d2a3080053e52d | /python/baekjoon/2630.py | 8080d5d6431928ca3d289e15219caec4b97d2bfd | [] | no_license | syo0e/Algorithm | b3f8a0df0029e4d6c9cbf19dcfcb312ba25ea939 | 1ae754d5bb37d02f28cf1d50463a494896d5026f | refs/heads/master | 2023-06-09T11:31:54.266900 | 2021-06-30T17:04:38 | 2021-06-30T17:04:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | import sys
def paperCount(x, y, n):
global arr, blue, white
check = arr[x][y]
for i in range(x, x + n):
for j in range(y, y + n):
if check != arr[i][j]:
paperCount(x, y, n // 2) # 1사분면
paperCount(x, y + n // 2, n // 2) # 2사분면
paperCount(x + n // 2, y, n // 2) # 3사분면
paperCount(x + n // 2, y + n // 2, n // 2) # 4사분면
return
if check==0:#모두 흰색일 때
white+=1
return
else: #모두 파란색일 때
blue+=1
return
N = int(input())
arr = []
blue = 0
white = 0
for _ in range(N):
arr.append(list(map(int, sys.stdin.readline().split())))
paperCount(0,0,N)
print(white)
print(blue) | [
"kyun2dot@gmail.com"
] | kyun2dot@gmail.com |
27dcd1f3556024f8e95a6210ce4a8e14b48105b6 | 377cbbe140fd0faf1eb53ba3794de816ac307cde | /src/dataset/info/NorbInfo.py | a2431696fc397c0d9ddccbdeac6219fd3f93aee4 | [
"MIT"
] | permissive | dhruvtapasvi/implementation | fcbd7ab8e7b1368a0f07ee41dc5f0b6d6708c206 | 964980f431517f4548a87172a05107cdf700fb84 | refs/heads/master | 2021-09-16T01:47:50.601661 | 2018-05-17T19:22:44 | 2018-05-17T19:22:44 | 114,498,055 | 1 | 0 | MIT | 2018-05-05T02:17:35 | 2017-12-16T23:59:13 | Python | UTF-8 | Python | false | false | 528 | py | from enum import Enum
from config import routes
NORB_RANGE = (0, 255)
NORB_VALIDATION_INSTANCES = 7
NORB_TEST_INSTANCES = 9
NORB_IMAGE_DIMENSIONS = (96, 96)
NORB_LABEL_DIMENSIONS = (6,)
NORB_ELEVATION_NAME = "NORB: ELEVATION ANGLE"
NORB_ELEVATION_FACTORS = (0, 6, 3, 8)
NORB_AZIMUTH_NAME = "NORB: AZIMUTH ANGLE"
NORB_AZIMUTH_FACTORS = (0, 8, 4, 12)
class NorbLabelIndex(Enum):
STEREO = 0
CATEGORY = 1
INSTANCE = 2
ELEVATION = 3
AZIMUTH = 4
LIGHTING = 5
NORB_HOME = routes.RESOURCE_ROUTE + "/norb"
| [
"dhruv.tapasvi1996@gmail.com"
] | dhruv.tapasvi1996@gmail.com |
c40ebfec42208b8d9cf7aae6f67610db3752df94 | 55b6af0fcfffe9beb48753f00c55102051b4bd35 | /src/main.py | a92b12accf632cd774955f730c4b61d18a08899a | [] | no_license | sobamchan/rte_baseline | bbe87bc6cf0ebe739aba86f973a1a2a8d60ac148 | cbda046d5d019476db6d4ca785451bdaef0cadcb | refs/heads/main | 2023-08-14T17:30:33.282871 | 2021-10-08T12:05:04 | 2021-10-08T12:05:04 | 414,965,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,401 | py | import json
import os
from argparse import ArgumentParser
from functools import partial
from typing import Any, Dict, List, Tuple, Union
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from lineflow.core import IterableDataset
from torch.utils.data import DataLoader
from transformers import AdamW, RobertaModel, RobertaTokenizer # type: ignore
def load_jsonl(dpath: str) -> List[Dict[str, Union[str, int]]]:
"""Load jsonline formatted file given its path."""
datas = []
with open(dpath, "r") as _f:
for line in _f.readlines():
datas.append(json.loads(line))
return datas
def preprocess(tokenizer: RobertaTokenizer, d: Dict[str, Union[str, int]]) -> Any:
"""Basic tokenization by pretrained tokenizer."""
model_inputs = tokenizer(d["s1"], d["s2"], return_tensors="pt", padding="max_length", max_length=256) # type: ignore
model_inputs["label"] = torch.LongTensor([int(d["label"])])
for k in ["input_ids", "attention_mask", "label"]:
model_inputs[k] = model_inputs[k].squeeze() # type: ignore
return model_inputs
def get_dataloaders(dpath: str, batch_size: int) -> Tuple[DataLoader, DataLoader]:
"""Load file, preprocess (tokenize), pack into pytorch dataloader."""
train_ds = IterableDataset(load_jsonl(os.path.join(dpath, "train.jsonl")))
val_ds = IterableDataset(load_jsonl(os.path.join(dpath, "val.jsonl")))
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
preprocessor = partial(preprocess, tokenizer)
train_ds = train_ds.map(preprocessor)
val_ds = val_ds.map(preprocessor)
train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True) # type: ignore
val_dl = DataLoader(val_ds, batch_size=batch_size, shuffle=False) # type: ignore
return train_dl, val_dl
class Classifier(nn.Module):
"""Classification head to be on top of RoBERTa."""
def __init__(self, config):
super().__init__()
class_n = 2
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, class_n)
def forward(self, features):
x = features[:, 0, :]
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class RTEModule(pl.LightningModule):
def __init__(self, hparams: Dict):
super().__init__()
self.model = RobertaModel.from_pretrained("roberta-base")
self.classifier = Classifier(config=self.model.config)
self.accuracy = pl.metrics.Accuracy() # type: ignore
self.save_hyperparameters(hparams)
def forward(self, batch: Dict):
# Get feature vectors from RoBERTa
out = self.model(
input_ids=batch["input_ids"], attention_mask=batch["attention_mask"]
)
# Take last hidden state from out, to know the format of `out` refer [here](https://huggingface.co/transformers/model_doc/roberta.html#transformers.RobertaModel.forward)
last_hidden_state = out[0]
logits = self.classifier(
last_hidden_state
) # Run classification given features.
return logits
def training_step(self, batch, _):
logits = self.forward(batch)
loss = F.cross_entropy(logits.view(-1, 2), batch["label"].view(-1))
self.log("train_loss", loss)
return loss
def validation_step(self, batch, _):
logits = self.forward(batch)
loss = F.cross_entropy(logits.view(-1, 2), batch["label"].view(-1))
self.log("val_loss", loss)
acc = self.accuracy(logits, batch["label"])
self.log("val_acc", acc)
return {"val_loss": loss, "val_acc": acc}
def configure_optimizers(self):
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters_roberta = [
{
"params": [
p
for n, p in self.model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.01,
},
{
"params": [
p
for n, p in self.model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer_grouped_parameters_clf = [
{
"params": [
p
for n, p in self.classifier.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.01,
},
{
"params": [
p
for n, p in self.classifier.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters_roberta + optimizer_grouped_parameters_clf,
lr=self.hparams["lr"],
)
return optimizer
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--lr", type=float, required=True, help="Learning rate")
parser.add_argument("--batch-size", type=int, required=True, help="Batch size")
parser.add_argument(
"--max-epochs", type=int, required=True, help="Maximum epochs to train."
)
parser.add_argument(
"--seed", type=int, required=True, help="Maximum epochs to train."
)
parser.add_argument(
"--dpath", type=str, required=True, help="Path to data directory"
)
parser.add_argument(
"--default-root-dir",
type=str,
required=True,
help="Path to save logs and trained models.",
)
parser.add_argument("--gpus", type=int, default=0, help="Number of GPUs to use")
args = parser.parse_args()
hparams = vars(args)
train_dl, val_dl = get_dataloaders(hparams["dpath"], hparams["batch_size"])
module = RTEModule(hparams)
trainer = pl.Trainer(
gpus=args.gpus,
default_root_dir=args.default_root_dir,
max_epochs=args.max_epochs,
)
trainer.fit(module, train_dl, val_dl)
| [
"oh.sore.sore.soutarou@gmail.com"
] | oh.sore.sore.soutarou@gmail.com |
a66380c5496088aa4f1310659b39d656cc76fd08 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_360/ch14_2020_03_03_12_13_23_023192.py | 10283fa8d0f938ee904ef22374db339b79cc82c8 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | import math
def calcula_distancia_do_projetil(v,y0,teta):
p1= (v**2)/(2*9.8) * (math.sin(2*teta))
p2= (1+1+((2*9.8*y0)/(v**2)*(math.sinh((teta**2)**1/2))
return (p1*p2) | [
"you@example.com"
] | you@example.com |
2e6efd643cc5e4b33ec8739b9b47153576fdbcc3 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /8SBG29RYLwTbGxn7T_17.py | ff3608ee682597adacc4088662c6093852732cf6 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | """
Create a function that determines whether a shopping order is eligible for
free shipping. An order is eligible for free shipping if the total cost of
items purchased exceeds $50.00.
### Examples
free_shipping({ "Shampoo": 5.99, "Rubber Ducks": 15.99 }) ➞ False
free_shipping({ "Flatscreen TV": 399.99 }) ➞ True
free_shipping({ "Monopoly": 11.99, "Secret Hitler": 35.99, "Bananagrams": 13.99 }) ➞ True
### Notes
Ignore tax or additional fees when calculating the total order cost.
"""
def free_shipping(order):
prices = []
for i in order:
prices.append(order[i])
if sum(prices) >= 50.00:
return(True)
else:
return(False)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
90c2b8bceda1a8b774f00a143a85b08304bd8aeb | d17724b2ce056b435f57b16fb0cbea32e44a29c6 | /Gun3PY/nmap02.py | 5a04aa5937af6653ba8492227c6632c78c2aa040 | [] | no_license | UgurCIL/Examples | 27264d89131b4aaff46f91705a03779c4e825ad6 | c1722a519836a24c8a946380e6cbcd6da963f0c5 | refs/heads/master | 2020-04-24T15:28:17.288204 | 2019-02-22T13:30:35 | 2019-02-22T13:30:35 | 172,069,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | import nmap
nm = nmap.PortScanner()
nm.scan("10.10.1.1", "1-1000", arguments='--script bannser.nse')
print "-" * 30
print "[+] Komut: " + str(nm.command_line()) #ekrana calisan komutu basar
for host in nm.all_hosts(): #tarama sonucu donen tum IP adreslerinin uzerinden gecer
if nm[host].state() == "up": #host aktif mi
print "[+] Host Aktif: " + str(host)
for proto in nm[host].all_protocols(): #host ustundeki tum protokollerin uzerinden gecer
print "Protokol: " + str(proto)
portlar = nm[host][proto].keys() #host-protokol ustundeki tum portlarin uzerinden gecer
for port in portlar:
print "Port: {}\t Durumu: {}".format(port, nm[host][proto][port]["state"])
#######################################################
# NSE scriptlerinin sonuclarini burada duzenlemeli ve #
# kullanici icin anlamlı hale getirmeliyiz #
#######################################################
else:
print "[-] Host Down: " + str(host)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
3aa1b6f3ec34eff2d86949a3bc874e58db0c7fb6 | 57f47187c28464497252bf4faeab83f33bcdb9c7 | /functional_tests/test_list_item_validation.py | 964269899684fb5753399fc43263a9800bb31ad6 | [] | no_license | amazingguni/pomodoro-web | e059d9b1948c27c72230d555078b2a5a7facd539 | 8c13052816649ec9465973121e63222680c925ce | refs/heads/master | 2021-05-04T10:56:46.183598 | 2016-08-20T07:26:28 | 2016-08-20T07:26:28 | 53,728,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,851 | py | from .base import FunctionalTest
class ItemValidationTest(FunctionalTest):
    """Functional tests for list-item form validation (empty and duplicate items)."""
    def get_error_element(self):
        """Return the element carrying the Bootstrap validation error class."""
        return self.browser.find_element_by_css_selector('.has-error')
    def test_cannot_add_empty_list_items(self):
        """Submitting a blank item shows an error; a non-blank item succeeds."""
        # Edith goes to the home page and accidentally tries to submit an
        # empty list item: she hits Enter on the empty input box.
        self.browser.get(self.server_url)
        self.get_item_input_box().send_keys('\n')
        # The page refreshes, and there is an error message saying that
        # list items cannot be blank.
        error = self.get_error_element()
        self.assertEqual(error.text, "You can't have an empty list item")
        # She tries again with some text, which now works.
        self.get_item_input_box().send_keys('우유사기\n')
        self.check_for_row_in_list_table('1: 우유사기')
        # Perversely, she now tries to submit a second blank item.
        self.get_item_input_box().send_keys('\n')
        # The list page shows the error message again.
        self.check_for_row_in_list_table('1: 우유사기')
        error = self.get_error_element()
        self.assertEqual(error.text, "You can't have an empty list item")
        # And she can correct it by filling in some text.
        self.get_item_input_box().send_keys('tea 만들기\n')
        self.check_for_row_in_list_table('1: 우유사기')
        self.check_for_row_in_list_table('2: tea 만들기')
    def test_cannot_add_duplicate_items(self):
        """Submitting an item already in the list shows a duplicate-item error."""
        # Edith goes back to the main page and starts a new list.
        self.browser.get(self.server_url)
        self.get_item_input_box().send_keys('콜라 사기\n')
        self.check_for_row_in_list_table('1: 콜라 사기')
        # She accidentally enters a duplicate item,
        self.get_item_input_box().send_keys('콜라 사기\n')
        # and sees a helpful error message.
        self.check_for_row_in_list_table('1: 콜라 사기')
        error = self.get_error_element()
        self.assertEqual(error.text, "이미 리스트에 해당 아이템이 있습니다")
    def test_error_messages_are_cleaned_on_input(self):
        """Typing in the input box clears a previously displayed error."""
        # Edith starts a new list in a way that triggers a validation error.
        self.browser.get(self.server_url)
        self.get_item_input_box().send_keys('\n')
        error = self.get_error_element()
        self.assertTrue(error.is_displayed())
        # She starts typing into the input box to clear the error,
        self.get_item_input_box().send_keys('a')
        # and is pleased to see the error message disappear.
        error = self.get_error_element()
        self.assertFalse(error.is_displayed())
| [
"amazingguni@gmail.com"
] | amazingguni@gmail.com |
a95e4c049406dd04cd6d40590beb853c62e01e36 | 99b1bf665ffb983f70ce85392652f25e96a620ad | /contig-correction/Snakefile | 4eaa27efa508605bf59c47d75118d27c27a51dfe | [] | no_license | faircloth-lab/phyluce-workflows | a5458face0a0c1ba71883f5f1743c20523933b73 | e4fd0f0ed689d5eb28f578b1ee66f504311420c9 | refs/heads/main | 2023-01-20T19:26:02.943663 | 2020-11-25T21:44:36 | 2020-11-25T21:44:36 | 301,446,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,365 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
(c) 2020 Brant Faircloth || http://faircloth-lab.org/
All rights reserved.
This code is distributed under a 3-clause BSD license. Please see
LICENSE.txt for more information.
Created on 05 October 2020 11:27 CDT (-0500)
DESCRIPTION
Workflow uses bcftools to generate clean consensus sequences from BAM
files - ideally filtering low quality/coverage bases in the process.
Output written to `consensus` folder and filtered SNP calls retained.
"""
#import pdb
configfile: "config.yaml"
# Final targets: one corrected consensus FASTA per sample listed in the config.
rule all:
    input:
        expand("consensus/{sample}.consensus.fasta", sample=config["contigs"])
# Pile up the sample's reads against its contigs and call variants,
# producing a temporary BCF of raw calls.
rule generate_pileups:
    input:
        contig = lambda wildcards: config["contigs"][wildcards.sample],
        bam = lambda wildcards: config["bams"][wildcards.sample]
    output:
        temp("pileups/{sample}.calls.bcf")
    threads: 1
    shell:
        "bcftools mpileup -Ou -f {input.contig} {input.bam} | bcftools call -m -Ob -o {output}"
# Normalize the raw calls (left-align against the reference) and drop
# duplicate records so downstream filtering behaves consistently.
rule normalize_calls:
    input:
        contig = lambda wildcards: config["contigs"][wildcards.sample],
        pileup = "pileups/{sample}.calls.bcf"
    output:
        temp("normalized_pileups/{sample}.norm.bcf")
    threads: 1
    shell:
        "bcftools norm --rm-dup all -f {input.contig} {input.pileup} -Ob -o {output}"
# Filter low-quality/low-coverage calls: mask SNPs near indels and exclude
# records with QUAL < 20, depth < 5, or more than two alleles.
rule filter_norm_pileups:
    input:
        "normalized_pileups/{sample}.norm.bcf"
    output:
        "filtered_norm_pileups/{sample}.norm.flt-indels.Q20.DP10.bcf"
    threads: 1
    shell:
        "bcftools filter --IndelGap 5 --SnpGap 5 --exclude 'QUAL<20 | DP<5 | AN>2' {input} -Ob -o {output}"
# Index the filtered BCF so bcftools consensus can random-access it.
# NOTE(review): rule name carries a typo ("filterd"); left as-is to avoid
# churning log/rule references.
rule index_filterd_pileups:
    input:
        "filtered_norm_pileups/{sample}.norm.flt-indels.Q20.DP10.bcf"
    output:
        "filtered_norm_pileups/{sample}.norm.flt-indels.Q20.DP10.bcf.csi"
    threads: 1
    shell:
        "bcftools index {input}"
rule generate_consensus:
input:
contig = lambda wildcards: config["contigs"][wildcards.sample],
bcf = "filtered_norm_pileups/{sample}.norm.flt-indels.Q20.DP10.bcf",
idx = "filtered_norm_pileups/{sample}.norm.flt-indels.Q20.DP10.bcf.csi"
output:
"consensus/{sample}.consensus.fasta"
threads: 1
shell:
"bcftools consensus -f {input.contig} --absent '.' {input.bcf} | python ./scripts/filter-missing-from-bcftools.py > {output}" | [
"brant@faircloth-lab.org"
] | brant@faircloth-lab.org | |
a05b499012fd5a82c7c0f973a6a90e6ad43ba483 | 6170016478a8767f8e3b77eaa314fb338883a107 | /launchdarkly_api/models/custom_property.py | 669d4cea3783b0fab7ffdfc3bab8210e44cc5c81 | [
"Apache-2.0"
] | permissive | code-haven/api-client-python | 13c2f9aef8ed1a04b2c3838744ab3603d7cd5304 | db8274a2ec380c967209aa6ae12e074145615f9f | refs/heads/master | 2020-07-23T06:40:50.382967 | 2019-09-06T20:22:17 | 2019-09-06T20:22:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,934 | py | # coding: utf-8
"""
LaunchDarkly REST API
Build custom integrations with the LaunchDarkly REST API # noqa: E501
OpenAPI spec version: 2.0.18
Contact: support@launchdarkly.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CustomProperty(object):
    """Swagger model for a single custom property attached to a feature flag.

    The wire format (attribute names and their JSON keys) is fixed by the
    LaunchDarkly API definition and must not change.
    """

    # attribute name -> declared swagger type
    swagger_types = {
        'name': 'str',
        'value': 'list[str]'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'name': 'name',
        'value': 'value'
    }

    def __init__(self, name=None, value=None):  # noqa: E501
        """Build a CustomProperty; ``name`` is mandatory, ``value`` optional."""
        self._name = None
        self._value = None
        self.discriminator = None
        # Route through the property setter so the None check applies.
        self.name = name
        if value is not None:
            self.value = value

    @property
    def name(self):
        """str: the name of the property."""
        return self._name

    @name.setter
    def name(self, name):
        """Set the property name; ``None`` is rejected."""
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name

    @property
    def value(self):
        """list[str]: the values for this property."""
        return self._value

    @value.setter
    def value(self, value):
        """Set the values for this property (no validation applied)."""
        self._value = value

    def to_dict(self):
        """Return the model properties as a plain dict."""
        def _convert(item):
            # Nested swagger models expose to_dict(); anything else passes
            # through unchanged.
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_convert(element) for element in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: _convert(v) for k, v in value.items()}
            else:
                result[attr] = value
        if issubclass(CustomProperty, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return a pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when their full state matches."""
        if not isinstance(other, CustomProperty):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"team@launchdarkly.com"
] | team@launchdarkly.com |
958f0566700d34656e72409f54b7079c6e6ae296 | 47ce68e1ff970318fd31ac43405d0e1fa3594bf6 | /Models/biGAN/BasicBiganXEntropyShallowerExtraGencTraining.py | 6bd00db4117de8b5cb4547d18fa1f6de23a2abab | [
"BSD-3-Clause"
] | permissive | Midoriii/Anomaly_Detection_Diploma | 7196da379f8aefbd4546ca23e8303d1829e059fb | 11145e3e5210a4e45a33d98b138213edb7bc5d3d | refs/heads/master | 2023-03-25T20:42:56.961210 | 2021-03-14T01:13:39 | 2021-03-14T01:13:39 | 261,205,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,943 | py | '''
Copyright (c) 2021, Štěpán Beneš
Basic bigAN net, using cross entropy as loss and shallower architecture
with extra G and E training
'''
import numpy as np
from Models.biGAN.BaseBiganModel import BaseBiganModel
from Models.Losses.custom_losses import wasserstein_loss
from Models.biGAN.weightclip_constraint import WeightClip
from keras.layers import Input, Reshape, Dense, Flatten, concatenate
from keras.layers import UpSampling2D, Conv2D, MaxPooling2D, BatchNormalization, Dropout, LeakyReLU
from keras.models import Model
from keras.optimizers import RMSprop, Adam, SGD
class BasicBiganXEntropyShallowerExtraGencTraining(BaseBiganModel):
    """BiGAN variant: binary cross-entropy losses, a shallower architecture,
    and five generator/encoder updates per discriminator update.
    """
    def __init__(self, input_shape, latent_dim=48, lr=0.0005, w_clip=0.01, batch_size=4):
        super().__init__(input_shape, latent_dim, lr, w_clip, batch_size)
        self.name = "BasicBiganXEntropyShallowerExtraGencTraining"
        # Adam for the generator/encoder combo, plain SGD for the discriminator.
        g_optimizer = Adam(lr=self.lr, beta_1=0.5)
        d_optimizer = SGD(lr=self.lr)
        # NOTE(review): label convention appears inverted from the usual GAN
        # setup -- "real" targets are 0 and "fake" targets are 1 (see usage
        # in train()).  Presumably deliberate; confirm before changing.
        self.disc_labels_real = np.zeros((self.batch_size, 1))
        self.genc_labels_real = np.zeros((self.batch_size, 1))
        self.genc_labels_fake = np.ones((self.batch_size, 1))
        self.disc_labels_fake = np.ones((self.batch_size, 1))
        self.d = self.build_discriminator()
        self.d.compile(optimizer=d_optimizer, loss='binary_crossentropy', metrics=['accuracy'])
        self.g = self.build_generator()
        self.e = self.build_encoder()
        # The Discriminator part in GE model won't be trainable - GANs take turns.
        # Since the Discriminator itself has been previously compiled, this won't affect it.
        self.d.trainable = False
        self.ge = self.build_ge_enc()
        self.ge.compile(optimizer=g_optimizer, loss=['binary_crossentropy', 'binary_crossentropy'])
        return
    def build_generator(self):
        """Build the generator: latent vector -> 384x384x1 image in [-1, 1]."""
        z_input = Input(shape=[self.latent_dim])
        x = Dense(24*24*32)(z_input)
        x = Reshape([24, 24, 32])(x)
        # 24 -> 48
        x = Conv2D(32, (3, 3), padding='same')(x)
        x = LeakyReLU(0.1)(x)
        x = UpSampling2D((2, 2))(x)
        # 48 -> 96
        x = Conv2D(32, (3, 3), padding='same')(x)
        x = LeakyReLU(0.1)(x)
        x = UpSampling2D((2, 2))(x)
        # 96 -> 192
        x = Conv2D(32, (3, 3), padding='same')(x)
        x = LeakyReLU(0.1)(x)
        x = UpSampling2D((2, 2))(x)
        # 192 -> 384
        x = Conv2D(32, (3, 3), padding='same')(x)
        x = LeakyReLU(0.1)(x)
        x = UpSampling2D((2, 2))(x)
        # tanh output matches image data scaled to [-1, 1]
        x = Conv2D(1, (3, 3), activation='tanh', padding='same')(x)
        return Model(inputs=z_input, outputs=x)
    def build_encoder(self):
        """Build the encoder: 384x384x1 image -> latent vector (inverse of G)."""
        img_input = Input(shape=[self.input_shape, self.input_shape, 1])
        # 384 -> 192
        x = Conv2D(32, (3, 3), padding='same')(img_input)
        x = LeakyReLU(0.1)(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        # 192 -> 96
        x = Conv2D(32, (3, 3), padding='same')(x)
        x = LeakyReLU(0.1)(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        # 96 -> 48
        x = Conv2D(32, (3, 3), padding='same')(x)
        x = LeakyReLU(0.1)(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        # 48 -> 24
        x = Conv2D(32, (3, 3), padding='same')(x)
        x = LeakyReLU(0.1)(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Flatten()(x)
        x = Dense(256)(x)
        x = LeakyReLU(0.1)(x)
        x = Dense(self.latent_dim)(x)
        return Model(inputs=img_input, outputs=x)
    def build_discriminator(self):
        """Build the joint discriminator over (image, latent) pairs.

        Image and latent vector are processed by separate towers whose
        features are concatenated before the final sigmoid score.
        """
        img_input = Input(shape=[self.input_shape, self.input_shape, 1])
        z_input = Input(shape=[self.latent_dim])
        # Latent tower
        l = Dense(256)(z_input)
        l = LeakyReLU(0.1)(l)
        l = Dense(256)(l)
        l = LeakyReLU(0.1)(l)
        # Image tower
        x = Conv2D(64, (3, 3), padding='same')(img_input)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.1)(x)
        x = Dropout(rate=self.dropout)(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(64, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.1)(x)
        x = Dropout(rate=self.dropout)(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(128, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.1)(x)
        x = Dropout(rate=self.dropout)(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(128, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.1)(x)
        x = Dropout(rate=self.dropout)(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        # Joint head
        x = Flatten()(x)
        x = concatenate([x, l])
        x = Dense(256)(x)
        x = LeakyReLU(0.1)(x)
        x = Dense(1, activation='sigmoid')(x)
        return Model(inputs=[img_input, z_input], outputs=x)
    def build_ge_enc(self):
        """Build the combined G+E training model.

        Outputs the (frozen) discriminator's scores for the encoded-real
        pair and the generated-fake pair, so one train_on_batch updates
        both G and E adversarially.
        """
        img_input = Input(shape=[self.input_shape, self.input_shape, 1])
        z_input = Input(shape=[self.latent_dim])
        fake_imgs = self.g(z_input)
        critic_fake = self.d([fake_imgs, z_input])
        fake_z = self.e(img_input)
        critic_real = self.d([img_input, fake_z])
        return Model(inputs=[img_input, z_input], outputs=[critic_real, critic_fake])
    def train(self, images, epochs):
        """Alternate D and G/E updates; G/E get five updates per D update.

        Per-epoch losses/accuracies are appended to the tracking lists
        inherited from BaseBiganModel.
        """
        for epoch in range(epochs):
            # D training: one batch of real (image, E(image)) pairs and one
            # batch of fake (G(z), z) pairs.
            noise = self.latent_noise(self.batch_size, self.latent_dim)
            img_batch = self.get_image_batch(images, self.batch_size)
            fake_noise = self.e.predict(img_batch)
            fake_img_batch = self.g.predict(noise)
            d_real_loss = self.d.train_on_batch([img_batch, fake_noise], self.disc_labels_real)
            self.dr_losses.append(d_real_loss[0])
            self.dr_acc.append(d_real_loss[1])
            d_fake_loss = self.d.train_on_batch([fake_img_batch, noise], self.disc_labels_fake)
            self.df_losses.append(d_fake_loss[0])
            self.df_acc.append(d_fake_loss[1])
            d_loss = (0.5 * np.add(d_real_loss, d_fake_loss))
            self.d_losses.append(d_loss[0])
            # E+G training: five updates per discriminator update ("extra
            # Genc training"), losses accumulated then averaged.
            # NOTE(review): np.empty(3) starts from uninitialized memory;
            # np.zeros(3) looks intended for an accumulator -- verify.
            ge_enc_loss = np.empty(3)
            for _ in range(0, 5):
                noise = self.latent_noise(self.batch_size, self.latent_dim)
                img_batch = self.get_image_batch(images, self.batch_size)
                ge_enc_loss += self.ge.train_on_batch([img_batch, noise],
                                                      [self.genc_labels_fake, self.genc_labels_real])
            self.e_losses.append(ge_enc_loss[1]/5.0)
            self.g_losses.append(ge_enc_loss[2]/5.0)
            print("Epoch: " + str(epoch) + ", D loss: " + str(d_loss[0])
                  + "; D acc: " + str(d_loss[1]) + "; E loss: " + str(ge_enc_loss[1]/5.0)
                  + "; G loss: " + str(ge_enc_loss[2]/5.0))
        return
| [
"stephen.Team24@gmail.com"
] | stephen.Team24@gmail.com |
f14570e9a9148eec246c2a9ad05c0655af553abb | 21553bf9df9db7458a25963ece522d8c27a7d598 | /piafedit/model/geometry/trajectory.py | 2975ef1564ee8f56115e0499d67a35b8f784829d | [
"MIT"
] | permissive | flegac/piaf-edit | 6ac0638ff2aef542da3f75b844080ea0e9771aa5 | d100231c0c60cd4412dd37b22a88fe7bb5fb4982 | refs/heads/main | 2023-05-10T22:07:50.709835 | 2021-05-30T18:17:58 | 2021-05-30T18:17:58 | 360,704,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | from typing import TypeVar, Generic, List
T = TypeVar('T')


class Trajectory(Generic[T]):
    """Piecewise-linear trajectory over points that support interpolation.

    Each element of ``trajectory`` must expose
    ``interpolate(a: float, other: T) -> T`` returning the point a fraction
    ``a`` of the way from itself towards ``other``.
    """

    def __init__(self, trajectory: List[T]):
        self.trajectory = trajectory

    def iter(self, n: int):
        """Yield ``n`` evenly spaced samples covering a = 0.0 .. 1.0 inclusive."""
        if n == 1:
            # A single sample is the start point; the general formula below
            # would divide by zero (n - 1 == 0).
            yield self.interpolate(0.0)
            return
        for i in range(n):
            yield self.interpolate(i / (n - 1))

    def interpolate(self, a: float) -> T:
        """Return the point at parameter ``a`` in [0, 1].

        Values of ``a`` beyond 1.0 extrapolate past the last segment.
        """
        points = len(self.trajectory)
        if points == 1:
            # Degenerate trajectory: nothing to interpolate between; the
            # original code divided by zero here.
            return self.trajectory[0]
        intervals = points - 1
        inter_size = 1.0 / intervals
        # Walk the segments, consuming inter_size of `a` per segment until
        # `a` lands inside the current one.
        for i in range(intervals):
            if a <= inter_size:
                b = a / inter_size
                return self.trajectory[i].interpolate(b, self.trajectory[i + 1])
            a -= inter_size
        # a exceeded 1.0: extrapolate beyond the final segment.
        b = 1.0 + a
        return self.trajectory[-2].interpolate(b, self.trajectory[-1])
| [
"florent.legac@gmail.com"
] | florent.legac@gmail.com |
4f785efd07f62c9c894e3570fdb55167daa3f18a | 651fc810476aaf8752909160013a491acbdea00c | /imperative/python/megengine/module/dropout.py | 8bf6213fa4e1df0bcd9d07f7f11ff2b6456de839 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | jonrzhang/MegEngine | 7cec9df6e561cc13e7a3292fd160f16b05305222 | 94b72022156a068d3e87bceed7e1c7ae77dada16 | refs/heads/master | 2021-04-22T18:33:16.837256 | 2021-03-16T02:27:55 | 2021-03-16T02:27:55 | 249,868,433 | 0 | 0 | NOASSERTION | 2021-03-16T02:27:55 | 2020-03-25T02:29:51 | null | UTF-8 | Python | false | false | 1,309 | py | # -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from ..functional import dropout
from .module import Module
class Dropout(Module):
    r"""Randomly zero input elements with probability :math:`drop\_prob`
    while training.

    Surviving elements are rescaled by :math:`\frac{1}{1 - drop\_prob}` so
    activations keep the same expectation.  In evaluation mode this module
    is the identity, like :class:`~.Identity`.

    :param drop_prob: probability with which each element is dropped
    """

    def __init__(self, drop_prob=0.0):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, inputs):
        # Dropout only applies while training; inference passes through.
        if not self.training:
            return inputs
        return dropout(inputs, self.drop_prob, training=True)

    def _module_info_string(self) -> str:
        return "drop_prob={}".format(self.drop_prob)
| [
"megengine@megvii.com"
] | megengine@megvii.com |
ae4dab755dc271424119097ef054be7783d21425 | 6268655719a46c9d2b6b38ea21babd8b877724dd | /ecom/migrations/0005_leadsection.py | bc01996abf1ce76a18dea1b1c916d1751e08ca96 | [] | no_license | MahmudulHassan5809/Ecommerce-Django | f84b968621eed61fdf08c55cd43c7a09d8bc8ba7 | f416536a6b5ce583283139e7271f3fcd1da49739 | refs/heads/master | 2022-12-31T15:39:34.405140 | 2020-10-24T18:15:38 | 2020-10-24T18:15:38 | 292,297,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | # Generated by Django 3.0.7 on 2020-09-05 16:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: create the LeadSection model, which pins
    three Category rows (first/second/third lead) for the storefront."""
    dependencies = [
        ('ecom', '0004_product_discount'),
    ]
    operations = [
        migrations.CreateModel(
            name='LeadSection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # One-to-one links: each Category can be featured in at most
                # one lead slot of each kind.
                ('first_lead', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='category_first_lead', to='ecom.Category')),
                ('second_lead', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='category_second_lead', to='ecom.Category')),
                ('third_lead', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='category_third_lead', to='ecom.Category')),
            ],
            options={
                'verbose_name_plural': '3.Lead Sections',
            },
        ),
    ]
| [
"mahmudul.hassan240@gmail.com"
] | mahmudul.hassan240@gmail.com |
48b022333a2f33b842ba7c6e9d0341b085c60c78 | 53abcba37ef0fd69bd90453b175f936edcca842c | /Facebook/56.py | f9555a2c795b1fb7eea599997c65283ab4705cfc | [] | no_license | cloi1994/session1 | 44db8fa6d523d4f8ffe6046969f395e8bbde9e40 | 9a79fd854e9842050da07f9c9b0ce5cadc94be89 | refs/heads/master | 2020-03-09T05:15:51.510027 | 2018-06-13T04:41:43 | 2018-06-13T04:41:43 | 128,608,752 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | # Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
    def merge(self, intervals):
        """Merge overlapping intervals (LeetCode 56).

        BUG FIX: the original tracked the growing right edge in a local
        ``end`` variable but never wrote it back to the interval stored in
        ``res``, so merged intervals kept their original (too small) end.
        Intervals in ``res`` are now extended in place.

        :type intervals: List[Interval]
        :rtype: List[Interval]
        """
        if not intervals:
            return []
        intervals.sort(key=lambda x: x.start)
        res = [intervals[0]]
        for cur in intervals[1:]:
            last = res[-1]
            if last.end >= cur.start:
                # Overlapping (or touching): extend the merged interval.
                last.end = max(last.end, cur.end)
            else:
                res.append(cur)
        return res
| [
"noreply@github.com"
] | cloi1994.noreply@github.com |
fd8aa3bdef0487a3405b1237f11c86d8c8006b65 | 407b47de02072ea986d0812f5190fc7f258c6c1a | /codeforces1033Bsquaredifference.py | 30a80195a6f9a6dbbeb78581cdd118a9211a5ec2 | [] | no_license | sreyansb/Codeforces | c0a472940bafb020c914d45d6b790d5f75513dff | 12baa08a32837dcea0ee8a2bf019f0551e009420 | refs/heads/master | 2023-01-04T11:33:14.749438 | 2020-10-28T08:20:44 | 2020-10-28T08:20:44 | 275,894,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | t=int(input())
# For each query (a, b): a^2 - b^2 = (a - b)(a + b), so the difference is
# prime exactly when a - b == 1 and a + b is itself prime.
for i in range(t):
    l=input().split()
    if int(l[0])-int(l[1])==1:
        flag=0
        # Trial-division primality test of a + b up to its square root.
        for j in range(2,int(pow((int(l[0])+int(l[1])),0.5))+1):
            if (int(l[0])+int(l[1]))%j==0:
                flag=1
                break
        if flag==0:
            print("YES")
        else:
            print("NO")
    else:
        print("NO")
| [
"sreyansrocks@gmail.com"
] | sreyansrocks@gmail.com |
0445df73a7c53d35940af534988270c9f6d1638f | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Kivy/kivy/examples/widgets/popup_with_kv.py | 02a39467b8bbf03925309c7f0e774502c11744c6 | [
"MIT"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:1390e4bfcb00405a7d0abae5ceeceb1105628de1311ca190100c95f0958e6c65
size 642
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
2ef9499135833f5261798b13e1b06863a0931a3e | 42a8084d227dced8cebf20dbff7852a4f70b5562 | /John/Python/quadmap_example/quad.py | 400db05aca861097fff38b47a2e44683280cd5f7 | [
"MIT"
] | permissive | fagan2888/WAMS-2017 | aa2b010fa7ff21785896edac57007407666d64bb | dfda99c56ee8e120cd1c385c6e65d97f594f5bb9 | refs/heads/master | 2021-02-14T09:06:59.200144 | 2017-08-16T03:36:36 | 2017-08-16T03:36:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | import numpy as np
import time
from numba import jit
@jit
def quad(x0, n):
    """Iterate the logistic map x -> 4x(1-x) a total of n-1 times from x0."""
    x = x0
    for i in range(1, n):
        x = 4.0 * x * (1.0 - x)
    return x
# Warm-up call so numba JIT-compiles quad before the timed run below.
x = quad(0.2, 10)
n = 10_000_000
t = time.time()
x = quad(0.2, n)
elapsed = time.time() - t
print("last val = {}".format(x))
print("elapsed time = {}".format(elapsed))
"john.stachurski@gmail.com"
] | john.stachurski@gmail.com |
e55061a5c5747e6d6966bd8c0b41939c1a6c4e5a | 35be0509b6f98030ef5338033468710de1a536a3 | /nova/nova/virt/images.py | 6f3e48715b89d2983281dca62d1b2e64b6e38e19 | [
"Apache-2.0"
] | permissive | yizhongyin/OpenstackLiberty | 6f2f0ff95bfb4204f3dbc74a1c480922dc387878 | f705e50d88997ef7473c655d99f1e272ef857a82 | refs/heads/master | 2020-12-29T02:44:01.555863 | 2017-03-02T06:43:47 | 2017-03-02T06:43:47 | 49,924,385 | 0 | 1 | null | 2020-07-24T00:49:34 | 2016-01-19T03:45:06 | Python | UTF-8 | Python | false | false | 6,183 | py | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handling of VM disk images.
"""
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from nova import exception
from nova.i18n import _, _LE
from nova import image
from nova.openstack.common import imageutils
from nova import utils
LOG = logging.getLogger(__name__)
image_opts = [
cfg.BoolOpt('force_raw_images',
default=True,
help='Force backing images to raw format'),
]
CONF = cfg.CONF
CONF.register_opts(image_opts)
IMAGE_API = image.API()
def qemu_img_info(path, format=None):
    """Return an object containing the parsed output from qemu-img info.

    :param path: path to the disk image to inspect
    :param format: explicit image format to pass to qemu-img, or None to
                   let qemu-img auto-detect it
    :raises exception.InvalidDiskInfo: if the path does not exist (for
        non-rbd backends) or qemu-img produced no output
    """
    # TODO(mikal): this code should not be referring to a libvirt specific
    # flag.
    # NOTE(sirp): The config option import must go here to avoid an import
    # cycle
    CONF.import_opt('images_type', 'nova.virt.libvirt.imagebackend',
                    group='libvirt')
    # rbd-backed images are not local files, so the existence check only
    # applies to the other image backends.
    if not os.path.exists(path) and CONF.libvirt.images_type != 'rbd':
        msg = (_("Path does not exist %(path)s") % {'path': path})
        raise exception.InvalidDiskInfo(reason=msg)
    # Force the C locale so qemu-img output parses deterministically.
    cmd = ('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path)
    if format is not None:
        cmd = cmd + ('-f', format)
    out, err = utils.execute(*cmd)
    if not out:
        msg = (_("Failed to run qemu-img info on %(path)s : %(error)s") %
               {'path': path, 'error': err})
        raise exception.InvalidDiskInfo(reason=msg)
    return imageutils.QemuImgInfo(out)
def convert_image(source, dest, in_format, out_format, run_as_root=False):
    """Convert an image from ``in_format`` to ``out_format``.

    ``in_format`` is mandatory: letting qemu-img auto-detect the format of
    untrusted image data is a security risk, so callers must state it.

    :raises RuntimeError: if ``in_format`` is None
    """
    if in_format is None:
        # BUG FIX: the implicit string concatenation was missing a space,
        # producing "...a securityrisk" in the error message.
        raise RuntimeError("convert_image without input format is a "
                           "security risk")
    _convert_image(source, dest, in_format, out_format, run_as_root)
def convert_image_unsafe(source, dest, out_format, run_as_root=False):
    """Convert an image while letting qemu-img auto-detect the input format.

    Unsafe: format auto-detection on untrusted data is a security risk, so
    this must not gain new callers.  It exists solely because
    imagebackend.Lvm.create_image cannot easily supply an input format yet;
    delete this function once that call site is fixed.
    """
    _convert_image(source, dest, None, out_format, run_as_root)
def _convert_image(source, dest, in_format, out_format, run_as_root):
    """Run ``qemu-img convert``; ``in_format`` of None means auto-detect."""
    command = ['qemu-img', 'convert', '-O', out_format, source, dest]
    if in_format is not None:
        # Only pass "-f" when the caller knows the input format.
        command += ['-f', in_format]
    utils.execute(*command, run_as_root=run_as_root)
def fetch(context, image_href, path, _user_id, _project_id, max_size=0):
    """Download an image from the image service to ``path``.

    The partially written file is removed if the download fails.
    """
    with fileutils.remove_path_on_error(path):
        IMAGE_API.download(context, image_href, dest_path=path)
def get_info(context, image_href):
    """Return the image service's metadata for ``image_href``."""
    return IMAGE_API.get(context, image_href)
def fetch_to_raw(context, image_href, path, user_id, project_id, max_size=0):
    """Fetch an image and place it at ``path``, converting to raw if needed.

    The image is downloaded to ``<path>.part``, inspected with qemu-img,
    size-checked against ``max_size``, optionally converted to raw (when
    CONF.force_raw_images is set), and finally renamed into place.

    :raises exception.ImageUnacceptable: if the format cannot be parsed,
        the image has a backing file, or conversion to raw fails
    :raises exception.FlavorDiskSmallerThanImage: if the image's virtual
        size exceeds a non-zero ``max_size``
    """
    path_tmp = "%s.part" % path
    fetch(context, image_href, path_tmp, user_id, project_id,
          max_size=max_size)
    with fileutils.remove_path_on_error(path_tmp):
        data = qemu_img_info(path_tmp)
        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_href)
        # Reject images with backing files: they could reference arbitrary
        # host paths.
        backing_file = data.backing_file
        if backing_file is not None:
            raise exception.ImageUnacceptable(image_id=image_href,
                reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
                        {'fmt': fmt, 'backing_file': backing_file}))
        # We can't generally shrink incoming images, so disallow
        # images > size of the flavor we're booting. Checking here avoids
        # an immediate DoS where we convert large qcow images to raw
        # (which may compress well but not be sparse).
        # TODO(p-draigbrady): loop through all flavor sizes, so that
        # we might continue here and not discard the download.
        # If we did that we'd have to do the higher level size checks
        # irrespective of whether the base image was prepared or not.
        disk_size = data.virtual_size
        if max_size and max_size < disk_size:
            LOG.error(_LE('%(base)s virtual size %(disk_size)s '
                          'larger than flavor root disk size %(size)s'),
                      {'base': path,
                       'disk_size': disk_size,
                       'size': max_size})
            raise exception.FlavorDiskSmallerThanImage(
                flavor_size=max_size, image_size=disk_size)
        if fmt != "raw" and CONF.force_raw_images:
            # Convert to raw via a staging file, then verify the result
            # really is raw before renaming it into place.
            staged = "%s.converted" % path
            LOG.debug("%s was %s, converting to raw" % (image_href, fmt))
            with fileutils.remove_path_on_error(staged):
                convert_image(path_tmp, staged, fmt, 'raw')
                os.unlink(path_tmp)
                data = qemu_img_info(staged)
                if data.file_format != "raw":
                    raise exception.ImageUnacceptable(image_id=image_href,
                        reason=_("Converted to raw, but format is now %s") %
                        data.file_format)
                os.rename(staged, path)
        else:
            os.rename(path_tmp, path)
| [
"yizhongyin@os-easy.com"
] | yizhongyin@os-easy.com |
37560936c83c5009ef3b8b7a7f7728799e38d652 | 38d1e0b40d9cc54e4aa272ae5c2872fca378002a | /python_stack/django/django_full_stack/MatchmakingProject/LoginApp/models.py | 303b40db7150ee0c65a0e08604106180471cfb08 | [] | no_license | taichikoga/Dojo_Assignments | 0a8974a6fcb3ce83973fd481803f1bb7126ca3ba | 4c7e82bd652286d281ce86fe9c14491182c3ecde | refs/heads/master | 2022-11-22T23:19:02.431639 | 2020-07-20T17:53:37 | 2020-07-20T17:53:37 | 274,190,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,510 | py | from django.db import models
import re, bcrypt
# Create your models here.
class UserManager(models.Manager):
    """Form-validation helpers for registration and login."""

    def reg_validator(self, post_data):
        """Validate registration form data; return a dict of field errors.

        Fixes applied to the original:
        * ``errors['email_format'] == "..."`` used a comparison instead of
          an assignment, so the error was silently discarded.
        * The password-complexity checks were dead code placed after the
          ``return`` statement (and referenced the wrong name ``postData``);
          the PASS_REGEX check is now actually applied.
        """
        EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
        PASS_REGEX = re.compile(r"^(?=.*[A-Za-z])(?=.*\d)(?=.*[@$!%*#?&])[A-Za-z\d@$!%*#?&]{8,16}$")
        errors = {}
        # First name validations.
        if len(post_data['first_name']) == 0:
            errors['first_name'] = "Your first name cannot be blank"
        elif len(post_data['first_name']) < 2:
            errors['first_name'] = "Your first name should be at least 2 characters long."
        # Last name validations.
        if len(post_data['last_name']) == 0:
            errors['last_name'] = "Your last name cannot be blank."
        elif len(post_data['last_name']) < 2:
            errors['last_name'] = "Your last name should be at least 2 characters long."
        # Email validations.
        if len(post_data['email']) == 0:
            errors['email'] = "Your email cannot be blank."
        elif len(post_data['email']) < 6:
            errors['email_format'] = "Invalid email address."
        elif not EMAIL_REGEX.match(post_data['email']):
            errors['email'] = "Email is invalid."
        else:
            # Only hit the database once the address itself looks valid.
            same_email = User.objects.filter(email=post_data['email'])
            if len(same_email) > 0:
                errors['email_taken'] = "This email already exists. Register with a different email."
        # Password validations (complexity check restored from dead code).
        if len(post_data['password']) == 0:
            errors['password'] = "You must enter a password."
        elif len(post_data['password']) < 8:
            errors['password'] = "Password must be minimum 8 characters in length."
        elif not PASS_REGEX.match(post_data['password']):
            errors['password'] = "Password must contain at least 1 letter, 1 number and 1 special character."
        if post_data['password'] != post_data['confirmpw']:
            errors['confirmpw'] = "Passwords must match."
        return errors

    def login_validator(self, post_data):
        """Validate login form data; return a dict of field errors."""
        errors = {}
        if len(post_data['email']) < 1:
            errors['email'] = "Email is required to log in."
        emailExists = User.objects.filter(email=post_data['email'])
        if len(emailExists) == 0:
            errors['email_not_found'] = "This email doesn't exist. Please register for an account first."
        else:
            user = emailExists[0]
            # Passwords are stored as bcrypt hashes (see views that register).
            if not bcrypt.checkpw(post_data['password'].encode(), user.password.encode()):
                errors['password'] = "Password incorrect. Try again."
        return errors
class User(models.Model):
    """Registered site user; ``password`` holds a bcrypt hash, not plaintext."""
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    email = models.CharField(max_length=50)
    password = models.CharField(max_length=100)
    # Non-symmetrical: A friending B does not imply B friending A.
    friend = models.ManyToManyField('self', symmetrical=False)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # Custom manager exposing reg_validator / login_validator.
    objects = UserManager()
| [
"63776416+taichikoga@users.noreply.github.com"
] | 63776416+taichikoga@users.noreply.github.com |
3a611ccf2cb28d3307b8e7a4dc32f85cc66e26e5 | a2fb2f3821af2ae59e6354f93272c54b3abae52f | /stackdio/api/volumes/permissions.py | 0f515fe3e26bbaa9069b6647a06693e185d81f1f | [
"Apache-2.0"
] | permissive | pombredanne/stackdio | 1bae1803083f76df9c7bba56c0ffcf08897bbba6 | 8f037c428c187b65750ab387a3e11ed816f33be6 | refs/heads/master | 2021-01-18T11:30:14.157528 | 2016-01-28T17:15:05 | 2016-01-28T17:15:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py | # -*- coding: utf-8 -*-
# Copyright 2014, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from stackdio.core.permissions import (
StackdioParentObjectPermissions,
StackdioPermissionsModelPermissions,
StackdioPermissionsObjectPermissions,
)
from . import models
class VolumeParentObjectPermissions(StackdioParentObjectPermissions):
    # Object-level permission checks evaluated against a parent Volume.
    parent_model_cls = models.Volume
class VolumePermissionsModelPermissions(StackdioPermissionsModelPermissions):
    # Model-level permission checks for the Volume model.
    model_cls = models.Volume
class VolumePermissionsObjectPermissions(StackdioPermissionsObjectPermissions):
    # Per-object permission checks keyed off a parent Volume instance.
    parent_model_cls = models.Volume
| [
"clark.perkins@digitalreasoning.com"
] | clark.perkins@digitalreasoning.com |
b09bf69a31c033edea81089557894bf401b7430a | f7abd047406935b64e9283d6dbd2c74477ecb10c | /setup.py | 404c61f08a6716c54a5862d5f3b1e140ca7e9071 | [
"MIT"
] | permissive | stjordanis/pyppl_require | 3deffa5d7d5a8742726aeafa22db675b20f35a98 | a21dcef26a78dffe37de795e51bb6afcd512982d | refs/heads/master | 2022-09-24T03:29:43.861751 | 2020-06-06T04:41:55 | 2020-06-06T04:41:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | # -*- coding: utf-8 -*-
# DO NOT EDIT THIS FILE!
# This file has been autogenerated by dephell <3
# https://github.com/dephell/dephell
# Prefer setuptools' setup(); fall back to distutils on minimal installs.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
import os.path
# Use the README (when present, next to this file) as the long description.
readme = ''
here = os.path.abspath(os.path.dirname(__file__))
readme_path = os.path.join(here, 'README.rst')
if os.path.exists(readme_path):
    with open(readme_path, 'rb') as stream:
        readme = stream.read().decode('utf8')
setup(
    long_description=readme,
    name='pyppl_require',
    version='0.0.5',
    description='Requirement manager for processes of PyPPL',
    python_requires='==3.*,>=3.6.0',
    project_urls={
        "homepage": "https://github.com/pwwang/pyppl_require",
        "repository": "https://github.com/pwwang/pyppl_require"
    },
    author='pwwang',
    author_email='pwwang@pwwang.com',
    license='MIT',
    # Registers this package as a PyPPL plugin via the "pyppl" entry point.
    entry_points={"pyppl": ["pyppl_require = pyppl_require"]},
    packages=[],
    package_dir={"": "."},
    package_data={},
    install_requires=['cmdy', 'pyppl==3.*', 'pyppl-annotate'],
    extras_require={"dev": ["pytest", "pytest-cov"]},
)
| [
"pwwang@pwwang.com"
] | pwwang@pwwang.com |
b25f9d06d2352de2945a3002c76ecd56e103ca3f | 0a3b08678e2d6669188649bfc8d38439ac076f61 | /odziez/employees/migrations/0002_auto_20190709_1756.py | efb1cc0f3be39a5f76b6bd68ca3df0eb1ddf973e | [
"MIT"
] | permissive | szymanskirafal/odziez | 48f31fb72a8875511000f6aa5d9032770a670e64 | 029d20da0474a0380e8383f9f89c1072666c5399 | refs/heads/master | 2020-05-25T08:18:58.783882 | 2020-05-10T19:19:29 | 2020-05-10T19:19:29 | 187,707,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,622 | py | # Generated by Django 2.1.8 on 2019-07-09 17:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Wires up the FK / one-to-one fields between the employees models
    and the configured auth user model."""

    # NOTE(review): `initial = True` together with a dependency on
    # '0001_initial' is how makemigrations splits relational fields out of
    # the initial migration; confirm before squashing or editing.
    initial = True

    dependencies = [
        ('employees', '0001_initial'),
        # Follow whatever model settings.AUTH_USER_MODEL points at.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.AddField(
            model_name='supervisor',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='manager',
            name='job',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='employees.Job'),
        ),
        migrations.AddField(
            model_name='manager',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='job',
            name='position',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='employees.Position'),
        ),
        migrations.AddField(
            model_name='job',
            name='work_place',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='employees.WorkPlace'),
        ),
        migrations.AddField(
            model_name='employee',
            name='job',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='employees.Job'),
        ),
    ]
| [
"r.szymansky@gmail.com"
] | r.szymansky@gmail.com |
3005f70017c2002ef6c8d021def9baf0e1c3c788 | 556db265723b0cc30ad2917442ed6dad92fd9044 | /tensorflow/lite/experimental/mlir/testing/op_tests/segment_sum.py | 8b15ed2ad66b3a9e04cd429d4cc111776b120b6f | [
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | graphcore/tensorflow | c1669b489be0e045b3ec856b311b3139858de196 | 085b20a4b6287eff8c0b792425d52422ab8cbab3 | refs/heads/r2.6/sdk-release-3.2 | 2023-07-06T06:23:53.857743 | 2023-03-14T13:04:04 | 2023-03-14T13:48:43 | 162,717,602 | 84 | 17 | Apache-2.0 | 2023-03-25T01:13:37 | 2018-12-21T13:30:38 | C++ | UTF-8 | Python | false | false | 2,232 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for segment_sum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_segment_sum_tests(options):
  """Make a set of tests to do segment_sum."""

  # A single parameter grid: each data shape/dtype is combined with each
  # segment-id assignment by the zip-test machinery.
  test_parameters = [{
      "data_shape": [[4, 4], [4], [4, 3, 2]],
      "data_dtype": [tf.float32, tf.int32],
      "segment_ids": [[0, 0, 1, 1], [0, 1, 2, 2], [0, 1, 2, 3],
                      [0, 0, 0, 0]],
  }]

  def build_graph(parameters):
    """Build the segment_sum op testing graph."""
    data = tf.compat.v1.placeholder(
        dtype=parameters["data_dtype"],
        name="data",
        shape=parameters["data_shape"])
    segment_ids = tf.constant(parameters["segment_ids"], dtype=tf.int32)
    return [data], [tf.segment_sum(data, segment_ids)]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random input data and record the reference TF output."""
    feed_value = create_tensor_data(parameters["data_dtype"],
                                    parameters["data_shape"])
    return [feed_value], sess.run(outputs, feed_dict={inputs[0]: feed_value})

  options.use_experimental_converter = True
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=0)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
828ba20be5d5fdeac4a7e0791d2f672dc310856f | 4bc6028ed8ba403b69adfd6f5cbd139baece0f4d | /basic_python/machine_learn/EDA_demo/1-RedCard-EDA/demo1.py | 3eabd8b23bc54d1fd527cf00c799e51c2efe7981 | [] | no_license | xrw560/learn-pyspark | 0ef9ed427ff887ceed1c5e5773bf97ed25ecae04 | 618d16dafd73165e714111670119d9cdecc0bf1f | refs/heads/master | 2020-03-07T00:12:36.885000 | 2019-01-04T09:51:32 | 2019-01-04T09:51:32 | 127,152,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | from __future__ import absolute_import, division, print_function
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.pyplot import GridSpec
import seaborn as sns
import numpy as np
import pandas as pd
import os, sys
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')
sns.set_context("poster", font_scale=1.3)
# import missingno as msno
import pandas_profiling
from sklearn.datasets import make_blobs
import time
# Uncomment one of the following lines and run the cell:
# Load the compressed red-card dataset from the working directory.
df = pd.read_csv("redcard.csv.gz", compression='gzip')
print(df.shape)
# print(df.head())
# print(df.describe().T)
# print(df.dtypes)
all_columns = df.columns.tolist()
# print(all_columns)
# The two means differ: the first averages over all rows, the second
# averages per-player means, so players appearing in many rows weigh
# less in the latter.
print(df['height'].mean())
print(np.mean(df.groupby('playerShort').height.mean()))
# df2 = pd.DataFrame({'key1': ['a', 'a', 'b', 'b', 'a'],
#                     'key2': ['one', 'two', 'one', 'two', 'one'],
#                     'data1': np.random.randn(5),
#                     'data2': np.random.randn(5)})
# print(df2)
# grouped = df2['data1'].groupby(df2['key1'])
# print(grouped.mean())
# Section marker, original Chinese: "data splitting".
"""数据切分"""
| [
"ncutits@163.com"
] | ncutits@163.com |
b301c6394d1ec79858fdc08c17f1d155a353319b | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/523015_Callback_Pattern/recipe-523015.py | 8c272476e04c919345df9e0d8eedba99e1698aa3 | [
"MIT",
"Python-2.0"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 1,335 | py | class CallbackBase:
def __init__(self):
self.__callbackMap = {}
for k in (getattr(self, x) for x in dir(self)):
if hasattr(k, "bind_to_event"):
self.__callbackMap.setdefault(k.bind_to_event, []).append(k)
elif hasattr(k, "bind_to_event_list"):
for j in k.bind_to_event_list:
self.__callbackMap.setdefault(j, []).append(k)
## staticmethod is only used to create a namespace
@staticmethod
def callback(event):
def f(g, ev = event):
g.bind_to_event = ev
return g
return f
@staticmethod
def callbacklist(eventlist):
def f(g, evl = eventlist):
g.bind_to_event_list = evl
return g
return f
def dispatch(self, event):
l = self.__callbackMap[event]
f = lambda *args, **kargs: \
map(lambda x: x(*args, **kargs), l)
return f
## Sample
class MyClass(CallbackBase):
    """Sample subclass: handlers are registered declaratively via decorators."""
    EVENT1 = 1
    EVENT2 = 2
    @CallbackBase.callback(EVENT1)
    def handler1(self, param = None):
        print "handler1 with param: %s" % str(param)
        return None
    @CallbackBase.callbacklist([EVENT1, EVENT2])
    def handler2(self, param = None):
        print "handler2 with param: %s" % str(param)
        return None
    def run(self, event, param = None):
        # NOTE(review): dispatch() forwards through map(); under Python 3 the
        # returned map object is lazy, so the handlers would never fire here.
        # This file relies on Python 2 semantics (see the print statements).
        self.dispatch(event)(param)
if __name__ == "__main__":
    # Demo: EVENT1 triggers handler1 and handler2, EVENT2 only handler2.
    a = MyClass()
    a.run(MyClass.EVENT1, 'mandarina')
    a.run(MyClass.EVENT2, 'naranja')
| [
"betty@qburst.com"
] | betty@qburst.com |
8ae189cf070459fbdb708e52b7b19a26bfe19108 | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/train/python/497a17977a8c1e62ac70b8485ad87dfa0cc70b8cmenu_controller.py | 497a17977a8c1e62ac70b8485ad87dfa0cc70b8c | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 4,672 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2015 Deepin, Inc.
# 2011 ~ 2015 Wang YaoHua
#
# Author: Wang YaoHua <mr.asianwang@gmail.com>
# Maintainer: Wang YaoHua <mr.asianwang@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from PyQt5.QtGui import QCursor
from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot
from deepin_menu.menu import Menu, CheckableMenuItem
from i18n import _
from constants import MAIN_DIR
# Directory containing the SVG icon assets used by the right-click menu.
MENU_ICONS_DIR = os.path.join(MAIN_DIR, "image", "menu_icons")


# PEP 8 (E731): named lambdas replaced by documented functions.
def menu_icon_normal(x):
    """Return the path of the small 'normal' state icon for *x*.

    Note: "norml" is spelled this way to match the actual asset filenames.
    """
    return os.path.join(MENU_ICONS_DIR, "%s-symbolic-small-norml.svg" % x)


def menu_icon_hover(x):
    """Return the path of the small 'hover' state icon for *x*."""
    return os.path.join(MENU_ICONS_DIR, "%s-symbolic-small-hover.svg" % x)


def menu_icon_tuple(x):
    """Return the (normal_path, hover_path) pair for the icon *x*."""
    return (menu_icon_normal(x), menu_icon_hover(x))
# Radio submenu listing the mutually exclusive save destinations.
save_sub_menu = [
    CheckableMenuItem("save:radio:_op_auto_save", _("Autosave")),
    CheckableMenuItem("save:radio:_op_save_to_desktop", _("Save to desktop")),
    CheckableMenuItem("save:radio:_op_copy_to_clipboard", _("Copy to clipboard")),
    CheckableMenuItem("save:radio:_op_save_as", _("Save to specified folder")),
    CheckableMenuItem("save:radio:_op_copy_and_save", _("Autosave and copy to clipboard")),
]
# Top-level right-click menu: (item id, label, (normal icon, hover icon));
# None denotes a separator.
right_click_menu = [
    ("_rectangle", _("Rectangle tool"), menu_icon_tuple("rectangle-tool")),
    ("_ellipse", _("Ellipse tool"), menu_icon_tuple("ellipse-tool")),
    ("_arrow", _("Arrow tool"), menu_icon_tuple("arrow-tool")),
    ("_line", _("Brush tool"), menu_icon_tuple("line-tool")),
    ("_text", _("Text tool"), menu_icon_tuple("text-tool")),
    None,
    ("_save", _("Save"), menu_icon_tuple("save")),
    ("_share", _("Share"), menu_icon_tuple("share")),
    ("_exit", _("Exit"), menu_icon_tuple("exit")),
]
class MenuController(QObject):
    """Qt bridge exposing the right-click deepin menu to the UI layer.

    show_menu() builds and shows the menu; the user's choice is reported
    back through the signals below, and postMenuHide is emitted whenever
    the menu goes away (item clicked or dismissed).
    """

    toolSelected = pyqtSignal(str, arguments=["toolName"])
    saveSelected = pyqtSignal(int, arguments=["saveOption"])
    shareSelected = pyqtSignal()
    exitSelected = pyqtSignal()
    preMenuShow = pyqtSignal()
    postMenuHide = pyqtSignal()

    # Menu item ids forwarded verbatim through toolSelected.
    _TOOL_IDS = ("_rectangle", "_ellipse", "_arrow", "_line", "_text")
    # Save submenu item id -> integer option emitted through saveSelected.
    # (Single table replaces the two previously duplicated if-chains, which
    # could drift out of sync.)
    _SAVE_OPTIONS = {
        "save:radio:_op_auto_save": 1,
        "save:radio:_op_save_to_desktop": 0,
        "save:radio:_op_copy_to_clipboard": 4,
        "save:radio:_op_save_as": 2,
        "save:radio:_op_copy_and_save": 3,
    }

    def __init__(self):
        super(MenuController, self).__init__()

    def _menu_unregistered(self):
        self.postMenuHide.emit()

    def _menu_item_invoked(self, _id, _checked):
        # The menu disappears as soon as an item is activated.
        self.postMenuHide.emit()
        if _id in self._TOOL_IDS:
            self.toolSelected.emit(_id)
        elif _id in self._SAVE_OPTIONS:
            self.saveSelected.emit(self._SAVE_OPTIONS[_id])
        elif _id == "_share":
            self.shareSelected.emit()
        elif _id == "_exit":
            self.exitSelected.emit()

    @pyqtSlot(int)
    def show_menu(self, saveOption):
        """Show the right-click menu at the cursor, with *saveOption* ticked."""
        self.preMenuShow.emit()
        self.menu = Menu(right_click_menu)
        self.menu.getItemById("_save").setSubMenu(Menu(save_sub_menu))
        # Tick exactly the radio item matching the active save option.
        for item_id, option in self._SAVE_OPTIONS.items():
            self.menu.getItemById(item_id).checked = (saveOption == option)
        self.menu.itemClicked.connect(self._menu_item_invoked)
        self.menu.menuDismissed.connect(self._menu_unregistered)
        self.menu.showRectMenu(QCursor.pos().x(), QCursor.pos().y())
| [
"aliostad+github@gmail.com"
] | aliostad+github@gmail.com |
8e576861d86e5cd001a0f730f118b08f34ad0da3 | da01d5cdffd4f8e6a4ee91a528712f2efd8ba5d6 | /setup.py | d0a51ad06f6871b1f3f6730d697649015b7a0df1 | [] | no_license | Durant21/test10 | 7dc6a43973a8bcb10694105727f88b80d237b29a | 6919ac5f6c4081848c12758730b9ccca49bba6fc | refs/heads/master | 2022-12-02T07:28:00.339793 | 2019-08-30T14:12:07 | 2019-08-30T14:12:07 | 204,570,708 | 0 | 1 | null | 2022-11-16T07:47:22 | 2019-08-26T22:14:28 | Python | UTF-8 | Python | false | false | 1,181 | py | import os
from setuptools import setup, find_packages
# Read README/CHANGES (located next to this setup.py) for the PyPI page.
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
    README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
    CHANGES = f.read()
# Runtime dependencies.
requires = [
    'pyramid',
    'pyramid_jinja2',
    'pyramid_debugtoolbar',
    'waitress',
]
# Extra dependencies pulled in by the 'testing' extra below.
tests_require = [
    'WebTest >= 1.3.1', # py3 compat
    'pytest', # includes virtualenv
    'pytest-cov',
]
setup(name='test10',
      version='0.0',
      description='test10',
      long_description=README + '\n\n' + CHANGES,
      classifiers=[
          "Programming Language :: Python",
          "Framework :: Pyramid",
          "Topic :: Internet :: WWW/HTTP",
          "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
      ],
      author='',
      author_email='',
      url='',
      keywords='web pyramid pylons',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      extras_require={
          'testing': tests_require,
      },
      install_requires=requires,
      entry_points="""\
      [paste.app_factory]
      main = test10:main
      """,
      )
| [
"durant.crimson@icloud.com"
] | durant.crimson@icloud.com |
f06be5b8bc669aeca413c35e494b94a21aa374b1 | 983d740b87b42d8af3c0db9d746dc7fe32d83ccd | /ucf-insert-io.py | 1d1351a8a2e9ac8091a01a2b5e115a7b906f0cce | [] | no_license | zzfd97/fpga-utils | 266357653e7089d40090f10e49cd0611f530397a | ffededb9a74cb8a786691231b7eea8af7f8dfd69 | refs/heads/master | 2021-12-08T19:06:54.545577 | 2016-03-30T21:42:22 | 2016-03-30T21:42:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,045 | py | #!/usr/bin/env python
"""
Inserts IO pin information into UCF files
"""
from __future__ import print_function

import argparse
import csv
import re
import sys

from jinja2 import Template

import verilogParse
def main():
    """Command-line entry point: annotate LOC constraints in a UCF file.

    Reads a Xilinx package/pinout file, builds a pin -> (bank, IO name)
    lookup, and rewrites every ``LOC = "<pin>"`` line of the input UCF
    with a trailing ``# Bank = <n>, IO_...`` comment.

    Fixes vs. the original: ``sys`` is actually imported (the error paths
    previously raised NameError), files are closed via context managers,
    regex patterns are raw strings, and a freshly added comment is placed
    before the line's newline instead of after it (which used to corrupt
    the following line).
    """
    parser = argparse.ArgumentParser(description=(__doc__ or "").strip())
    parser.add_argument('input', type=str, help="input UCF file")
    parser.add_argument('-p', '--pkg', type=str, help="Xilinx package file")
    parser.add_argument('--ioc', type=int, help="IO name column (for multi-part CSV)")
    parser.add_argument('-o', '--output', type=str, help="output file name")
    args = parser.parse_args()

    # Column index is 1-based on the command line, 0-based internally.
    opt_io_col = -1 if args.ioc is None else args.ioc - 1

    if args.pkg is None:
        raise Exception("No package file specified")
    if args.input is None:
        raise Exception("No input file specified")
    output_name = args.output if args.output is not None else args.input + '.out'

    print("Reading package file")
    pins, banks, ios = _read_package(args.pkg, opt_io_col)

    print("Processing UCF file")
    _annotate_ucf(args.input, output_name, pins, banks, ios)

    print("Wrote output file %s" % output_name)
    print("Done")


def _read_package(pkg_name, opt_io_col):
    """Parse the package file; return parallel lists (pins, banks, ios).

    The first line is a header; a comma in it selects CSV parsing,
    otherwise columns are whitespace-separated.  Column roles are detected
    from the first row containing an "IO_" cell:
      * io_col   -- the IO-name column (overridable via opt_io_col),
      * pin_col  -- a cell shaped like a pin name ("A1", "AB12", ...),
      * bank_col -- the last cell that is a 1-2 digit number.
    Rows whose width differs from that reference row are discarded.
    Exits with status 1 on unreadable files or failed detection.
    """
    try:
        pkg_file = open(pkg_name, 'r')
    except OSError as ex:
        print("Error opening \"%s\": %s" % (pkg_name, ex.strerror), file=sys.stderr)
        sys.exit(1)

    pkg_contents = []
    pin_col = -1
    bank_col = -1
    io_col = -1
    row_length = 0

    with pkg_file:
        header = next(pkg_file)
        # CSV and whitespace-separated files both yield per-row sequences.
        pkg_reader = csv.reader(pkg_file) if ',' in header else pkg_file
        for entry in pkg_reader:
            row = entry.split() if isinstance(entry, str) else entry
            if len(row) <= 1:
                continue
            row = [cell.strip() for cell in row]
            pkg_contents.append(row)
            if io_col >= 0:
                continue
            for i in range(len(row)):
                if "IO_" in row[i] and (opt_io_col == i or opt_io_col < 0):
                    io_col = i
                    # This should be a valid row, so remember its width and
                    # detect the pin and bank columns from it.
                    row_length = len(row)
                    for k in range(len(row)):
                        if re.match(r"[a-zA-Z]{1,2}[0-9]{1,2}", row[k]) is not None:
                            pin_col = k
                        if re.match(r"[0-9]{1,2}", row[k]) is not None:
                            bank_col = k

    # Keep only rows with the same width as the detected reference row.
    pkg_contents = [x for x in pkg_contents if len(x) == row_length]

    if pin_col < 0:
        print("Could not determine pin column", file=sys.stderr)
        sys.exit(1)
    if bank_col < 0:
        print("Could not determine bank column", file=sys.stderr)
        sys.exit(1)
    if io_col < 0:
        print("Could not determine IO column", file=sys.stderr)
        sys.exit(1)

    pins = [x[pin_col].lower() for x in pkg_contents]
    banks = [x[bank_col] for x in pkg_contents]
    ios = [x[io_col] for x in pkg_contents]
    return pins, banks, ios


def _annotate_ucf(input_name, output_name, pins, banks, ios):
    """Copy the UCF, appending "Bank = <n>, IO_*" comments to LOC lines.

    An existing comment on a LOC line is preserved, but stale bank/IO
    annotations inside it are stripped first.  Lines whose pin is unknown,
    and non-LOC lines, are copied through unchanged.
    """
    try:
        input_file = open(input_name, 'r')
    except OSError as ex:
        print("Error opening \"%s\": %s" % (input_name, ex.strerror), file=sys.stderr)
        sys.exit(1)
    try:
        output_file = open(output_name, 'w')
    except OSError as ex:
        print("Error opening \"%s\": %s" % (output_name, ex.strerror), file=sys.stderr)
        sys.exit(1)

    with input_file, output_file:
        for line in input_file:
            # Split the constraint text from any trailing comment.
            line_raw = line.split('#', 2)
            ucf_line = line_raw[0]
            res = re.search(r'loc\s*=\s*"(.+)"', ucf_line.lower())
            if res is not None:
                try:
                    i = pins.index(res.group(1))
                except ValueError:
                    # Pin not in the package data: leave the line unchanged.
                    i = -1
                if i >= 0:
                    comment = " Bank = %s, %s" % (banks[i], ios[i])
                    if len(line_raw) == 1:
                        # No existing comment: append one before the newline.
                        line = ucf_line.rstrip('\n') + ' #' + comment + '\n'
                    else:
                        c = line_raw[1]
                        # strip old bank information
                        c = re.sub(r'\s*bank\s*=\s*(\d+|\?)\s*,\s*IO_(\w+|\?)', '', c, flags=re.IGNORECASE)
                        c = re.sub(r'\s*bank\s*=\s*(\d+|\?)\s*', '', c, flags=re.IGNORECASE)
                        c = re.sub(r'\s*IO_(\w+|\?)', '', c, flags=re.IGNORECASE)
                        line_raw[1] = comment + c
                        line = '#'.join(line_raw)
            output_file.write(line)
# Allow the module to be imported as a library or run as a script.
if __name__ == "__main__":
    main()
| [
"alex@alexforencich.com"
] | alex@alexforencich.com |
6ff7b9df9c9027112ef8f0f53dae56b8a8caa9ce | 53c31cb08279e4a95db180c949d1cb86b2d5cecf | /src/sentry/api/serializers/models/event.py | 6b6474c4df161daf44a915d1ed638c9fe5cfea64 | [
"BSD-2-Clause"
] | permissive | jessepollak/sentry | eab2a2f7a8d46f3236377be2feb9bc1a508b94d2 | ea46b05ea87814e19cdc5f4883df073a73482261 | refs/heads/master | 2021-01-18T10:20:52.522602 | 2014-05-25T02:38:52 | 2014-05-25T02:38:52 | 20,238,431 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | from sentry.api.serializers import Serializer, register
from sentry.models import Event
@register(Event)
class EventSerializer(Serializer):
    """Serializes an Event model into its REST API representation."""

    def serialize(self, obj, user):
        project = obj.project
        return {
            'id': str(obj.id),
            'eventID': str(obj.event_id),
            'project': {
                'id': str(project.id),
                'name': project.name,
                'slug': project.slug,
            },
            'message': obj.message,
            'checksum': obj.checksum,
            'platform': obj.platform,
            'dateCreated': obj.datetime,
            'timeSpent': obj.time_spent,
        }
| [
"dcramer@gmail.com"
] | dcramer@gmail.com |
d7bfb04c2fc9a1312fbbaa38dcfbba968991b39b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adverbs/_forwarding.py | bdc7ef3dad2d535a336a466273bee7488b4c401f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py |
from xai.brain.wordbase.adverbs._forward import _FORWARD
# class header
class _FORWARDING(_FORWARD, ):
    # Word-bank entry for the inflected form "FORWARDING"; behaviour is
    # inherited from the base adverb entry _FORWARD.
    def __init__(self,):
        _FORWARD.__init__(self)
        self.name = "FORWARDING"  # surface form (upper-cased)
        self.specie = 'adverbs'   # part-of-speech bucket
        self.basic = "forward"    # lemma / base form
        self.jsondata = {}        # extra metadata, empty by default
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
a76b7945c71328832bb1f5a43a80993a49e6fac7 | 7d85c42e99e8009f63eade5aa54979abbbe4c350 | /game/lib/coginvasion/gui/MoneyGui.py | 80d04cf287cec11972b888d1cbdee7db2278a78a | [] | no_license | ToontownServerArchive/Cog-Invasion-Online-Alpha | 19c0454da87e47f864c0a5cb8c6835bca6923f0e | 40498d115ed716f1dec12cf40144015c806cc21f | refs/heads/master | 2023-03-25T08:49:40.878384 | 2016-07-05T07:09:36 | 2016-07-05T07:09:36 | 348,172,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | """
Filename: MoneyGui.py
Created by: blach (06Aug14)
"""
from panda3d.core import *
from direct.gui.DirectGui import *
class MoneyGui:
    """Jar-shaped HUD widget displaying the player's current money total."""

    def createGui(self):
        """Build the jar image and money label, replacing any previous GUI."""
        # Tear down widgets from any earlier call before rebuilding.
        self.deleteGui()
        self.frame = DirectFrame(parent=base.a2dBottomLeft, pos=(0.45, 0, 0.155))
        jar_model = loader.loadModel("phase_3.5/models/gui/jar_gui.bam")
        self.jar = OnscreenImage(image=jar_model, scale=0.5, parent=self.frame)
        mickey_font = loader.loadFont("phase_3/models/fonts/MickeyFont.bam")
        self.money_lbl = DirectLabel(text="", text_font=mickey_font, text_fg=(1,1,0,1), parent=self.jar, text_scale=0.2, relief=None, pos=(0, 0, -0.1))
        jar_model.remove_node()

    def deleteGui(self):
        """Destroy and drop whichever GUI widgets currently exist."""
        for widget_name in ('jar', 'money_lbl', 'frame'):
            if hasattr(self, widget_name):
                getattr(self, widget_name).destroy()
                delattr(self, widget_name)
        return

    def update(self, moneyAmt):
        """Display *moneyAmt*, drawn red when the amount is non-positive."""
        if not hasattr(self, 'money_lbl'):
            return
        text_color = (0.9, 0, 0, 1) if moneyAmt <= 0 else (1, 1, 0, 1)
        self.money_lbl['text_fg'] = text_color
        self.money_lbl['text'] = str(moneyAmt)
| [
"brianlach72@gmail.com"
] | brianlach72@gmail.com |
0730846c136e5ec604b7b24c9ff97970b6897dad | 5eca83a3a019467c8e5fafe5f2c2f6dc946a0e28 | /solutions/day_87.py | 8a86534e360185c3905a7ecd2326b2564738fd67 | [] | no_license | Kontowicz/Daily-Interview-Pro | 4c821b6afc9451c613f06e3850072e10d7d6a7d4 | 3bbe26430b6d004821477e14d37debe5d4a6d518 | refs/heads/master | 2020-06-25T00:37:04.138548 | 2020-02-28T16:43:30 | 2020-02-28T16:43:30 | 199,140,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | class ListNode(object):
    def __init__(self, x):
        self.val = x      # payload value
        self.next = None  # successor node (None = end of list)
def hasCycle(head):
    """Return True if the linked list starting at *head* contains a cycle.

    Uses Floyd's tortoise-and-hare algorithm: O(n) time and O(1) extra
    space, instead of the original O(n)-space visited set.
    """
    slow = fast = head
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
        # The fast pointer laps the slow one only inside a cycle.
        if slow is fast:
            return True
    return False
# Build the list 4 -> 3 -> 2 -> 1 -> 0 ...
testHead = ListNode(4)
node1 = ListNode(3)
testHead.next = node1
node2 = ListNode(2)
node1.next = node2
node3 = ListNode(1)
node2.next = node3
testTail = ListNode(0)
node3.next = testTail
# ... then close a loop: the tail points back to node1, creating a cycle.
testTail.next = node1
print(hasCycle(testHead))
# True | [
"przemyslowiec@gmail.com"
] | przemyslowiec@gmail.com |
5980d6c589b4744b3741e44661faf0329fc121b0 | 37bf9e197e46acf596ae28f71c8d9a859d169559 | /quotes/api/urls.py | 56f3fef0d9ac978ef57399cf2c6a8c8fc7763851 | [] | no_license | khmahmud101/django_project | 6e6fe77cffd4508f396cb2dc2672b71b78a2cb6a | 4e6ea4da071846df6a211c95036683d9fbb68cf3 | refs/heads/master | 2020-08-23T13:58:48.787821 | 2020-01-09T14:01:02 | 2020-01-09T14:01:02 | 216,627,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py |
from django.urls import path
from . views import *
urlpatterns = [
path('', QuoteCategoryAPIView.as_view()),
path('quotes/', QuoteAPIView.as_view()),
path('quotes/<int:pk>/',QuoteAPIDetailView.as_view()),
path('quotes/new/',QuoteAPINewView.as_view())
] | [
"kmahmud1991@gmail.com"
] | kmahmud1991@gmail.com |
4375b3728573bff258536cea3329c023f1a4739e | 14c6dcbea4505738513a0de745b5b62e7c8f3a20 | /myblog/forms.py | f6243f5097b213d48331e2a75b800cdd8de9d285 | [] | no_license | ZohanHo/Blog | 17982453896c1fa813d594f79f81fd4bce70f783 | fb648985016965674bbc983793fc48ac7772e6bb | refs/heads/master | 2020-04-03T00:34:35.079210 | 2018-11-03T21:04:26 | 2018-11-03T21:04:26 | 154,902,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,772 | py | from django import forms
from .models import *
from django.core.exceptions import ValidationError
# в моделях мы передавали в конструктор класса поля которые указали (title, bode и т.д.), в формах мы отступаем от этого общего поведения
# в формах мы должны передавать данные в конструктор которые мы берем из специального обьекта, из словаря который наз. clean_data
class TagForm(forms.ModelForm):
    """ModelForm for creating/editing Tag objects (only the title field)."""
    #title = forms.CharField(max_length=50) # this would be a plain input
    #slug = forms.SlugField(max_length=50)
    class Meta:
        model = Tag
        fields = ["title"] # , "slug"
    # Guard against "create" being used as a slug value, since the URL
    # pattern blog/<slug> would otherwise shadow the blog/create page.
    # NOTE(review): this reads cleaned_data["pk"], but no "pk" form field is
    # declared and "slug" is commented out above -- as written this clean
    # method appears never to run (or to KeyError); confirm intent.
    def clean_slug(self):
        pk = self.cleaned_data["pk"].lower()
        if pk == "create": # reject "create" as a slug value
            raise ValidationError("slug не может быть create")
        if Tag.objects.filter(pk__iexact=pk).count(): # reject duplicates
            raise ValidationError("Поле slug - {} уже существует".format(pk))
        return pk
class PostForm(forms.ModelForm):
    """ModelForm for creating/editing Post objects."""
    class Meta:
        model = Post
        fields = ["title", "body", "tags_to_post"] # , "slug"
    # NOTE(review): Django only invokes clean_<fieldname> for declared form
    # fields; there is no "post" field, and the method reads
    # cleaned_data["pk"] which is not declared either -- as written this
    # validator never runs. Confirm whether it was meant to be clean_slug.
    def clean_post(self):
        new_post = self.cleaned_data["pk"].lower()
        if new_post == "create":
            raise ValidationError ("Post не может быть create")
        if Post.objects.filter(pk__iexact=new_post).count():
            raise ValidationError("Такой slug существует")
        return new_post
class FormSearch(forms.Form): # plain Form: validates request data without being tied to a model
    # Free-text search query; optional (required=False).
    search = forms.CharField(required=False)
    # Sort key; optional. Choice values are model field names, labels are user-facing.
    sort = forms.ChoiceField(choices=(("pk", "pk"), ("date_pub", "Дата создания"), ("title", "Заголовок"), ("body", "Текст")), required=False)
# коментируем save, так как у ModelForm есть свой метод save
# переопределяем мето save, который нам вернет (сохранит в базе) поля title и slug но уже со словаря cleaned_data
#def save(self):
#new_tag = Tag.objects.create(title=self.cleaned_data["title"], slug=self.cleaned_data["slug"])
#return new_tag
# from blog.form import TagForm
# tf = TegForm() создал екземпляр класса <TagForm bound=False, valid=Unknown, fields=(title;slug)> bound=False - ввел пользователь что то или нет
# dir(tf) список атрибутов сщзданого нами обьекта
# from blog.form import TagForm
# tf = TegForm() создал екземпляр класса <TagForm bound=False, valid=Unknown, fields=(title;slug)> bound=False - ввел пользователь что то или нет
# dir(tf) обратились к атрибутам обьекта
# ['__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__',
# '__gt__', '__hash__', '__html__', '__init__', '__iter__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__',
# '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_bound_fields_cache',
# '_clean_fields', '_clean_form', '_errors', '_html_output', '_post_clean', 'add_error', 'add_initial_prefix', 'add_prefix', 'as_p',
# 'as_table', 'as_ul', 'auto_id', 'base_fields', 'changed_data', 'clean', 'data', 'declared_fields', 'default_renderer', 'empty_permitted',
# 'error_class', 'errors', 'field_order', 'fields', 'files', 'full_clean', 'get_initial_for_field', 'has_changed', 'has_error', 'hidden_fields',
# 'initial', 'is_bound', 'is_multipart', 'is_valid', 'label_suffix', 'media', 'non_field_errors', 'order_fields', 'prefix', 'renderer',
# 'use_required_attribute', 'visible_fields']
# tf.is_bound False проверяем передал ли что то пользователь в форму
# tf.is_valid() False так как is_bound - False, то и is_valid() False
# tf.errors {} тоже пустой так как мы не передали никаких данных
# d = {"title":"", "slug":""} создали словарь, с пустыми строками
# tf=TagForm(d) # снова создаю экземпляр, но на етот раз передаю словарь
# # tf.is_bound True проверяем передал ли что то пользователь в форму, сейчас передал
# tf.is_valid() False
# tf.errors {'title': ['Обязательное поле.'], 'slug': ['Обязательное поле.']} видим что есть обязательные поля
# # dir(tf) если снова обратимся к атрибутам, то видим что появился cleaned_date
# tf.cleaned_data выдаст пустой словарь {} - очищенные данные, потому что у нас заполнена форма tf.is_bound - True, и вызвали метод is_valid(),
# в етот момент создается словарь cleaned_data, если бы is_valid был бы True, ети бы данные были бы заполнены
# d = {"title":"fack", "slug":"me"}
# tf = TagForm(d)
# tf.is_bound True
# tf.is_valid() True - так как передали уже не пустую строку
# tf.cleaned_data при вызове видим что в словаре данные которые передал пользователь {'title': 'fack', 'slug': 'me'}
# tf.cleaned_data содержит очищиные данные, и именно данные из егото словаря мы должны использовать для создания моделей
# from myblog.models import Tag
# tag = Tag(title=tf.cleaned_data["title"], slug=tf.cleaned_data["slug"]) создал новый обьект в models.tag и передал данные с обьекта Tegform
# который у нас tf и с его словаря cleaned_data
# В общем виде валидация данных (проверка) и их очистка выглядит следующим образом:
# django вызывает метод is_valid который если True, последовательно вызывает clean методы, всей формы и отдельных полей
# если все проверено и валидировано, то они и помещаются в словарь cleaned_data, если что то не то, то исключение Validatioerrors | [
"serduk.evgeniy@gmail.com"
] | serduk.evgeniy@gmail.com |
0a7d65418b3ea0de2730832b1c3361551ee0625c | 4fa0a0cdb34a87eb52a704da5679a5693d45d24e | /Server/app/views/sporrow/sporrow_response.py | c8255537838352fed0935dff511c50e5c9719501 | [
"Apache-2.0"
] | permissive | Sporrow/Sporrow-Backend | 6ac6a8f3f24c43a4e84a8bf8975c9af1f02a807c | a711f8a25c0b6fdbbeff0a980fbf39a470020e23 | refs/heads/master | 2020-03-19T04:11:15.266927 | 2018-06-02T21:59:28 | 2018-06-02T21:59:28 | 135,803,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | from calendar import monthrange
from datetime import datetime
import re
from flask import Blueprint, Response, abort, g, request
from flask_restful import Api
from flasgger import swag_from
from app.models.account import AccountModel
from app.models.interest import MinorInterestModel, MajorInterestModel
from app.models.sporrow import SporrowModel
from app.views import BaseResource, auth_required, json_required
api = Api(Blueprint(__name__, __name__))
@api.resource('/sporrow/response/<id>')
class SporrowResponse(BaseResource):
    """REST endpoint for responding to a borrow ("sporrow") proposal."""

    def post(self, id):
        """
        Accept the borrow proposal identified by `id`.
        (translated from the original Korean docstring: "대여 제안 수락")
        """
        # NOTE(review): handler body not implemented yet -- flask-restful
        # will return the default empty response.

    def delete(self, id):
        """
        Reject the borrow proposal identified by `id`.
        (translated from the original Korean docstring: "대여 제안 거절")
        """
        # NOTE(review): handler body not implemented yet.
"city7310@naver.com"
] | city7310@naver.com |
d85f304973941bc00691ad8321ed91d0eafde44d | 988cbefdb6d45564b0b5d1d7865a5c71ec8e0284 | /dbinsertscripts/placement/extra_info_feed.py | cf3c1a8e93c5f57b61b05578661514ef42ec4f49 | [] | no_license | adi0311/FusionIIIT | 969839761f886fb32b2bd953ee2ff44b7b666d03 | a20502267c4b5b650f34448f5685a240e7260954 | refs/heads/master | 2022-11-13T20:54:07.542918 | 2020-06-30T18:26:29 | 2020-06-30T18:26:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | import django
import xlrd
from applications.globals.models import ExtraInfo, DepartmentInfo
from django.contrib.auth.models import User
class Data:
    """Reads rows from an Excel sheet and creates an ExtraInfo record per user."""

    def __init__(self, excel_file, row):
        # `row` is the number of data rows to process (header assumed on row 0).
        self.file = xlrd.open_workbook(excel_file)
        self.row = row
        self.sheet = self.getSheet()

    def getSheet(self):
        """Return the first worksheet of the workbook."""
        return self.file.sheet_by_index(0)

    def fillExtrainfo(self):
        """Create one ExtraInfo per sheet row, reporting failures per roll number.

        Best-effort: a failing row is reported and skipped, not fatal.
        """
        for i in range(1, self.row + 1):
            username = None
            try:
                # Roll numbers are stored as floats in the sheet; normalise
                # to the integer string used as the Django username.
                username = str(int(self.sheet.cell(i, 1).value))
                user = User.objects.get(username=username)
                add = ExtraInfo()
                add.id = user.username
                add.user = user
                # Placeholder profile values (as in the original script).
                add.age = 21
                add.address = "ghar"
                add.phone_no = 9999999999
                add.user_type = 'student'
                dept = self.sheet.cell(i, 3).value.strip()
                add.department = DepartmentInfo.objects.get(name=dept)
                add.about_me = "i am fine"
                add.save()
                print('saved')
            except Exception:
                # Bug fix: the original bare `except:` printed `user`, which is
                # unbound (or stale from a previous iteration) when the lookup
                # itself failed. Report the row's own roll number instead.
                print(username, 'unsuccessful')
# Driver: load the "B.Tech 2012" workbook (131 data rows) and populate ExtraInfo.
d = Data('dbinsertscripts/placement/B.Tech 2012.xlsx',131)
d.fillExtrainfo()
# Run from the Django shell with:
# exec(open('dbinsertscripts/placement/extra_info_feed.py').read())
| [
"guptaheet53@gmail.com"
] | guptaheet53@gmail.com |
d32a21a42a95bfb0e893c640ad743f6ecea70c77 | a88d9c0176f5e4c0d0bd9664270e000ebb5edbd9 | /component/tile/time_tile.py | 5076077c2c34536ba54d89d0fb0c7d9a61826238 | [
"MIT"
] | permissive | sandroklippel/fcdm | fb81c73fc6bd1cf296f9301272923c3627474d3f | 5a54e6352bb574ba409be38882ff0d13b3473b7a | refs/heads/master | 2023-08-19T22:05:52.055545 | 2021-08-24T11:23:40 | 2021-08-24T11:23:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,435 | py | from sepal_ui import sepalwidgets as sw
import ipyvuetify as v
from component.message import cm
class TimeTile(sw.Tile):
    """Tile asking the user for two date ranges: the baseline (reference)
    period and the analysis period, each as a start/end date-picker pair.

    :param model: the app model; picker values are bound to its
        ``reference_start``/``reference_end``/``analysis_start``/``analysis_end``
        attributes.
    """

    def __init__(self, model):
        # create the widgets
        baseline_title = v.Html(tag='h4', children=[cm.input_lbl.baseline], class_="mb-0 mt-5")
        baseline_start_picker = sw.DatePicker(label=cm.input_lbl.start)
        baseline_end_picker = sw.DatePicker(label=cm.input_lbl.end)
        baseline_picker_line = v.Layout(xs12=True, row=True, children = [baseline_start_picker, baseline_end_picker])
        analysis_title = v.Html(tag='h4', children=[cm.input_lbl.analysis], class_="mb-0 mt-5")
        analysis_start_picker = sw.DatePicker(label=cm.input_lbl.start)
        analysis_end_picker = sw.DatePicker(label=cm.input_lbl.end)
        analysis_picker_line = v.Layout(xs12=True, row=True, children = [analysis_start_picker, analysis_end_picker])
        # bind the widgets so picker changes update the model attributes
        model \
            .bind(baseline_start_picker, 'reference_start') \
            .bind(baseline_end_picker, 'reference_end') \
            .bind(analysis_start_picker, 'analysis_start') \
            .bind(analysis_end_picker, 'analysis_end')
        super().__init__(
            'nested_widget',
            cm.tile.time,
            inputs = [baseline_title, baseline_picker_line, analysis_title, analysis_picker_line]
        )
| [
"pierrick.rambaud49@gmail.com"
] | pierrick.rambaud49@gmail.com |
22ca70f79baa3e3b53c4d911e89c6a9ae77161a9 | 06a50cfded23b760d5b2a5ae7d5c4761ae2d4dc8 | /edX/MITx6.00.1x/final/final_problem4.py | 65c8ca1b81cb181c51e0d20782bc82ce1c1d18dc | [
"Apache-2.0"
] | permissive | spencerzhang91/coconuts-on-fire | b0655b3dd2b310b5e62f8cef524c6fddb481e758 | 407d61b3583c472707a4e7b077a9a3ab12743996 | refs/heads/master | 2021-09-21T07:37:07.879409 | 2018-08-22T03:24:36 | 2018-08-22T03:24:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,118 | py | def longest_run(L):
"""
Assumes L is a list of integers containing at least 2 elements.
Finds the longest run of numbers in L, where the longest run can
either be monotonically increasing or monotonically decreasing.
In case of a tie for the longest run, choose the longest run
that occurs first.
Does not modify the list.
Returns the sum of the longest run.
"""
longest = []
increasing = None
# main for loop
for i in range(len(L) - 1):
# this for loop decides if current run is increasing
for j in range(i+1, len(L)):
if L[j] == L[j-1]:
continue
elif L[j] > L[j-1]:
increasing = True
increase = [L[i]]
break
else:
increasing = False
decrease = [L[i]]
break
if increasing == None:
if len(L[i:]) > len(longest):
return sum(L[i:])
# this for loop actually adds items in respective list
for j in range(i+1, len(L)):
if L[j] >= L[j-1] and increasing:
increase.append(L[j])
if j == len(L) - 1 and len(increase) > len(longest):
return sum(increase)
elif L[j] <= L[j-1] and not increasing:
decrease.append(L[j])
if j == len(L) - 1 and len(decrease) > len(longest):
return sum(decrease)
else:
if increasing and len(increase) > len(longest):
longest = increase[:]
increase = []
elif not increasing and len(decrease) > len(longest):
longest = decrease[:]
decrease = []
i = j - 1
break
# print(L, len(L), longest, j)
return sum(longest)
# Smoke tests covering: an all-equal run, a decreasing-vs-increasing tie,
# a long decreasing run, and equal values followed by a direction change.
l1 = [3, 3, 3, 3, 3]
l2 = [3, 2, -1, 2, 7]
l3 = [100, 200, 300, -100, -200, -1500, -5000]
l4 = [3, 3, 3, 3, 3, 3, 3, -10, 1, 2, 3, 4]
print(longest_run(l1))
print(longest_run(l2))
print(longest_run(l3))
print(longest_run(l4))
| [
"spencerpomme@live.com"
] | spencerpomme@live.com |
25103517eaf993fb792f787dbdc32b3258b69e60 | 3c3b41bb9cdfc23cc95727636f0995560728098a | /FullDestroyAnalysis2016/Wheel0/Working/20160425/CMSSW_8_0_2/tmp/slc6_amd64_gcc530/src/HLTrigger/Muon/src/HLTriggerMuon/edm_write_config/scoutingMuonProducer_cfi.py | 4bc3fc3890cdd620372bcb3b4d8c0d53ac8a8fd7 | [] | no_license | FlorianScheuch/MassiveProductionMuonGun | eb5a2916345c21edf5fd0c5d6694333a0306c363 | a9a336364309cb7c9e952c0cd85060032d1dccd1 | refs/heads/master | 2016-08-12T23:12:01.159605 | 2016-05-04T11:26:35 | 2016-05-04T11:26:35 | 53,405,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | import FWCore.ParameterSet.Config as cms
# Configuration fragment for the HLT scouting muon producer. Inputs are the
# L3 muon candidates/tracks plus ECAL/HCAL PF-cluster and tracker isolation
# maps; candidates must pass the pt/eta cuts configured below.
scoutingMuonProducer = cms.EDProducer('HLTScoutingMuonProducer',
    ChargedCandidates = cms.InputTag('hltL3MuonCandidates'),
    Tracks = cms.InputTag('hltL3Muons'),
    EcalPFClusterIsoMap = cms.InputTag('hltMuonEcalPFClusterIsoForMuons'),
    HcalPFClusterIsoMap = cms.InputTag('hltMuonHcalPFClusterIsoForMuons'),
    TrackIsoMap = cms.InputTag('hltMuonTkRelIsolationCut0p09Map', 'combinedRelativeIsoDeposits'),
    muonPtCut = cms.double(4),     # minimum muon pT
    muonEtaCut = cms.double(2.4)   # maximum |eta|
)
| [
"scheuch@physik.rwth-aachen.de"
] | scheuch@physik.rwth-aachen.de |
8c05c91c0395da600980ef3863c433490959e584 | 4fc21c3f8dca563ce8fe0975b5d60f68d882768d | /GodwillOnyewuchi/Phase 1/Python Basic 2/Day 6 task/task 3.py | e6756dd907f48a6c70240dbe8e61fd264526422b | [
"MIT"
] | permissive | Uche-Clare/python-challenge-solutions | 17e53dbedbff2f33e242cf8011696b3059cd96e9 | 49ede6204ee0a82d5507a19fbc7590a1ae10f058 | refs/heads/master | 2022-11-13T15:06:52.846937 | 2020-07-10T20:59:37 | 2020-07-10T20:59:37 | 266,404,840 | 1 | 0 | MIT | 2020-05-23T19:24:56 | 2020-05-23T19:24:55 | null | UTF-8 | Python | false | false | 166 | py | import os
import platform
print(f'Operating system name: {os.name}')
print(f'Platform name: {platform.system()}')
print(f'Platform release: {platform.release()}')
| [
"godwillonyewuchii@gmail.com"
] | godwillonyewuchii@gmail.com |
b098c03b36a2f4b47d20164b7839bda797ffb633 | a67147597814032c0fee8c1debb38e61730841c7 | /argo/workflows/client/models/v1_azure_disk_volume_source.py | b6fe1decebcb61acf9fe1bc06e8bcfb8dd50d0c4 | [
"Apache-2.0",
"MIT"
] | permissive | CermakM/argo-client-python | 12ddc49d8e10a2a68d5965f37b9353234a2c5906 | 0caa743442d37f2f2e3b30867398ed2708c1bf4d | refs/heads/master | 2020-07-21T20:48:30.275935 | 2020-02-26T10:05:05 | 2020-02-26T10:05:05 | 206,972,491 | 36 | 9 | Apache-2.0 | 2020-02-26T10:05:06 | 2019-09-07T13:31:46 | Python | UTF-8 | Python | false | false | 6,637 | py | # coding: utf-8
"""
Argo
Python client for Argo Workflows # noqa: E501
OpenAPI spec version: master
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1AzureDiskVolumeSource(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'caching_mode': 'str',
'disk_name': 'str',
'disk_uri': 'str',
'fs_type': 'str',
'kind': 'str',
'read_only': 'bool'
}
attribute_map = {
'caching_mode': 'cachingMode',
'disk_name': 'diskName',
'disk_uri': 'diskURI',
'fs_type': 'fsType',
'kind': 'kind',
'read_only': 'readOnly'
}
def __init__(self, caching_mode=None, disk_name=None, disk_uri=None, fs_type=None, kind=None, read_only=None): # noqa: E501
"""V1AzureDiskVolumeSource - a model defined in Swagger""" # noqa: E501
self._caching_mode = None
self._disk_name = None
self._disk_uri = None
self._fs_type = None
self._kind = None
self._read_only = None
self.discriminator = None
if caching_mode is not None:
self.caching_mode = caching_mode
if disk_name is not None:
self.disk_name = disk_name
if disk_uri is not None:
self.disk_uri = disk_uri
if fs_type is not None:
self.fs_type = fs_type
if kind is not None:
self.kind = kind
if read_only is not None:
self.read_only = read_only
@property
def caching_mode(self):
"""Gets the caching_mode of this V1AzureDiskVolumeSource. # noqa: E501
:return: The caching_mode of this V1AzureDiskVolumeSource. # noqa: E501
:rtype: str
"""
return self._caching_mode
@caching_mode.setter
def caching_mode(self, caching_mode):
"""Sets the caching_mode of this V1AzureDiskVolumeSource.
:param caching_mode: The caching_mode of this V1AzureDiskVolumeSource. # noqa: E501
:type: str
"""
self._caching_mode = caching_mode
@property
def disk_name(self):
"""Gets the disk_name of this V1AzureDiskVolumeSource. # noqa: E501
:return: The disk_name of this V1AzureDiskVolumeSource. # noqa: E501
:rtype: str
"""
return self._disk_name
@disk_name.setter
def disk_name(self, disk_name):
"""Sets the disk_name of this V1AzureDiskVolumeSource.
:param disk_name: The disk_name of this V1AzureDiskVolumeSource. # noqa: E501
:type: str
"""
self._disk_name = disk_name
@property
def disk_uri(self):
"""Gets the disk_uri of this V1AzureDiskVolumeSource. # noqa: E501
:return: The disk_uri of this V1AzureDiskVolumeSource. # noqa: E501
:rtype: str
"""
return self._disk_uri
@disk_uri.setter
def disk_uri(self, disk_uri):
"""Sets the disk_uri of this V1AzureDiskVolumeSource.
:param disk_uri: The disk_uri of this V1AzureDiskVolumeSource. # noqa: E501
:type: str
"""
self._disk_uri = disk_uri
@property
def fs_type(self):
"""Gets the fs_type of this V1AzureDiskVolumeSource. # noqa: E501
:return: The fs_type of this V1AzureDiskVolumeSource. # noqa: E501
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""Sets the fs_type of this V1AzureDiskVolumeSource.
:param fs_type: The fs_type of this V1AzureDiskVolumeSource. # noqa: E501
:type: str
"""
self._fs_type = fs_type
@property
def kind(self):
"""Gets the kind of this V1AzureDiskVolumeSource. # noqa: E501
:return: The kind of this V1AzureDiskVolumeSource. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1AzureDiskVolumeSource.
:param kind: The kind of this V1AzureDiskVolumeSource. # noqa: E501
:type: str
"""
self._kind = kind
@property
def read_only(self):
"""Gets the read_only of this V1AzureDiskVolumeSource. # noqa: E501
:return: The read_only of this V1AzureDiskVolumeSource. # noqa: E501
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""Sets the read_only of this V1AzureDiskVolumeSource.
:param read_only: The read_only of this V1AzureDiskVolumeSource. # noqa: E501
:type: bool
"""
self._read_only = read_only
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(V1AzureDiskVolumeSource, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1AzureDiskVolumeSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"macermak@redhat.com"
] | macermak@redhat.com |
4869045ced8c753a4f306d56df41c546c702e354 | eb251e90f293a9e8831ced9f996c36e5ab0fc89d | /atgBlog/urls.py | 43fb3bd878bd084b6e462766648d8c7cf4146014 | [] | no_license | ayushganguli1769/Blog | d6b053ce8a169082fc41ab87d340f8cfec0039e7 | a9b8a0d06a704b17c2d7e139e3870a046acc59cf | refs/heads/master | 2022-05-10T23:23:28.860371 | 2020-01-31T05:01:32 | 2020-01-31T05:01:32 | 237,314,050 | 0 | 0 | null | 2022-04-22T23:02:45 | 2020-01-30T22:00:00 | HTML | UTF-8 | Python | false | false | 1,084 | py | """atgBlog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path,include
from django.conf import settings
from django.conf.urls.static import static

urlpatterns = [
    path('admin/', admin.site.urls),
    path('',include('blog.urls')),  # the blog app owns the site root
]
# Serve static and media files from Django itself while developing.
# Not for production code: a real web server should serve these.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"ayushganguli1769@gmail.com"
] | ayushganguli1769@gmail.com |
3b3a4b7d216d98f5a962d869ba41b6ad3cfc21d8 | 586c97e81b448d9f4c1525205eaccc727f789ee7 | /src/buildercore/project/stack_generation.py | 1e4727a5c13880a14ed594cfa5204447e44e2cf8 | [
"MIT"
] | permissive | elifesciences/builder | 33542171fd43a454d8c45feae181037ff414874d | 7de9bb53c7e6a447a075a66023815166ea54092f | refs/heads/master | 2023-08-16T11:22:40.684539 | 2023-08-15T08:30:31 | 2023-08-15T08:30:31 | 56,778,863 | 12 | 14 | MIT | 2023-09-10T04:06:16 | 2016-04-21T14:08:05 | Python | UTF-8 | Python | false | false | 2,265 | py | '''logic to generate and refresh the configuration of stacks and their list of resources.
'''
from functools import reduce
import os
from buildercore.utils import ensure, merge
from buildercore.project import stack_config, stack_generation__s3_bucket
import logging
LOG = logging.getLogger(__name__)
def _regenerate_resource(resource):
    """Regenerate a single resource, dispatching on its ``meta.type``."""
    handlers = {
        's3-bucket': stack_generation__s3_bucket.regenerate_resource,
    }
    resource_type = resource['meta']['type']
    handler = handlers[resource_type]
    return handler(resource)
def regenerate(stackname, config_path):
    """update each of the resources for the given `stackname` in stack config file `config_path`."""
    stack_map = stack_config.read_stack_file(config_path)
    # `defaults` is unpacked but intentionally unused here; parse_stack_map
    # also returns the stack map with defaults already merged in.
    defaults, stack_map = stack_config.parse_stack_map(stack_map)
    ensure(stackname in stack_map, "stack %r not found. known stacks: %s" % (stackname, ", ".join(stack_map.keys())))
    stack = stack_map[stackname]
    # regenerate every resource, then write only this stack's entry back.
    new_resource_list = [_regenerate_resource(resource) for resource in stack['resource-list']]
    stack['resource-list'] = new_resource_list
    stack_config.write_stack_file_updates({stackname: stack}, config_path)
# ---
def generate_stacks(resource_type, config_path):
    """generate new stacks with a single resource of the given `resource_type`.
    intended to bulk populate config files.
    does *not* remove stacks that were previously generated but have since been deleted."""
    # map of supported resource types to their generator functions.
    dispatch = {
        's3-bucket': stack_generation__s3_bucket.generate_stack
    }
    ensure(resource_type in dispatch,
           "unsupported resource type %r. supported resource types: %s" % (resource_type, ", ".join(dispatch.keys())))
    ensure(os.path.exists(config_path), "config path %r does not exist" % config_path)
    dispatch_fn = dispatch[resource_type]
    generated_stack_list = dispatch_fn(config_path)
    # sanity check, make sure each generated stack looks like:
    # {"foo-bucket": {"name": "foo-bucket", "meta": {...}, ...}}
    for stack in generated_stack_list:
        ensure(len(stack.keys()) == 1, "bad stack, expected exactly 1 key: %r" % stack)
    # merge the single-entry maps into one map and write them all out.
    stack_map = reduce(merge, generated_stack_list)
    stack_config.write_stack_file_updates(stack_map, config_path)
| [
"noreply@github.com"
] | elifesciences.noreply@github.com |
35104a58f8e7034138019f0fcc5d29c4efa3db8e | b162de01d1ca9a8a2a720e877961a3c85c9a1c1c | /478.generate-random-point-in-a-circle.python3.py | d3f495f6bbf8eed4bbf848d653b415a59364491a | [] | no_license | richnakasato/lc | 91d5ff40a1a3970856c76c1a53d7b21d88a3429c | f55a2decefcf075914ead4d9649d514209d17a34 | refs/heads/master | 2023-01-19T09:55:08.040324 | 2020-11-19T03:13:51 | 2020-11-19T03:13:51 | 114,937,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,973 | py | #
# [915] Generate Random Point in a Circle
#
# https://leetcode.com/problems/generate-random-point-in-a-circle/description/
#
# algorithms
# Medium (35.05%)
# Total Accepted: 2.8K
# Total Submissions: 8K
# Testcase Example: '["Solution", "randPoint", "randPoint", "randPoint"]\n[[1.0, 0.0, 0.0], [], [], []]'
#
# Given the radius and x-y positions of the center of a circle, write a
# function randPoint which generates a uniform random point in the circle.
#
# Note:
#
#
# input and output values are in floating-point.
# radius and x-y position of the center of the circle is passed into the class
# constructor.
# a point on the circumference of the circle is considered to be in the
# circle.
# randPoint returns a size 2 array containing x-position and y-position of the
# random point, in that order.
#
#
#
# Example 1:
#
#
# Input:
# ["Solution","randPoint","randPoint","randPoint"]
# [[1,0,0],[],[],[]]
# Output: [null,[-0.72939,-0.65505],[-0.78502,-0.28626],[-0.83119,-0.19803]]
#
#
#
# Example 2:
#
#
# Input:
# ["Solution","randPoint","randPoint","randPoint"]
# [[10,5,-7.5],[],[],[]]
# Output: [null,[11.52438,-8.33273],[2.46992,-16.21705],[11.13430,-12.42337]]
#
#
# Explanation of Input Syntax:
#
# The input is two lists: the subroutines called and their arguments.
# Solution's constructor has three arguments, the radius, x-position of the
# center, and y-position of the center of the circle. randPoint has no
# arguments. Arguments are always wrapped with a list, even if there aren't
# any.
#
#
#
class Solution:
    # Skeleton for LeetCode 478 ("Generate Random Point in a Circle"):
    # both methods are unimplemented -- their bodies consist solely of
    # docstrings, so they return None.

    def __init__(self, radius, x_center, y_center):
        """
        :type radius: float
        :type x_center: float
        :type y_center: float
        """

    def randPoint(self):
        """
        :rtype: List[float]
        """
# Your Solution object will be instantiated and called as such:
# obj = Solution(radius, x_center, y_center)
# param_1 = obj.randPoint()
| [
"richnakasato@hotmail.com"
] | richnakasato@hotmail.com |
39ce81a1f1b3370aee317de8eb639e17925a2def | 7bc529b7c074f14fd2f726d285f6453f2be4f07e | /PycharmProjects/class_system/venv/Scripts/easy_install-script.py | f1db90ceb86cd60139a8f10c95c258a77c4ff62a | [] | no_license | bopopescu/test-for-life | 52bc2bcb05c9f12c34311c5d8c7d69fac769fc20 | c65a13e16dcdcd1f9d91659b32a8b82794f40985 | refs/heads/master | 2022-11-17T17:45:47.382185 | 2020-04-01T08:31:47 | 2020-04-01T08:53:13 | 281,324,697 | 0 | 0 | null | 2020-07-21T07:17:22 | 2020-07-21T07:17:22 | null | UTF-8 | Python | false | false | 454 | py | #!C:\Users\dell\PycharmProjects\class_system\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"3176832432@qq.com"
] | 3176832432@qq.com |
87b4a130e349615dfb7bd957a728f59fc58ffd83 | 653eaef652627b155569b5fe9ab9bb3607fc1e78 | /alg/compartmental_gp/ModelSelection-R0.py | 4660c7e7d821149cfa546f0062dc596eff8f6e7e | [
"MIT",
"BSD-3-Clause"
] | permissive | IlyaTrofimov/mlforhealthlabpub | 11ab86a83bd2ffd2574364a956b322b0c62406ae | 190cbad2faae9e559ffe7a68143df7f747d70adc | refs/heads/main | 2023-04-16T03:58:38.423288 | 2021-04-21T10:22:43 | 2021-04-21T10:22:43 | 358,528,623 | 0 | 0 | NOASSERTION | 2021-04-16T08:25:26 | 2021-04-16T08:25:25 | null | UTF-8 | Python | false | false | 4,802 | py | #!/usr/bin/env python
# coding: utf-8
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pds
from pyro.ops.stats import quantile
import scipy.stats
import data_loader
import pyro_model.helper
# ## loading data
countries = [
'United Kingdom',
'Italy',
'Germany',
'Spain',
'US',
'France',
'Korea, South',
'Brazil',
'Iran',
'Netherlands',
'Canada',
'Turkey',
'Romania',
'Portugal',
'Sweden',
'Switzerland',
'Ireland',
'Hungary',
'Denmark',
'Austria',
'Mexico',
'India',
'Ecuador',
'Russia',
'Peru',
'Indonesia',
'Poland',
'Philippines',
'Japan',
'Pakistan',
'South Africa',
'Egypt',
'Norway'
]
prefix = 'trained_models/'
# prefix = ''
pad = 24
data_dict = data_loader.get_data_pyro(countries, smart_start=False, pad=pad, legacy=False)
data_dict = pyro_model.helper.smooth_daily(data_dict)
eval_list = countries
days = 14
train_len = data_dict['cum_death'].shape[0] - days
# ## loading results
seed_list = []
predictive_list = []
for seed in range(25):
model_id = 'all-countries-new-day-{}-rng-{}'.format(days, seed)
try:
with open(prefix + 'Loop{}/{}-predictive.pkl'.format(days, model_id), 'rb') as f:
predictive = pickle.load(f)
except Exception:
continue
predictive_list.append(predictive)
seed_list.append(seed)
print(len(seed_list))
# validation accuracy
val_window = 28
seir_error_list = []
for i in range(len(predictive_list)):
seir_train = quantile(predictive_list[i]['prediction'].squeeze(), 0.5, dim=0)[-val_window + 1:, :].numpy()
seir_train = np.diff(seir_train, axis=0)
seir_label = data_dict['daily_death'][train_len - val_window:train_len, :].numpy()
seir_error = np.abs(np.sum(seir_train, axis=0) - np.sum(seir_label, axis=0))
seir_error_list.append(seir_error)
seir_error = np.stack(seir_error_list, axis=0)
best_model = np.argmin(seir_error, axis=0)
best_seed = [seed_list[x] for x in best_model]
R0_list = []
s_index_list = []
R0_country = []
prediction_list = []
truth_list = []
for j, i in zip(range(len(countries)), best_model):
c = countries[j]
if c not in eval_list:
continue
t_init = data_dict['t_init'][j].squeeze()
predictive = predictive_list[i]['R0']
R0 = predictive.mean(axis=0).squeeze()[j, :].numpy()
R0[:t_init] = np.nan
R0_list.append(R0)
R0_country.append(c)
s_index = data_dict['s_index'][:train_len, j]
s_index[:t_init] = np.nan
s_index_list.append(s_index)
predictions = predictive_list[i]['prediction'].mean(axis=0).squeeze()[:, j].numpy()
prediction_list.append(predictions)
truth = data_dict['cum_death'][:train_len, j]
truth_list.append(truth)
R0 = np.stack(R0_list, axis=-1)
s_index = np.stack(s_index_list, axis=-1)
seir_predictions = np.stack(prediction_list, axis=-1)
truth = np.stack(truth_list, axis=-1)
df_r0 = pds.DataFrame(R0, columns=R0_country, index=data_dict['date_list'][:train_len])
df_s_index = pds.DataFrame(s_index, columns=R0_country, index=data_dict['date_list'][:train_len])
df_r0.head()
df_r0.to_csv('tables/Fig-C6-R0.csv')
c = 0
plt.plot(R0[:, c])
plt.plot(s_index[:, c])
c = 0
plt.plot(np.diff(seir_predictions[:, c]))
plt.plot(np.diff(truth[:, c]))
rho_list = []
r_init_list = []
for r, s in zip(R0_list, s_index_list):
s = s.numpy()
nonna = np.logical_and(~np.isnan(r), ~np.isnan(s))
met = scipy.stats.pearsonr(s[nonna], r[nonna])
r_init = np.mean(r[nonna][:7])
rho_list.append(met[0])
r_init_list.append(r_init)
df_c = pds.DataFrame({'country': countries, 'rho': rho_list, 'r_init': r_init_list})
df_c = df_c.sort_values('country')
df_c.head()
dat_feat = data_loader.get_country_feature(countries)
dat_feat = dat_feat.pivot('country', 'metric', 'value')
dat_feat.head()
row_list = []
for c in dat_feat.columns:
nonna = ~np.isnan(dat_feat[c].values)
met = scipy.stats.pearsonr(dat_feat[c][nonna], df_c['r_init'][nonna])
row_list.append((c, met[0], met[1]))
dat_cor = pds.DataFrame(row_list, columns=['met', 'cor', 'p_value'])
dat_cor = dat_cor.sort_values('cor', ascending=False)
dat_cor_p = dat_cor[dat_cor['p_value'] < 0.05].iloc[:10, :]
dat_cor_p.to_csv('tables/Table-C5-features_R0_before_lockdown.csv')
# policy effect
row_list = []
for c in dat_feat.columns:
nonna = ~np.isnan(dat_feat[c].values)
met = scipy.stats.pearsonr(dat_feat[c][nonna], df_c['rho'][nonna])
row_list.append((c, met[0], met[1]))
dat_cor = pds.DataFrame(row_list, columns=['met', 'cor', 'p_value'])
dat_cor = dat_cor.sort_values('cor', ascending=False)
dat_rho_p = dat_cor[dat_cor['p_value'] < 0.05]
dat_rho_p.to_csv('tables/Table-C4-features_effect_of_lockdown.csv')
| [
"e.s.saveliev@gmail.com"
] | e.s.saveliev@gmail.com |
456a7f4e05576c0361231e1c2ab4676819d15196 | 07cdb5dca64527b3172b3b59d0b5de266dcaf3a8 | /job_runner_worker/enqueuer.py | e17481448fd1ece24beb9e974873340d47266d76 | [] | no_license | alexcouper/job-runner-worker | 3d462fa8f7c6f588a0da90c9f580736af3e3fab0 | 71166bb9d2fd2572e728857131ebffff5f14cf42 | refs/heads/master | 2021-01-18T10:19:07.443862 | 2013-02-11T12:16:32 | 2013-02-11T12:16:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,601 | py | import json
import logging
import random
import time
from datetime import datetime, timedelta
import zmq.green as zmq
from gevent.queue import Empty
from pytz import utc
from job_runner_worker.config import config
from job_runner_worker.models import KillRequest, Run, Worker
logger = logging.getLogger(__name__)
def enqueue_actions(
        zmq_context, run_queue, kill_queue, event_queue, exit_queue):
    """
    Handle incoming actions sent by the broadcaster.

    :param zmq_context:
        An instance of ``zmq.Context``.
    :param run_queue:
        An instance of ``Queue`` for pushing the runs to.
    :param kill_queue:
        An instance of ``Queue`` for pushing the kill-requests to.
    :param event_queue:
        An instance of ``Queue`` for pushing events to.
    :param exit_queue:
        An instance of ``Queue`` to consume from. If this queue is not empty,
        the function needs to terminate.
    """
    logger.info('Starting enqueue loop')
    subscriber = _get_subscriber(zmq_context)
    expected_address = 'master.broadcast.{0}'.format(
        config.get('job_runner_worker', 'api_key'))

    last_activity_dts = datetime.utcnow()
    reconnect_after_inactivity = config.getint(
        'job_runner_worker', 'reconnect_after_inactivity')

    while True:
        # terminate as soon as anything appears on the exit queue
        try:
            exit_queue.get(block=False)
            logger.info('Termintating enqueue loop')
            return
        except Empty:
            pass

        try:
            # non-blocking receive so the exit queue is polled regularly
            address, content = subscriber.recv_multipart(zmq.NOBLOCK)
            last_activity_dts = datetime.utcnow()
        except zmq.ZMQError:
            # this is needed in case the ZMQ publisher is load-balanced and the
            # loadbalancer dropped the connection to the backend, but not the
            # connection to our side. without this work-around, zmq will think
            # that all is well, and we won't receive anything anymore
            delta = datetime.utcnow() - last_activity_dts
            if delta > timedelta(seconds=reconnect_after_inactivity):
                logger.warning(
                    'There was not activity for {0}, reconnecting'
                    ' to publisher'.format(delta)
                )
                subscriber.close()
                # random back-off so a fleet of workers doesn't reconnect
                # all at the same moment
                time.sleep(random.randint(1, 10))
                subscriber = _get_subscriber(zmq_context)
                last_activity_dts = datetime.utcnow()
                continue
            else:
                time.sleep(0.5)
                continue

        # since zmq is subscribed to everything that starts with the given
        # prefix, we have to do a double check to make sure this is an exact
        # match.
        if not address == expected_address:
            continue

        logger.debug('Received [{0}]: {1}'.format(address, content))
        message = json.loads(content)

        # dispatch on the broadcast action type
        if message['action'] == 'enqueue':
            _handle_enqueue_action(message, run_queue, event_queue)
        elif message['action'] == 'kill':
            _handle_kill_action(message, kill_queue, event_queue)
        elif message['action'] == 'ping':
            _handle_ping_action(message)

    # NOTE(review): unreachable -- the loop above only exits via `return`.
    subscriber.close()
def _get_subscriber(zmq_context):
    """
    Return a new subscriber connection for the given ``zmq_context``.
    """
    endpoint = 'tcp://{0}:{1}'.format(
        config.get('job_runner_worker', 'broadcaster_server_hostname'),
        config.get('job_runner_worker', 'broadcaster_server_port'),
    )
    topic = 'master.broadcast.{0}'.format(
        config.get('job_runner_worker', 'api_key'))

    subscriber = zmq_context.socket(zmq.SUB)
    subscriber.connect(endpoint)
    subscriber.setsockopt(zmq.SUBSCRIBE, topic)
    return subscriber
def _handle_enqueue_action(message, run_queue, event_queue):
    """
    Handle the ``'enqueue'`` action.
    """
    run_uri = '{0}{1}/'.format(
        config.get('job_runner_worker', 'run_resource_uri'),
        message['run_id']
    )
    run = Run(run_uri)

    if run.enqueue_dts:
        # already enqueued -- nothing to do, but worth flagging
        logger.warning(
            'Was expecting that run: {0} was not in queue yet'.format(
                run.id))
        return

    run.patch({
        'enqueue_dts': datetime.now(utc).isoformat(' ')
    })
    run_queue.put(run)
    event_queue.put(json.dumps(
        {'event': 'enqueued', 'run_id': run.id, 'kind': 'run'}))
def _handle_kill_action(message, kill_queue, event_queue):
    """
    Handle the ``'kill'`` action.
    """
    request_id = message['kill_request_id']
    resource_uri = '{0}{1}/'.format(
        config.get('job_runner_worker', 'kill_request_resource_uri'),
        request_id
    )
    kill_request = KillRequest(resource_uri)

    if kill_request.enqueue_dts:
        # already enqueued -- nothing to do, but worth flagging
        logger.warning(
            'Was expecting that kill: {0} was not in queue yet'.format(
                request_id))
        return

    kill_request.patch({
        'enqueue_dts': datetime.now(utc).isoformat(' ')
    })
    kill_queue.put(kill_request)
    event_queue.put(json.dumps({
        'event': 'enqueued',
        'kill_request_id': kill_request.id,
        'kind': 'kill_request'
    }))
def _handle_ping_action(message):
    """
    Handle the ``'ping'`` action.
    """
    workers = Worker.get_list(
        config.get('job_runner_worker', 'worker_resource_uri'),
        params={
            'api_key': config.get('job_runner_worker', 'api_key')
        }
    )

    if len(workers) != 1:
        logger.warning('Workers by api_key query resulted in multiple results')
        return

    workers[0].patch({
        'ping_response_dts': datetime.now(utc).isoformat(' '),
    })
| [
"info@brocaar.com"
] | info@brocaar.com |
b8bc34380ab97947d8f82b337c9f47466f14d6e5 | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /Introduction_to_numerical_programming_using_Python_and_CPP_Beu/Ch03/Python/P03-Plot0.py | acdc3bf8895ffae30f8fccc6eb2e5aa5a7edaa87 | [] | no_license | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 963 | py | # Plot a function of one variable
from math import *
from graphlib0 import *
def Func(x):
    """Function to be plotted: f(x) = x**3 * exp(-x)."""
    return x * x * x * exp(-x)
# main
# Sample Func on n uniformly spaced points of [xmin, xmax] and hand the
# points to graphlib0's Plot0.  Arrays are 1-based; index 0 is unused.
xmin = -0.8; xmax = 7.8 # limits of the plotting interval
n = 50 # number of points
x = [0]*(n+1) # coordinates of points
y = [0]*(n+1)
h = (xmax-xmin)/(n-1) # argument spacing
for i in range(1,n+1):
    x[i] = xmin + (i-1)*h # arguments
    y[i] = Func(x[i]) # function values
GraphInit(800,600) # create canvas
Plot0(x,y,n,"blue",0.15,0.95,0.15,0.85,"x","y","x^3 * exp(-x)") # create plot
MainLoop() # enter Tkinter event loop
| [
"me@yomama.com"
] | me@yomama.com |
cf0adb8a879d762e37cafe79d7e0551973e1c478 | 0b8cb0ca7870320457132061a1aa964c1481ad49 | /geeksforgeeks/reverse-alternate-levels-binary-tree.py | b947ecf5b9a27c10128b6dbdd89c3e0bbb86cf0e | [] | no_license | hoshinotsuki/DataStructures_Algorithms | c60f8fbd83c29c4cb2a5911386664241cf1fc9ee | fa5085345f8c9da7a3c947d2c8e015169d96c166 | refs/heads/master | 2023-03-16T05:49:40.253244 | 2019-05-12T07:11:33 | 2019-05-12T07:11:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,451 | py | class Node:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def inorder_print(root):
    """Print node values in inorder (left, node, right), one per line."""
    stack, node = [], root
    while stack or node is not None:
        while node is not None:
            stack.append(node)
            node = node.left
        node = stack.pop()
        print(node.value)
        node = node.right
def preorder_print(root):
    """Print node values in preorder (node, left, right), one per line."""
    pending = [root]
    while pending:
        node = pending.pop()
        if node is None:
            continue
        print(node.value)
        # Push right first so the left subtree is printed first.
        pending.append(node.right)
        pending.append(node.left)
def postorder_print(root):
    """Print node values in postorder (left, right, node), one per line."""
    if root is not None:
        postorder_print(root.left)
        postorder_print(root.right)
        print(root.value)
def bfs_print(root):
    """Print node values level by level (breadth-first), one per line."""
    level = [root]
    while level:
        next_level = []
        for node in level:
            print(node.value)
            if node.left:
                next_level.append(node.left)
            if node.right:
                next_level.append(node.right)
        level = next_level
# Two traversal
def reverse_alternate(root):
    """Reverse the values stored at every odd depth of the tree, in place."""
    values = []
    store_alternate(root, values, 0)
    replace_alternate(root, values, 0)

def store_alternate(root, stack, depth):
    """Inorder walk collecting the values found at odd depths into *stack*."""
    if root is not None:
        store_alternate(root.left, stack, depth + 1)
        if depth % 2:
            stack.append(root.value)
        store_alternate(root.right, stack, depth + 1)

def replace_alternate(root, stack, depth):
    """Inorder walk writing odd-depth values back, popping from the end.

    Popping from the end of the inorder-collected list is what reverses
    each alternate level.
    """
    if root is not None:
        replace_alternate(root.left, stack, depth + 1)
        if depth % 2:
            root.value = stack.pop()
        replace_alternate(root.right, stack, depth + 1)
# (Optimal) One traversal
# def reverse_alternate_optimal(root):
# preorder_swap(root.left, root.right, 1)
#
# def preorder_swap(node1, node2, depth):
# if node1 is None or node1 is None:
# return
# if depth % 2 == 1:
# # swap values
# node1.value, node2.value = node2.value, node1.value
#
# preorder_swap(node1.left, node2.right, depth+1)
# preorder_swap(node1.right, node2.left, depth+1)
# Build a complete four-level tree (levels: a | b c | d..g | h..o),
# reverse its alternate (odd-depth) levels in place, then print it
# breadth-first to show the result.
root = Node('a')
root.left = Node('b')
root.right = Node('c')
root.left.left = Node('d')
root.left.right = Node('e')
root.right.left = Node('f')
root.right.right = Node('g')
root.left.left.left = Node('h')
root.left.left.right = Node('i')
root.left.right.left = Node('j')
root.left.right.right = Node('k')
root.right.left.left = Node('l')
root.right.left.right = Node('m')
root.right.right.left = Node('n')
root.right.right.right = Node('o')
reverse_alternate(root)
print("BFS")
bfs_print(root)
# print("DFS inorder")
# inorder_print(root)
#
# print("DFS preorder")
# preorder_print(root)
#
# print("DFS postorder")
# postorder_print(root)
| [
"watanabe0621@gmail.com"
] | watanabe0621@gmail.com |
9db9b8f1e3a1f3156864d5eca707290639edf7ec | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2094/60788/256591.py | 7e0afe278f2732b0f090d8db9c2d812e6bfeb3b7 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | import re
# Anchored pattern for a decimal literal: optional sign, digits with an
# optional fraction (or a bare ".digits"), and an optional exponent.
num_re = re.compile(r'^[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)([eE][-+]?[0-9]+)?$')

def f(s):
    """Return True when *s* is a well-formed (possibly signed) number."""
    return num_re.match(s) is not None
# Read one line from stdin and report whether it parses as a number.
print(f(input().strip()))
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
94035f21f1d008b8f39cea99a2a598592461df71 | 6112ff74b2a5f602ec2e8323829eb6092f02c56e | /notes.py | 8c2fed70ff3d17569038a1d713efd2a1db09eebb | [
"MIT"
] | permissive | natj/notebook | 84b50a4de8656af46cfc325b8fca684073dc92bf | 5eb3774dde7781e0cbfe9cf301b9625eb5108658 | refs/heads/master | 2021-05-07T04:23:10.910790 | 2019-11-24T08:06:50 | 2019-11-24T08:06:50 | 111,324,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,751 | py | import hashlib
import datetime
import regexes
class Note:
    """A markdown note: a title, body text and an optional DD-MM-YY date.

    The class-level empty strings are per-instance defaults; setters rebind
    them on the instance, so the shared strings are never mutated.
    """
    name = ""
    title = ""
    body = ""
    date = ""
    # def __init__(self):
    def setName(self, name):
        self.name = name
    def setTitle(self, title):
        self.title = title
    def setBody(self, body):
        """Store *body* normalized to end with exactly one newline."""
        body = body.rstrip()
        body += "\n"
        self.body = body
    def setDate(self, date):
        self.date = date
    def setDateNow(self):
        """Stamp the note with today's date formatted as DD-MM-YY."""
        self.date = datetime.datetime.now().strftime("%d-%m-%y")
    # return date; if not set use today
    def getDate(self):
        if self.date == "":
            self.setDateNow()
        return self.date
    def createName(self):
        """Build, remember (via setName) and return the note's file name.

        Derived from the title (spaces -> dashes, slashes dropped,
        lowercased) unless a name is already set; "_<date>" is appended
        when a date exists, then a ".md" suffix.
        NOTE(review): the result is stored back into self.name, so calling
        this twice appends the date and suffix again -- confirm callers
        invoke it only once per save.
        """
        tmp = ""
        # if empty name, then create form title
        if self.name == "":
            # TODO detect all illegal characters
            tmp = self.title.replace(" ", "-").lower()
            tmp = tmp.replace("/", "").lower()
        # else lets use the name slot
        else:
            tmp = self.name
        # append date if there is one set
        if not (self.date == ""):
            tmp += "_" + self.date
        # suffix
        tmp += ".md"
        # add this to as my name
        self.setName(tmp)
        return tmp
    def hash(self):
        """Hex MD5 digest of title + body; used as a content identifier."""
        m = hashlib.md5()
        m.update(self.title.encode("utf-8"))
        m.update(self.body.encode("utf-8"))
        return m.hexdigest()
    # create and save to file
    def save(self, directory):
        """Render the note as markdown and write it to directory/<createName()>."""
        fname = directory + "/" + self.createName()
        msg = ""
        # title header
        # msg += "# {}\n".format(self.name)
        # msg += "\n"
        msg += "## {}\n".format(self.title)
        if not (self.date == ""):
            msg += " created: {}\n".format(self.date)
            msg += " modified: {}\n".format(self.date)
        msg += "--------------------------------------------------\n"
        # body
        # msg += "\n"
        msg += "{}\n".format(self.body)
        # write to file
        # print(msg)
        f = open(fname, "w")
        f.write(msg)
        f.close()
    # print all content
    def print(self, msg):
        """Append this note's rendering (separator, header, hash, body) to *msg* and return it."""
        msg += "--------------------------------------------------------------\n"
        msg += "## {}\n".format(self.title)
        # msg += " created: {}\n".format(self.date)
        # msg += " modified: {}\n".format(self.date)
        msg += "---:{}\n".format(self.hash())
        prewv = self.body.rstrip()
        # if len(prewv) > self.previewLen:
        # prewv = prewv[:140]
        # prewv += " . . ."
        msg += "{}\n".format(prewv)
        msg += "\n"
        return msg
class Inbox(Note):
    """A Note fixed to the "inbox" title/name, built from another note."""
    title = "inbox"
    name = "inbox"

    def __init__(self, note):
        # Only the body and date carry over; title and name stay "inbox".
        self.body = note.body
        self.date = note.date

    # print all content
    def print(self, msg):
        """Append this note's rendering (separator, header, body) to *msg* and return it."""
        rendered = [
            "--------------------------------------------------------------",
            "## {}".format(self.title),
            self.body.rstrip(),
            "",
        ]
        return msg + "\n".join(rendered) + "\n"
class TaskList(Note):
    """A synthetic note aggregating open tasks parsed from other notes."""
    title = "tasklist"
    name = "tasklist"
    def __init__(self, note):
        self.body = note.body
        self.date = note.date
    def getTasks(self, msg):
        """Return the text of every incomplete task line found in *msg*.

        A line is a task when regexes.REtasks matches; tasks whose text
        matches regexes.REtask_compl are treated as completed and skipped.
        """
        tasks = []
        for line in msg.splitlines():
            if line == "":
                continue
            mtask = regexes.REtasks.match(line)
            #print("line:", line)
            if mtask:
                t = mtask.group(1)
                #print(" found task that has text:", mtask.group(1))
                mcompl = regexes.REtask_compl.match(t)
                if not(mcompl):
                    tasks.append(t)
        return tasks
    # parse tasks from set of notes
    def createTasks(self, notes):
        """Rebuild self.body as a table of open tasks across *notes*."""
        msg = "\n"
        for note in notes:
            name = note.title
            #print("**********")
            #print("project name: {}".format(name))
            tasks = self.getTasks(note.body)
            #print("***tasks were:")
            #print(tasks)
            #msg += "+++ {}\n".format(name)
            itasks = 1
            for task in tasks:
                #simple print
                #msg += "- [ ] {}\n".format(task)
                #tabbed print
                msg += "{:20.20} | ".format(name)
                msg += "-[] {:60.60}".format(task)
                if len(task) > 70:
                    msg += "...\n"
                else:
                    msg += " \n"
                # emit at most three tasks per project (break after the 3rd)
                if itasks >= 3:
                    break
                itasks += 1
            #msg += "\n"
        self.body = msg
    # print all content
    def print(self, msg):
        """Append this list's rendering to *msg* and return it (also logs to stdout)."""
        print("tasklist {} writing to msg".format(self.title))
        msg += "--------------------------------------------------------------\n"
        msg += "## {}\n".format(self.title)
        prewv = self.body.rstrip()
        msg += "{}\n".format(prewv)
        msg += "\n"
        return msg
# read note from file and create an object
def readNoteFile(fname):
    """Parse the note file *fname* into a Note object.

    The header is scanned line by line for name/title/date patterns until
    regexes.REmsgb marks the start of the message; everything after that
    separator becomes the body.
    """
    n = Note()
    f = open(fname, "r")
    # read until message
    for line in f:
        if regexes.REmsgb.match(line):
            break
        mname = regexes.REname.match(line)
        if mname:
            n.setName(mname.group(1))
        mtitle = regexes.REtitle.match(line)
        if mtitle:
            n.setTitle(mtitle.group(1))
        mdate = regexes.REdate.match(line)
        if mdate:
            n.setDate(mdate.group(1))
    # read body
    body = ""
    for line in f:
        body += line
    # TODO catch error if there is no --- separator
    n.setBody(body)
    f.close()
    return n
| [
"nattila.joonas@gmail.com"
] | nattila.joonas@gmail.com |
13db3216e4fdf5ed14791700f50691e3d5ce76b4 | c153118983a83bede2b43cd7d4f14213eb5b597e | /FQA/utils/jiebaSegment.py | 12123b6c630795d6fd28a13096f6e9e7c3881daf | [] | no_license | jessie0624/NLPProjects | 86377f72cf739d5e41697fd6112555b302f24cc0 | 71daf6bbe20300b943d409b4000f4bba6073a84b | refs/heads/main | 2023-01-19T06:02:27.750891 | 2020-11-22T04:18:12 | 2020-11-22T04:18:12 | 303,234,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | import os, sys
import jieba
import codecs
sys.path.append("..")
import config
class Seg(object):
    """Jieba-based segmenter that filters a configurable stop-word list."""

    # Stop-word file path comes from the project config (one word per line).
    stopword_filepath = config.stop_words_path

    def __init__(self):
        self.stopwords = set()
        self.read_in_stopword()

    def load_userdict(self, file_name):
        """Load a user dictionary into jieba."""
        jieba.load_userdict(file_name)

    def read_in_stopword(self):
        """Populate ``self.stopwords`` from the stop-word file.

        Bug fix: the previous ``while True: readline()`` loop broke on the
        first *blank* line (after stripping ``\\r\\n`` a blank line is falsy,
        same as EOF), silently dropping every stop word after it.  Iterating
        the file and merely skipping blanks reads the whole file.
        """
        file_obj = codecs.open(self.stopword_filepath, 'r', 'utf-8')
        try:
            for line in file_obj:
                line = line.strip('\r\n')
                if line:
                    self.stopwords.add(line)
        finally:
            file_obj.close()

    def cut(self, sentence, stopword=True, cut_all=False):
        """Segment *sentence*; drop stop words unless *stopword* is False."""
        seg_list = jieba.cut(sentence, cut_all)
        results = []
        for seg in seg_list:
            if stopword and seg in self.stopwords:
                continue
            results.append(seg)
        return results

    def cut_for_search(self, sentence, stopword=True):
        """Segment *sentence* in search-engine mode, filtering stop words."""
        seg_list = jieba.cut_for_search(sentence)
        results = []
        for seg in seg_list:
            if stopword and seg in self.stopwords:
                continue
            results.append(seg)
        return results
"jessie_lijie@126.com"
] | jessie_lijie@126.com |
81f32ee23b2126fdd946c66bd9aedca49466f78a | aad51c9bee18cd75ec60c95df23a97013a5ba601 | /test_colab_selenium.py | 202f996e11fcd1af42e07a34369af9e0133b5d1d | [] | no_license | greysian777/colab-12hrs | 66fcbdf2fe9012a578743cea8fa9e713cac8cac5 | c14b0700a540e50ae1ff6c83911bce8d4c495c11 | refs/heads/master | 2022-08-02T17:25:28.306818 | 2020-05-23T07:25:55 | 2020-05-23T07:25:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,485 | py | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from pathlib import Path
import time
import logging
import random
# Timestamped log format shared by the whole script.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%m/%d/%Y %H:%M:%S',
    level=logging.INFO)
logger = logging.getLogger(__name__)
# Start Chrome maximized so page elements are laid out predictably.
chrome_option = Options()
chrome_option.add_argument("--start_maximized")
# Read credentials from a local "config" file, one value per line in the
# order: email, password, Colab notebook link, chromedriver path.
# Bug fix: ``is_file`` is a method -- the bare attribute is always truthy,
# so the missing-file branch could never run.  Note that when the file is
# missing the credential names stay undefined, as in the original intent.
if Path('./config').is_file():
    CONFIG = open('config').read().splitlines()
    EMAIL, PASS, GOOGLE_COLAB_LINK, CHROMEDRIVER_PATH = (
        CONFIG[0], CONFIG[1], CONFIG[2], CONFIG[3])
else:
    logger.info('sing nggena ae')
logger.info(f'using {EMAIL}')
# The Selenium driver is created at import time; every helper below shares it.
driver = webdriver.Chrome(
    executable_path=CHROMEDRIVER_PATH, options=chrome_option)
def sleeper(t, silent=True):
    """Sleep for *t* seconds, one second at a time.

    When *silent* is False the remaining seconds are printed as a
    countdown; a completion message is always printed.
    """
    for remaining in range(t, 0, -1):
        if not silent:
            print(remaining)
        time.sleep(1)
    print(f'finished sleeping for {t} seconds')
def login():
    """Log into the ITS webmail portal, ride the Google SSO, open the notebook.

    Fills the mail2.its.ac.id form with EMAIL/PASS, handles Google's
    "speedbump" confirmation page when it appears, then navigates the
    shared Selenium ``driver`` to GOOGLE_COLAB_LINK.
    """
    driver.get('https://mail2.its.ac.id/index.php')
    driver.find_element_by_xpath(
        '//*[@id="content"]/form/table/tbody/tr/td/table/tbody/tr/td/table/tbody/tr[1]/td[2]/input').send_keys(EMAIL)
    driver.find_element_by_xpath(
        '//*[@id="content"]/form/table/tbody/tr/td/table/tbody/tr/td/table/tbody/tr[2]/td[2]/input').send_keys(PASS)
    driver.find_element_by_xpath(
        '//*[@id="content"]/form/table/tbody/tr/td/table/tbody/tr/td/table/tbody/tr[3]/td[2]/input').click()
    # give the redirect a moment to land before inspecting the URL
    sleeper(3, silent=False)
    logger.info(driver.current_url)
    if 'speedbump' in driver.current_url:
        logger.info('google speed bump detected, clicking continue now')
        driver.find_element_by_xpath(
            '//*[@id="view_container"]/div/div/div[2]/div/div[2]/div/div[1]/div/span/span').click()
    driver.get('https://google.com/')
    logger.info('logged in to integra')
    logger.info('logged in to gmail')
    driver.get(GOOGLE_COLAB_LINK)
def main():
    """Wait a random 10-20s, then click Colab's connect button once.

    NOTE(review): the third line uses the root logger (``logging.info``)
    while the rest of the script uses the module ``logger`` -- output is
    the same under basicConfig, but the inconsistency looks accidental.
    """
    logger.info('clicking connect')
    sleepy_time = random.randint(10,20)
    logging.info(f'sleeping for {sleepy_time} seconds')
    sleeper(sleepy_time)
    driver.find_element_by_xpath(
        '//*[@id="top-toolbar"]/colab-connect-button').click()
if __name__ == "__main__":
    c = 0
    login()
    # Click "connect" forever; any exception resets the counter and retries,
    # keeping the Colab session alive past idle timeouts.
    while True:
        try:
            main()
        except Exception as e:
            logger.info('failed, retrying now')
            logger.info(e)
            c = 0
            continue
        c += 1
        logger.info(f'clicked for {c}')
| [
"svmihar@gmail.com"
] | svmihar@gmail.com |
a49a6ba95a088be95166e25ae39887722cfd44c0 | 4be5c172c84e04c35677f5a327ab0ba592849676 | /python/interviewbit/heaps_and_maps/lru_cache/lru_cache.py | b68e4548039637708100ece4e3a0dd9527e0da57 | [] | no_license | niranjan-nagaraju/Development | 3a16b547b030182867b7a44ac96a878c14058016 | d193ae12863971ac48a5ec9c0b35bfdf53b473b5 | refs/heads/master | 2023-04-06T20:42:57.882882 | 2023-03-31T18:38:40 | 2023-03-31T18:38:40 | 889,620 | 9 | 2 | null | 2019-05-27T17:00:29 | 2010-09-05T15:58:46 | Python | WINDOWS-1258 | Python | false | false | 7,620 | py | #encoding: utf-8
'''
https://www.interviewbit.com/problems/lru-cache/
LRU Cache
Design and implement a data structure for LRU (Least Recently Used) cache. It should support the following operations: get and set.
get(key) -
Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.
set(key, value) -
Set or insert the value if the key is not already present. When the cache reaches its capacity, it should invalidate the least recently used item before inserting the new item.
The LRU Cache will be initialized with an integer corresponding to its capacity. Capacity indicates the maximum number of unique keys it can hold at a time.
Definition of ¿least recently used¿ : An access to an item is defined as a get or a set operation of the item.
¿Least recently used¿ item is the one with the oldest access time.
NOTE: If you are using any global variables, make sure to clear them in the constructor.
Example :
Input :
capacity = 2
set(1, 10)
set(5, 12)
get(5) returns 12
get(1) returns 10
get(10) returns -1
set(6, 14) this pushes out key = 5 as LRU is full.
get(5) returns -1
'''
'''
Solution Outline:
1. Use an SLL-based queue with lookup for nodes.
1.1 Initialize SLL-Queue with a dummy tail so any node can be deleted in O(1) given only its reference
1.2 The lookup table for nodes contains a mapping for cache keys -> nodes in the queue
2. set(key, value):
2.1 if the cache already has the key, move its node to the back marking it as MRU
Update its value in the node
2.2 Otherwise.
if the LRU-cache is at full-capacity: evict a LRU entry from the front of the queue.
Add the new key to the back of the queue.
3. get(key):
3.1 If the key is not found in the lookup table, => return -1
3.2 Get the node for the key, move it to the back of the queue marking it as MRU
return its value
Sample run:
Lookup: {}
Queue: []
capacity: 2
set(1,1)
lookup(1): keyerror
Add to queue, update lookup table with the key->node
Queue: (1,1)
Lookup: {1:n1}
set(2,2)
lookup(2): keyerror
Add to queue, update lookup table with the key->node
Queue: n1(1,1) -> n2(2,2)
Lookup: {1:n1, 2:n2}
get(1):
lookup(1) -> n1
val: 1
Move (1,1) to back of the queue
Queue: n2(2,2) -> n1(1,1)
return 1
set(3,3):
len(Queue) == 2 == capacity
evict
Queue front: n2(2,2) -> dequeue (n2)
Queue: n1(1,1)
lookup: {1: n1}
Enqueue(3,3):
Queue: n1(1,1) -> n3(3,3)
lookup: {1: n1, 3: n3}
'''
class SLLQueueWithLookup:
    """Singly-linked queue with an O(1) key -> node lookup table.

    The list always ends in a *dummy* tail node (item is None).  Storing
    a payload in the node *before* the dummy lets any node be deleted in
    O(1) given only its own reference: the successor's item is copied
    backwards and the successor is unlinked instead.

    Bug fix vs. the original: ``dequeue``/``remove`` used to *assign*
    ``lookup[key] = None`` instead of deleting the entry, so the lookup
    dict grew by one dead entry per evicted key for the lifetime of the
    queue.  Keys are now popped; ``get()`` still returns None for any
    missing key, so observable behavior is unchanged (only the debug
    ``__str__`` no longer shows "key:None" leftovers).
    """

    class Node:
        def __init__(self, item=None):
            self.item = item    # (key, value) tuple, or None for the dummy tail
            self.next = None

        def __str__(self):
            return str(self.item) if self.item else '<None>'

    def __init__(self):
        # Initialize SLL with a dummy node
        self.head = self.tail = SLLQueueWithLookup.Node()
        self.lookup = {}        # key -> Node currently holding (key, value)
        self.num_items = 0

    def __str__(self):
        tmp = self.head
        qstr = []
        while tmp:
            qstr.append(str(tmp))
            tmp = tmp.next
        dstr = []
        for k, v in self.lookup.items():
            dstr.append(str(k) + ":" + str(v))
        return str(qstr) + '\n' + str(dstr)

    # Number of items in the SLL-Queue
    def __len__(self):
        return self.num_items

    # Add 'key' to the back of the queue
    def enqueue(self, key, value):
        """Append (key, value) at the back and index it in the lookup table."""
        # Create a new dummy node for tail, and append it
        new = SLLQueueWithLookup.Node()
        node = self.tail
        node.next = new
        # Use current dummy tail node to store 'key'
        node.item = (key, value)
        self.lookup[key] = node
        # Make new node the dummy tail node
        self.tail = new
        self.num_items += 1

    # Remove 'key' from the front of the queue and lookup table
    def dequeue(self):
        """Drop the front (least recently enqueued) entry; no-op when empty."""
        if self.num_items == 0:
            return
        # pop (not assign None) so evicted keys don't accumulate in lookup
        self.lookup.pop(self.head.item[0], None)
        self.removeNode(self.head)

    # Remove 'key' and its node from the SLL queue
    def remove(self, key):
        """Delete *key* anywhere in the queue; silently ignores unknown keys."""
        if self.lookup.get(key) is None:
            # key doesn't exist in the cache
            return
        node = self.lookup[key]
        self.lookup.pop(key, None)
        self.removeNode(node)

    # Remove a specified node from the SLL queue
    def removeNode(self, node):
        """Unlink *node* in O(1) by copying its successor's item backwards.

        Unsafe: assumes *node* is a valid, non-dummy node of this queue.
        """
        self.num_items -= 1
        # copy node.next item into node and remove node.next from the list
        node.item = node.next.item
        node.next = node.next.next
        if node.item is not None:
            # node now holds the successor's payload; repoint its lookup entry
            self.lookup[node.item[0]] = node
        else:
            # 'node' was the last 'valid' node; it becomes the new dummy tail
            self.tail = node

    # Re-enqueue 'key' to the back of the queue
    # Unsafe: will not check if the queue is empty or
    # if the key doesn't exist in the queue/lookup table
    # Caller is expected to check before calling
    def reEnqueue(self, key, value=None):
        """Move *key* to the back (most recent), optionally updating its value."""
        node = self.lookup[key]
        if value is None:
            value = node.item[1]
        if node.next == self.tail:
            # Already the last valid node; just refresh the stored value.
            node.item = (key, value)
            return
        # Yank node from the SLL queue: copy node.next's item into node
        # and splice node.next out of the list.
        yanked = node.next
        node.item = node.next.item
        node.next = node.next.next
        # repoint the lookup entry for the payload that moved backwards
        self.lookup[node.item[0]] = node
        # Re-enqueue the yanked node at the end of the SLL queue.
        self.tail.next = yanked
        self.tail.item = (key, value)
        self.lookup[key] = self.tail
        # Make the yanked node the new dummy tail.
        yanked.item = None
        yanked.next = None
        self.tail = yanked

    # Lookup key's node, and return its value
    # None if the key doesn't exist
    def get(self, key):
        """Return the value stored for *key*, or None when absent."""
        node = self.lookup.get(key)
        return (node.item[1] if node is not None else None)
class LRUCache:
    """Fixed-capacity LRU cache backed by an SLL queue with key lookup.

    Front of the queue is least recently used; any get/set of a key moves
    it to the back.
    """

    # @param capacity, an integer
    def __init__(self, capacity):
        self.queue = SLLQueueWithLookup()
        self.capacity = capacity

    # @return an integer
    def get(self, key):
        """Return the value for *key*, or -1 when absent; a hit refreshes recency."""
        value = self.queue.get(key)
        if value is None:
            return -1
        # Mark the key as most recently used.
        self.queue.reEnqueue(key)
        return value

    # @param key, an integer
    # @param value, an integer
    # @return nothing
    def set(self, key, value):
        """Insert or update *key*; evicts the LRU entry when at capacity."""
        if self.queue.get(key) is not None:
            # Existing key: overwrite the value and refresh recency.
            self.queue.reEnqueue(key, value)
            return
        if len(self.queue) == self.capacity:
            # Evict the least recently used entry from the front.
            self.queue.dequeue()
        self.queue.enqueue(key, value)
if __name__ == '__main__':
    # Self-test: three capacity-2 caches exercising update-in-place,
    # eviction order, and recency refresh on get().
    #2,[set(2,1),set(2,2),get(2),set(1,1),set(4,1),get(2)]
    cache1 = LRUCache(2)
    cache1.set(2,1)
    cache1.set(2,2)
    assert cache1.get(2) == 2
    cache1.set(1,1)
    cache1.set(4,1) # evicts (2,2)
    assert cache1.get(2) == -1
    cache = LRUCache(2)
    cache.set(1,2)
    cache.set(2,3)
    assert(cache.get(1) == 2)
    cache.set(3,4) # Invalidates (2,3)
    assert(cache.get(2) == -1)
    assert(cache.get(3) == 4)
    cache.set(4,5) # Invalidates (1,2)
    assert(cache.get(1) == -1)
    assert(cache.get(4) == 5)
    assert(cache.get(3) == 4)
    cache3 = LRUCache(2)
    cache3.set(1,11)
    cache3.set(2,22)
    assert cache3.get(1) == 11
    cache3.set(3,33) # evicts (2,22)
    assert cache3.get(2) == -1
    cache3.set(4,44) # evicts (1,11)
    assert cache3.get(1) == -1
    assert cache3.get(3) == 33
    assert cache3.get(4) == 44
| [
"vinithepooh@gmail.com"
] | vinithepooh@gmail.com |
471480649a8d4bfbc594f739fef463d5f34b1ed4 | b90053c31785e695ba858d750d8515f497c83c89 | /hyperopt/rand.py | 50f675fac0b88cc99b6e25382da6d1bd2d33899c | [] | no_license | npinto/hyperopt | 0df5ca621e7ed47b69c798a599ade56298ab770a | 2249ec648ede0a0fffc80383da8f085b0f3d2a18 | refs/heads/master | 2021-01-17T23:08:21.223904 | 2012-07-30T17:52:13 | 2012-07-30T17:52:13 | 3,286,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | """
Random search - presented as hyperopt.fmin_random
"""
from .base import miscs_update_idxs_vals
import pyll
import hyperopt
def suggest(new_ids, domain, trials, seed=123):
    """Draw one randomly sampled trial document per id in *new_ids*.

    Evaluates the domain's pyll graph once per new id to sample the
    (idxs, vals) for that trial, then packages them as new trial docs.
    NOTE(review): *seed* is accepted but never referenced in this body;
    randomness comes from the pyll evaluation -- confirm upstream intent.
    """
    rval = []
    for new_id in new_ids:
        # -- sample new specs, idxs, vals
        idxs, vals = pyll.rec_eval(domain.s_idxs_vals,
                memo={domain.s_new_ids: [new_id]})
        new_result = domain.new_result()
        new_misc = dict(tid=new_id, cmd=domain.cmd, workdir=domain.workdir)
        miscs_update_idxs_vals([new_misc], idxs, vals)
        rval.extend(trials.new_trial_docs([new_id],
                    [None], [new_result], [new_misc]))
    return rval
| [
"james.bergstra@gmail.com"
] | james.bergstra@gmail.com |
be75de062938ae461aee4a45a827f3d669428556 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/6040/362006040.py | fec08b0c4c9a5dde5cbd5e30e4b23d0499d0ea31 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,085 | py | from bots.botsconfig import *
from records006040 import recorddefs
# X12 version 006040 transaction-set settings: version + functional group.
syntax = {
    'version': '00604',
    'functionalgroup': 'OC',
}
# Segment hierarchy for the transaction set; each dict gives the segment ID,
# its min/max occurrence, and (LEVEL) its nested child segments.
structure = [
    {ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'BGN', MIN: 1, MAX: 1},
        {ID: 'CUR', MIN: 1, MAX: 1},
        {ID: 'REF', MIN: 1, MAX: 9},
        {ID: 'N1', MIN: 1, MAX: 1},
        {ID: 'N2', MIN: 0, MAX: 1},
        {ID: 'DTP', MIN: 0, MAX: 1},
        {ID: 'L5', MIN: 1, MAX: 99999, LEVEL: [
            {ID: 'REF', MIN: 1, MAX: 20},
            {ID: 'DTP', MIN: 1, MAX: 9},
            {ID: 'V1', MIN: 1, MAX: 1},
            {ID: 'N1', MIN: 0, MAX: 9},
            {ID: 'R1', MIN: 0, MAX: 1},
            {ID: 'QTY', MIN: 0, MAX: 1},
            {ID: 'PCT', MIN: 0, MAX: 1},
            {ID: 'R4', MIN: 1, MAX: 5, LEVEL: [
                {ID: 'NX2', MIN: 0, MAX: 20},
            ]},
            {ID: 'AMT', MIN: 1, MAX: 9, LEVEL: [
                {ID: 'CUR', MIN: 0, MAX: 1},
                {ID: 'LQ', MIN: 0, MAX: 99999, LEVEL: [
                    {ID: 'PCT', MIN: 0, MAX: 1},
                    {ID: 'DTP', MIN: 0, MAX: 1},
                ]},
            ]},
        ]},
        {ID: 'SE', MIN: 1, MAX: 1},
    ]}
]
| [
"doug.vanhorn@tagglogistics.com"
] | doug.vanhorn@tagglogistics.com |
db1c238c2307a78ed848ead6fa4abbb78a1beb11 | f62fd455e593a7ad203a5c268e23129473d968b6 | /zaqar-4.0.0/zaqar/transport/wsgi/v1_0/metadata.py | 0fb7d1465b9545e50e3b95de27416384c4442c1c | [
"Apache-2.0"
] | permissive | MinbinGong/OpenStack-Ocata | 5d17bcd47a46d48ff9e71e2055f667836174242f | 8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3 | refs/heads/master | 2021-06-23T05:24:37.799927 | 2017-08-14T04:33:05 | 2017-08-14T04:33:05 | 99,709,985 | 0 | 2 | null | 2020-07-22T22:06:22 | 2017-08-08T15:48:44 | Python | UTF-8 | Python | false | false | 3,659 | py | # Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import falcon
from oslo_log import log as logging
import six
from zaqar.common import decorators
from zaqar.i18n import _
from zaqar.storage import errors as storage_errors
from zaqar.transport import utils
from zaqar.transport import validation
from zaqar.transport.wsgi import errors as wsgi_errors
from zaqar.transport.wsgi import utils as wsgi_utils
LOG = logging.getLogger(__name__)
class Resource(object):
    """WSGI resource for reading and replacing a queue's metadata (API v1)."""

    __slots__ = ('_wsgi_conf', '_validate', '_queue_ctrl')

    def __init__(self, _wsgi_conf, validate, queue_controller):
        self._wsgi_conf = _wsgi_conf
        self._validate = validate
        self._queue_ctrl = queue_controller

    @decorators.TransportLog("Queue metadata")
    def on_get(self, req, resp, project_id, queue_name):
        """Return the queue's metadata as JSON; 404 when the queue is missing."""
        try:
            resp_dict = self._queue_ctrl.get_metadata(queue_name,
                                                      project=project_id)
        except storage_errors.DoesNotExist as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPNotFound(six.text_type(ex))
        except Exception as ex:
            # Any unexpected storage failure maps to 503.
            LOG.exception(ex)
            description = _(u'Queue metadata could not be retrieved.')
            raise wsgi_errors.HTTPServiceUnavailable(description)
        resp.content_location = req.path
        resp.body = utils.to_json(resp_dict)
        # status defaults to 200

    @decorators.TransportLog("Queue metadata")
    def on_put(self, req, resp, project_id, queue_name):
        """Replace the queue's metadata from the request body; 204 on success."""
        try:
            # Place JSON size restriction before parsing
            self._validate.queue_metadata_length(req.content_length)
            # Deserialize queue metadata
            document = wsgi_utils.deserialize(req.stream, req.content_length)
            metadata = wsgi_utils.sanitize(document, spec=None)
            # Restrict setting any reserved queue attributes
            for key in metadata:
                if key.startswith('_'):
                    description = _(u'Reserved queue attributes in metadata '
                                    u'(which names start with "_") can not be '
                                    u'set in API v1.')
                    raise validation.ValidationFailed(description)
        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))
        try:
            self._queue_ctrl.set_metadata(queue_name,
                                          metadata=metadata,
                                          project=project_id)
        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))
        except storage_errors.QueueDoesNotExist as ex:
            raise wsgi_errors.HTTPNotFound(six.text_type(ex))
        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Metadata could not be updated.')
            raise wsgi_errors.HTTPServiceUnavailable(description)
        resp.status = falcon.HTTP_204
        resp.location = req.path
| [
"gongwayne@hotmail.com"
] | gongwayne@hotmail.com |
0cf4cc42f36207bd3ddb02f5b8cd8c97c84e0060 | 3b504a983f1807ae7c5af51078bfab8c187fc82d | /client/gui/HUD2/features/Consumables/ConsumableController.py | bb4ce6df1e0f3d0e5e03d33020690780f72bfb3b | [] | no_license | SEA-group/wowp_scripts | 7d35fd213db95ea6b3dbd1ec6d3e0f13de86ba58 | 2fe54a44df34f2dcaa6860a23b835dcd8dd21402 | refs/heads/master | 2021-09-07T23:10:13.706605 | 2018-03-02T17:23:48 | 2018-03-02T17:23:48 | 117,280,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,482 | py | # Embedded file name: scripts/client/gui/HUD2/features/Consumables/ConsumableController.py
import functools
import InputMapping
from gui.HUD2.core.DataPrims import DataController
from gui.HUD2.hudFeatures import Feature
class ConsumableController(DataController):
    """Maps equipment input commands to consumable usage on the player avatar."""

    def __init__(self, features):
        self._playerAvatar = features.require(Feature.PLAYER_AVATAR)
        self._consumableManager = features.require(Feature.CONSUMABLES_MANAGER)
        self._processor = features.require(Feature.INPUT).commandProcessor
        # Bug fix: functools.partial does not define __eq__, so the original
        # dispose() -- which built *fresh* partials -- never matched the
        # registered handlers and removeListeners() silently removed nothing,
        # leaking listeners.  Keep the exact registered instances instead.
        self._handlers = {}
        for command in InputMapping.EQUIPMENT_COMMANDS:
            handler = functools.partial(self._useConsumbale, command)
            self._handlers[command] = handler
            self._processor.addListeners(command, handler)

    def _useConsumbale(self, command):  # (sic: original method name kept)
        """Trigger the consumable in the slot mapped to *command*, if usable."""
        equipmentCommands = InputMapping.EQUIPMENT_COMMANDS
        if command in equipmentCommands:
            slotID = equipmentCommands.index(command)
            consumable = self._playerAvatar.consumables[slotID]
            # key == -1 marks an empty slot; chargesCount 0 means depleted.
            if consumable['key'] != -1 and int(consumable['chargesCount']) != 0:
                if self._consumableManager.getStatusForConsumable(consumable['key']) != -1:
                    self._playerAvatar.cell.useConsumable(slotID, -1)

    def dispose(self):
        """Unregister the exact handlers registered in __init__ and drop refs."""
        for command, handler in self._handlers.items():
            self._processor.removeListeners(command, handler)
        self._handlers = {}
        self._processor = None
        self._playerAvatar = None
        self._consumableManager = None
        return
"55k@outlook.com"
] | 55k@outlook.com |
9d82a1a0b220e3c400344bd86cf2981dc3585041 | 282769509af68245596dc73de42f552cfd73cd21 | /autoindex/main.py | 9d22a8d7d27f05ebffd5f1489867adfa15f4fe96 | [] | no_license | brutasse-archive/autoindex | 1130173d22c1d996a7cb38fcd59b51d07c0b8068 | cc5cfc414325aff133c684257e8c2bfdc9aaa672 | refs/heads/master | 2021-01-19T14:34:18.472167 | 2012-07-17T21:31:27 | 2012-07-17T21:31:27 | 5,048,409 | 15 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,178 | py | import args
import logging
import os
import urlparse
import sys
from . import watcher, indexer, mirror
from .utils import error
# Valid positional actions accepted on the command line.
ACTIONS = ['mirror', 'watch', 'index']
# Loggers whose records are dropped entirely (too chatty at INFO level).
SILENCE_LOGGERS = [
    "requests.packages.urllib3.connectionpool",
]
def show_help():
    """Print usage to stdout and exit with status 1 (Python 2 print statement)."""
    print """Usage: %s -d directory [-i indexserver] action
Available actions: watch, index, mirror.""" % sys.argv[0]
    sys.exit(1)
class SilenceFilter(logging.Filter):
    """Logging filter that drops every record (returns 0 = discard)."""
    def filter(self, record):
        return 0
filter_ = SilenceFilter('silence')  # shared instance attached to noisy loggers
def main():
    """Parse command-line flags (via the ``args`` module) and dispatch.

    Flags: ``-d directory`` (required) and ``-i indexserver`` (defaults to
    PyPI); the positional argument is one of ACTIONS.  Exits 1 on bad
    usage, 0 on success.  Python 2 code (note ``iteritems``).
    """
    logging.basicConfig(
        level=logging.INFO,
        format="%(levelname)s %(asctime)s %(name)s: %(message)s",
    )
    for logger in SILENCE_LOGGERS:
        log = logging.getLogger(logger)
        log.addFilter(filter_)
    action = None
    directory = None
    index_server = 'http://pypi.python.org'
    index_set = False
    # args.grouped maps each flag to its trailing values; '_' holds positionals.
    for name, target in args.grouped.iteritems():
        if name == '_' and action is None:
            action = target[0]
        elif name == '_': # Only one action at once
            show_help()
        if name == '-d' and directory is None and target:
            directory = target[0]
            if len(target) > 1:
                action = target[1]
        elif name == '-d':
            show_help()
        if name == '-i' and index_set is False and target:
            index_server = target[0]
            index_set = True
            if len(target) > 1:
                action = target[1]
        elif name == '-i':
            show_help()
    if action is None or directory is None:
        show_help()
    if action not in ACTIONS:
        show_help()
    directory = os.path.abspath(directory)
    if action == 'index':
        indexer.index(directory)
    elif action == 'watch':
        watcher.watch(directory)
    elif action == 'mirror':
        parsed = urlparse.urlparse(index_server)
        if parsed.scheme not in ['http', 'https']:
            error("Invalid URL scheme: {0}".format(repr(parsed.scheme)))
        # Force the path to the package index's "simple" API before mirroring.
        parsed = list(parsed)
        parsed[2] = '/simple/'
        index_url = urlparse.urlunparse(parsed)
        mirror.mirror(directory, index_url)
    sys.exit(0)
| [
"buburno@gmail.com"
] | buburno@gmail.com |
4f1d9e5082bfd413f7f1b53a445cbbd19c5c846d | e9c0bb90f07144e26e54b78abc9d102b7affc9f8 | /billreminder/api/__init__.py | a223c4f70060a46701c2149570b3e9c407efed70 | [] | no_license | linxaddict/billreminder | fe8b3aee275172518f1e4757e4a89350f2bd2517 | 7c8f52b8d3bdc55199b4f6417d960facf5c6857e | refs/heads/master | 2021-01-11T17:19:07.965038 | 2017-03-29T19:57:29 | 2017-03-29T19:57:29 | 79,741,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from flask import Blueprint
__author__ = 'Marcin Przepiórkowski'
__email__ = 'mprzepiorkowski@gmail.com'
blueprint = Blueprint('api_v1', __name__)
| [
"mprzepiorkowski@gmail.com"
] | mprzepiorkowski@gmail.com |
72a121a76295373b4a46d60a9f2f0629d7eab90e | 24ebb68eb7b60f04ec0cf5e0cf775e3e3e619edd | /sol/admin.py | 28cc038f85a2fa8af966c5af4f44ffdaec728e53 | [] | no_license | MATT143/SbpSol | 554f5bf9dd3367562d727a1f76904acc0c51319b | 2d7c03918fbf80945e87bbef44eeb6eef4ef6620 | refs/heads/master | 2022-04-20T10:31:01.985988 | 2020-04-18T04:08:21 | 2020-04-18T04:08:21 | 256,666,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | from django.contrib import admin
from .models import com_order_mapper
# Register your models here.
admin.site.register(com_order_mapper) | [
"mnahak@cisco.com"
] | mnahak@cisco.com |
cc109d1b6b56306fed846dbaaeb6e23d27f144ba | f3b233e5053e28fa95c549017bd75a30456eb50c | /p38a_input/L2BB/2BB-2V_MD_NVT_rerun/set_5.py | de3fd1f8c5f88932193bf72c729fd3dad69dcb4b | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | import os
# Stage thermodynamic-integration production inputs: for every lambda
# window, copy the templated production input and PBS job script into the
# window's directory and substitute the XXX placeholder with the lambda
# value (formatted %6.5f) using sed.
# NOTE(review): `dir` shadows the builtin of the same name — harmless in
# this standalone script, but worth renaming if it grows.
dir = '/mnt/scratch/songlin3/run/p38a/L2BB/MD_NVT_rerun/ti_one-step/2BB_2V/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_5.in'
temp_pbs = filesdir + 'temp_5.pbs'
# The 12 lambda windows (values are symmetric about 0.5); each window has
# a working directory named after its %6.5f-formatted value.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    os.chdir("%6.5f" %(j))
    workdir = dir + "%6.5f" %(j) + '/'
    # Production input: copy the template and patch in this window's lambda.
    prodin = workdir + "%6.5f_prod_5.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
    # PBS job script: same copy-and-patch treatment.
    pbs = workdir + "%6.5f_5.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    # Submission is done manually; uncomment to qsub straight away.
    #os.system("qsub %s" %(pbs))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
c30e2545895d8d3c43eea2542f4304ff7c58e870 | 2954ce7184455bddb4e47c9a46d198fcc4b9bbce | /毕业之后/toridasumatome.py | b0d807a2ed45ea34f5826fee620611ad2e2f2639 | [] | no_license | shuyuqing/- | c9668a402cb652085c95a9414daafc1c847d3dbf | bec903bc3426e12e301633fe950961ea1e247c67 | refs/heads/master | 2022-07-05T16:59:23.547338 | 2021-09-19T08:23:49 | 2021-09-19T08:23:49 | 182,628,661 | 4 | 0 | null | 2022-06-21T22:47:03 | 2019-04-22T05:28:13 | Python | UTF-8 | Python | false | false | 806 | py | #把目录里面的文件全部都拿出来,统一放到一个文件夹下面
import os;
from shutil import copyfile
Path=r"C:\Users\a7825\Desktop\server"
def getAllFiles(path):
    """Recursively collect the full path of every file under *path*."""
    collected = []
    for folder, _subdirs, filenames in os.walk(path):
        for filename in filenames:
            collected.append(os.path.join(folder, filename))
    return collected
def matome(afiles):
    """Copy every file in *afiles* into a freshly created Path/matome folder."""
    destination = os.path.join(Path, "matome")
    # Create the target folder (raises if it already exists).
    os.makedirs(destination)
    for source in afiles:
        # Keep only the base file name when copying into the flat folder.
        copyfile(source, os.path.join(destination, os.path.basename(source)))
if __name__ == '__main__':
    # Flatten everything found under Path into the single Path/matome folder.
    afiles = getAllFiles(Path)
    matome(afiles)
    print("\ndone!")
| [
"noreply@github.com"
] | shuyuqing.noreply@github.com |
b65401a2a93ec86b3e85352d6b6482ef79997f3b | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2652486_0/Python/daybreakcx/C.py | fc9b493a717e4853e24efb466de8dae51bfaf30b | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py | #!/usr/bin/python2
import math;
if __name__ == "__main__":
T = (int)(raw_input());
s = raw_input().split();
R = (int)(s[0]);
N = (int)(s[1]);
M = (int)(s[2]);
K = (int)(s[3]);
su = [];
for i in range(5**3):
su.append({});
for i in range(2, M + 1):
for j in range(2, M + 1):
for k in range(2, M + 1):
s = "%d%d%d" % (i, j, k);
num = [i, j, k];
for l in range(0, 8):
t = 1;
for b in range(3):
if ((l & (1 << b)) != 0):
t = t * num[b];
su[t - 1].setdefault(s, 0);
su[t - 1][s] += 1;
pr = [];
for i in range(5**3):
pr.append({});
tot = 0;
for x in su[i]:
tot += su[i][x];
for k in su[i].keys():
pr[i][k] = math.log((float)(su[i][k]) / tot);
print "Case #1:";
for i in range(R):
s = raw_input().split();
res = pr[(int)(s[0]) - 1].copy();
for i in range(1, len(s)):
val = (int)(s[i]) - 1;
tmp = res.copy();
for k in tmp.keys():
if (k not in pr[val]):
del res[k];
for k in pr[val]:
if (k in res):
res[k] += pr[val][k];
bst = "222";
fst = True;
for k in res:
if (fst):
bst = k;
elif (res[k] > res[bst]):
bst = k;
print bst;
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
5e6b59bdfb0a0e8077e93c709d4ff25600917e6b | dc0d7e49eafe40f1c41f631621a6ccdefdcbbf7c | /press/helpers/package.py | 82452aae44d609bd24d24828189f8a9c09da511a | [] | no_license | jr0d/press | b2314b319da5b44d23110036064775796246c5c1 | 477b78700b644b2d333f4d9289f319a52fc54100 | refs/heads/master | 2021-06-15T20:44:18.061919 | 2019-04-24T17:01:37 | 2019-04-24T17:01:37 | 80,559,927 | 7 | 3 | null | 2021-03-25T21:49:09 | 2017-01-31T20:38:44 | Python | UTF-8 | Python | false | false | 394 | py | import os
import pkg_resources
def get_package_version(package):
    """Return the installed version string for *package*, or None if it
    is not installed."""
    try:
        distribution = pkg_resources.get_distribution(package)
    except pkg_resources.DistributionNotFound:
        return None
    return distribution.version
def get_press_version():
    """Version string of the installed 'press' distribution (None if missing)."""
    return get_package_version('press')
def get_press_location():
    """Absolute filesystem location of the installed 'press' distribution."""
    location = pkg_resources.get_distribution('press').location
    return os.path.abspath(location)
| [
"jared.rodriguez@rackspace.com"
] | jared.rodriguez@rackspace.com |
5fd66b1f6f8c2282a4603fca785d42c2ce9e6ccf | 96e6c98ecdb6fc5fb1a7212e48b68a18b98b5116 | /aiosmb/commons/serverinfo.py | a75c76c889fefb2f6783ef7eb970861dd6d22a02 | [] | no_license | raystyle/aiosmb | e4b5f1033cb52744f6485f4b921e7ac08cf77ae1 | 56e0e4d03fdcda626d3005ff80c65ec2d1239060 | refs/heads/master | 2020-06-07T16:33:00.913731 | 2019-06-12T22:34:08 | 2019-06-12T22:34:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py |
from aiosmb.ntlm.structures.avpair import AVPAIRType
from aiosmb.dtyp.structures.filetime import FILETIME
class NTLMServerInfo:
	"""Server details gathered from the TargetInfo AV pairs (and the
	optional Version field) of an NTLM CHALLENGE message."""

	def __init__(self):
		self.domainname = None
		self.computername = None
		self.dnscomputername = None
		self.dnsdomainname = None
		self.local_time = None
		self.dnsforestname = None
		self.os_major_version = None
		self.os_minor_version = None
		self.os_build = None
		self.os_guess = None

	@staticmethod
	def from_challenge(challenge):
		"""Build an NTLMServerInfo from a parsed NTLM challenge message."""
		si = NTLMServerInfo()
		ti = challenge.TargetInfo
		for k in ti:
			if k == AVPAIRType.MsvAvNbDomainName:
				si.domainname = ti[k]
			elif k == AVPAIRType.MsvAvNbComputerName:
				si.computername = ti[k]
			elif k == AVPAIRType.MsvAvDnsDomainName:
				si.dnsdomainname = ti[k]
			elif k == AVPAIRType.MsvAvDnsComputerName:
				si.dnscomputername = ti[k]
			elif k == AVPAIRType.MsvAvDnsTreeName:
				si.dnsforestname = ti[k]
			elif k == AVPAIRType.MsvAvTimestamp:
				if isinstance(ti[k], bytes):
					si.local_time = FILETIME.from_bytes(ti[k]).datetime
				else:
					# BUG FIX: this branch previously tested
					# isinstance(ti[k], dateime) — a typo for the
					# never-imported datetime — which raised NameError
					# for any non-bytes value.  An already-parsed
					# timestamp is now passed through unchanged.
					si.local_time = ti[k]

		if challenge.Version is not None:
			si.os_major_version = challenge.Version.ProductMajorVersion
			si.os_minor_version = challenge.Version.ProductMinorVersion
			si.os_build = challenge.Version.ProductBuild
			si.os_guess = challenge.Version.WindowsProduct

		return si

	def __str__(self):
		t = '=== Server Info ====\r\n'
		for k in self.__dict__:
			t += '%s: %s\r\n' % (k, self.__dict__[k])
		return t
"info@skelsec.com"
] | info@skelsec.com |
b03cc4f5c9c1199ff4210717a984aa6830a972b7 | 10717fe6f68c4ee9bcf27ee62e89581f4a030b8e | /extractor/funimation.py | ce1ee9d43d5e0b8fba42a813e94d85c62dd402e2 | [] | no_license | HagerHosny199/Testing_Project | ff7f9a54b7a213c9d9ade0c5192845c2a29adc8b | 9bc170263e239cc24ccfb2aa33b9913ff799ffe9 | refs/heads/master | 2020-05-17T20:57:01.750640 | 2019-05-08T22:13:06 | 2019-05-08T22:13:06 | 183,954,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,674 | py | # coding: utf-8
from __future__ import unicode_literals
import random
import string
from .common import InfoExtractor
from compat import compat_HTTPError
from utils import (
determine_ext,
int_or_none,
js_to_json,
ExtractorError,
urlencode_postdata
)
class FunimationIE(InfoExtractor):
    """Extractor for Funimation episode pages (funimation.com and
    funimationnow.uk)."""

    _VALID_URL = r'https?://(?:www\.)?funimation(?:\.com|now\.uk)/shows/[^/]+/(?P<id>[^/?#&]+)'
    _NETRC_MACHINE = 'funimation'
    # Bearer token returned by the login endpoint; stays None when the
    # user did not supply credentials.
    _TOKEN = None

    _TESTS = [{
        'url': 'https://www.funimation.com/shows/hacksign/role-play/',
        'info_dict': {
            'id': '91144',
            'display_id': 'role-play',
            'ext': 'mp4',
            'title': '.hack//SIGN - Role Play',
            'description': 'md5:b602bdc15eef4c9bbb201bb6e6a4a2dd',
            'thumbnail': r're:https?://.*\.jpg',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'https://www.funimation.com/shows/attack-on-titan-junior-high/broadcast-dub-preview/',
        'info_dict': {
            'id': '210051',
            'display_id': 'broadcast-dub-preview',
            'ext': 'mp4',
            'title': 'Attack on Titan: Junior High - Broadcast Dub Preview',
            'thumbnail': r're:https?://.*\.(?:jpg|png)',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'https://www.funimationnow.uk/shows/puzzle-dragons-x/drop-impact/simulcast/',
        'only_matching': True,
    }]

    def _login(self):
        """Authenticate against the Funimation API and store the token.

        A 401 response is surfaced to the user as an ExtractorError with
        the server-provided message; other errors propagate unchanged.
        """
        username, password = self._get_login_info()
        if username is None:
            return
        try:
            data = self._download_json(
                'https://prod-api-funimationnow.dadcdigital.com/api/auth/login/',
                None, 'Logging in', data=urlencode_postdata({
                    'username': username,
                    'password': password,
                }))
            self._TOKEN = data['token']
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                error = self._parse_json(e.cause.read().decode(), None)['error']
                raise ExtractorError(error, expected=True)
            raise

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # The page embeds analytics variables (KANE_customdimensions.*)
        # that carry metadata such as the show name and video id.
        def _search_kane(name):
            return self._search_regex(
                r"KANE_customdimensions\.%s\s*=\s*'([^']+)';" % name,
                webpage, name, default=None)

        title_data = self._parse_json(self._search_regex(
            r'TITLE_DATA\s*=\s*({[^}]+})',
            webpage, 'title data', default=''),
            display_id, js_to_json, fatal=False) or {}

        # Video id: TITLE_DATA, then analytics vars / player iframe,
        # finally the player URL from the page's meta tags.
        # NOTE(review): the dot in "KANE_customdimensions.videoID" is not
        # escaped in the pattern below — it still matches, but loosely.
        video_id = title_data.get('id') or self._search_regex([
            r"KANE_customdimensions.videoID\s*=\s*'(\d+)';",
            r'<iframe[^>]+src="/player/(\d+)',
        ], webpage, 'video_id', default=None)
        if not video_id:
            player_url = self._html_search_meta([
                'al:web:url',
                'og:video:url',
                'og:video:secure_url',
            ], webpage, fatal=True)
            video_id = self._search_regex(r'/player/(\d+)', player_url, 'video id')

        title = episode = title_data.get('title') or _search_kane('videoTitle') or self._og_search_title(webpage)
        series = _search_kane('showName')
        if series:
            title = '%s - %s' % (series, title)
        description = self._html_search_meta(['description', 'og:description'], webpage, fatal=True)

        try:
            headers = {}
            if self._TOKEN:
                headers['Authorization'] = 'Token %s' % self._TOKEN
            # pinst_id is a random 8-character alphanumeric instance id.
            sources = self._download_json(
                'https://www.funimation.com/api/showexperience/%s/' % video_id,
                video_id, headers=headers, query={
                    'pinst_id': ''.join([random.choice(string.digits + string.ascii_letters) for _ in range(8)]),
                })['items']
        except ExtractorError as e:
            # A 403 usually carries a structured error payload explaining
            # why playback is denied (region lock, subscription, ...).
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                error = self._parse_json(e.cause.read(), video_id)['errors'][0]
                raise ExtractorError('%s said: %s' % (
                    self.IE_NAME, error.get('detail') or error.get('title')), expected=True)
            raise

        formats = []
        for source in sources:
            source_url = source.get('src')
            if not source_url:
                continue
            source_type = source.get('videoType') or determine_ext(source_url)
            if source_type == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    source_url, video_id, 'mp4',
                    m3u8_id='hls', fatal=False))
            else:
                formats.append({
                    'format_id': source_type,
                    'url': source_url,
                })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': self._og_search_thumbnail(webpage),
            'series': series,
            'season_number': int_or_none(title_data.get('seasonNum') or _search_kane('season')),
            'episode_number': int_or_none(title_data.get('episodeNum')),
            'episode': episode,
            'season_id': title_data.get('seriesId'),
            'formats': formats,
        }
| [
"hagarhosny19@gmail.com"
] | hagarhosny19@gmail.com |
cba903f7e47a262c65c23df2865acf9204600d6e | 21b0b4c27193898207751c91b8b2ed168a1b1638 | /py/py_0022_names_scores.py | d4155cdbd69ffa5c00eb79c3bfee74b5999dbc1a | [
"MIT"
] | permissive | lcsm29/project-euler | 67560a4e66968f1671a3d7ecf2dda6c956893dca | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | refs/heads/main | 2023-07-04T11:45:24.374841 | 2021-08-07T08:20:41 | 2021-08-07T08:20:41 | 371,808,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | # Solution of;
# Project Euler Problem 22: Names scores
# https://projecteuler.net/problem=22
#
# Using names.txt, a 46K text file containing over five-thousand first names,
# begin by sorting it into alphabetical order. Then working out the
# alphabetical value for each name, multiply this value by its alphabetical
# position in the list to obtain a name score.
#
# For example, when the list is sorted into alphabetical order, COLIN,
# which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list.
# So, COLIN would obtain a score of 938 × 53 = 49714.
#
# What is the total of all the name scores in the file?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def fn_brute(n):
    """Total of all name scores in the comma-separated, quoted name file *n*.

    A name's score is its 1-based position in the sorted list times the
    sum of its letters' alphabetical values (A=1 ... Z=26).
    """
    with open(n, 'r') as handle:
        first_line = handle.readlines()[0]
    letter_value = {ch: idx for idx, ch in enumerate('ABCDEFGHIJKLMNOPQRSTUVWXYZ', start=1)}
    names = sorted(entry.strip('"') for entry in first_line.split(','))
    total = 0
    for position, name in enumerate(names, start=1):
        total += position * sum(letter_value[ch] for ch in name)
    return total
if __name__ == '__main__':
    # Run the brute-force scorer on the names file through the timing
    # harness (i = iterations, prob_id labels the output).
    n = 'py_0022_names.txt'
    i = 1
    prob_id = 22
    timed.caller(fn_brute, n, i, prob_id)
| [
"lcsm29@outlook.com"
] | lcsm29@outlook.com |
c3fc1102d39052701862f36df2becbe8bebc1cd6 | ff8bcffe1ad8079004b66cdd47e3ba6f7fe52616 | /superlists/manage.py | 0e0d9a30ac2271770e4fce1849976c988b1f85a3 | [] | no_license | nezaj/tdd-with-python | 1cb34d4f524e384540358e1ba19ad329d84a84cb | 1aec8b0ed672bc85dea5157cc4d8aadae4ac0d8e | refs/heads/master | 2016-09-10T22:42:17.503466 | 2014-11-14T23:03:05 | 2014-11-14T23:03:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | #!/usr/bin/env python3
import os
import sys
from settings import app_config
if __name__ == "__main__":
    # Use the settings module chosen in settings.app_config unless the
    # environment already provides DJANGO_SETTINGS_MODULE.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", app_config)
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"joeaverbukh@gmail.com"
] | joeaverbukh@gmail.com |
cbff601ab46b34759d6bd239213f1295f7f8d03f | 4ae3b27a1d782ae43bc786c841cafb3ace212d55 | /Py_op/example_log/b_glob_log.py | de83db5c330d2591918f7c981bc9b70f2b3bac95 | [] | no_license | bopopescu/Py_projects | c9084efa5aa02fd9ff6ed8ac5c7872fedcf53e32 | a2fe4f198e3ca4026cf2e3e429ac09707d5a19de | refs/heads/master | 2022-09-29T20:50:57.354678 | 2020-04-28T05:23:14 | 2020-04-28T05:23:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,276 | py | import glob
import os
import sys
import concurrent.futures
import timeit
"""
def make_image_thumbnail(filename):
# The thumbnail will be named "<original_filename>_thumbnail.jpg"
base_filename, file_extension = os.path.splitext(filename)
return filename
# Loop through all jpeg files in the folder and make a thumbnail for each
#do some nice things...
#sys.stdout.write("Total running time: %d:%d:%d.\n" % (hours, mins, secs))
from multiprocessing import Pool
def calculate(number):
print(number)
return number
if __name__ == '__main__':
pool = Pool()
result = pool.map(calculate, range(4))
start = timeit.default_timer()
for i in range(1):
for image_file in glob.glob("*.py"):
thumbnail_file = make_image_thumbnail(image_file)
# print(thumbnail_file)
print(100)
stop = timeit.default_timer()
total_time = stop - start
# output running time in a nice format.
mins, secs = divmod(total_time, 60)
hours, mins = divmod(mins, 60)
from concurrent.futures import ProcessPoolExecutor
def calculate(number):
print(number)
print("//")
with ProcessPoolExecutor() as executor:
result = executor.map(calculate, range(10,15))
ProcessPoolExecutor().map(calculate, range(20,25))
print("/")
"""
import concurrent.futures
import urllib.request
URLS = ['http://www.baidu.com/',
'http://www.sina.com/',
'http://www.mi.com/',
'http://jd.com/',
'http://taobao.com/',
'http://google.com/',
'http://yahoo.com',
'http://usatoday.com',
'http://nbc.com']
def load_url(url, timeout):
    """Fetch *url* and return the raw response body as bytes."""
    with urllib.request.urlopen(url, timeout=timeout) as response:
        return response.read()
# Fetch all URLs concurrently on a 2-thread pool (runs at import time).
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
    # Map each submitted future back to its URL for reporting.
    future_to_url = {executor.submit(load_url, url, 60): url for url in URLS}
    for line, val in enumerate(future_to_url):
        print(line, val)
    # Report results in completion order, not submission order.
    for future in concurrent.futures.as_completed(future_to_url):
        url = future_to_url[future]
        try:
            data = future.result()
        except Exception as exc:
            print('%r generated an exception: %s' % (url, exc))
        else:
            print('%r page is %d bytes' % (url, len(data)))
| [
"sunusd@yahoo.com"
] | sunusd@yahoo.com |
ece25bb1aae38b23a21fffc9b33ece1a47647610 | 6b98594c029605806418d187672f476fde5792b7 | /rllab/core/serializable.py | 2505c6ab9c2179e97b5497f8d830bb5863d2aad6 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | russellmendonca/GMPS | 3f65eb250bff008da9beea6b9d1f04aca2d46c6a | 638087160c48f9f016dc74b0904f8ba2503ea285 | refs/heads/master | 2023-01-22T07:53:21.504243 | 2019-10-26T11:23:42 | 2019-10-26T11:23:42 | 178,866,391 | 18 | 8 | NOASSERTION | 2023-01-19T06:42:27 | 2019-04-01T13:12:40 | Python | UTF-8 | Python | false | false | 1,435 | py | import inspect
class Serializable(object):
    """Mixin that records constructor arguments so instances can be
    pickled and re-created by re-invoking ``__init__``.

    NOTE: relies on ``inspect.getargspec``, which was removed in
    Python 3.11 — running on newer interpreters requires migrating to
    ``inspect.getfullargspec``.
    """

    def __init__(self, *args, **kwargs):
        # Remember exactly how the object was constructed.
        self.__args = args
        self.__kwargs = kwargs

    def quick_init(self, locals_):
        """Capture constructor arguments from a subclass ``__init__``.

        Subclasses call ``self.quick_init(locals())`` so the actual
        argument values they received (not the mixin's) are recorded.
        """
        # Guard against double capture when quick_init is invoked at
        # several levels of an inheritance chain.
        if getattr(self, "_serializable_initialized", False):
            return
        spec = inspect.getargspec(self.__init__)
        # Exclude the first "self" parameter
        in_order_args = [locals_[arg] for arg in spec.args][1:]
        # Pick up *args / **kwargs values if the signature declares them.
        if spec.varargs:
            varargs = locals_[spec.varargs]
        else:
            varargs = tuple()
        if spec.keywords:
            kwargs = locals_[spec.keywords]
        else:
            kwargs = dict()
        self.__args = tuple(in_order_args) + varargs
        self.__kwargs = kwargs
        setattr(self, "_serializable_initialized", True)

    def __getstate__(self):
        # Pickle only the recorded constructor arguments, not __dict__.
        return {"__args": self.__args, "__kwargs": self.__kwargs}

    def __setstate__(self, d):
        # convert all __args to keyword-based arguments
        in_order_args = inspect.getargspec(self.__init__).args[1:]
        # Rebuild a fresh instance through __init__ and adopt its state.
        out = type(self)(**dict(zip(in_order_args, d["__args"]), **d["__kwargs"]))
        self.__dict__.update(out.__dict__)

    @classmethod
    def clone(cls, obj, **kwargs):
        """Return a copy of *obj*, overriding selected constructor
        keyword arguments with *kwargs*."""
        assert isinstance(obj, Serializable)
        d = obj.__getstate__()
        d["__kwargs"] = dict(d["__kwargs"], **kwargs)
        out = type(obj).__new__(type(obj))
        out.__setstate__(d)
        return out
| [
"russellm@berkeley.edu"
] | russellm@berkeley.edu |
389ae3f4ee1b0a20461b194c2e7d174236f1a9c1 | 753f729f33a1b00a0a7f5c78d217cc4c609aee6f | /n44/app/migrations/0001_initial.py | 6fc4491c07ec439375e0d57cbfdecf68d7027023 | [] | no_license | nayan-gujju/DRF-Code | 874114a861042d558112f1a8ec95daf1356d5493 | 6fb3fdd5dde352e7b6e3a7363da0e7a3057b1ede | refs/heads/master | 2023-08-06T12:42:23.551603 | 2021-10-06T11:34:54 | 2021-10-06T11:34:54 | 404,650,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | # Generated by Django 3.2.6 on 2021-10-05 06:37
from django.db import migrations, models
class Migration(migrations.Migration):
    # First migration for the app: creates the Student table.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('firstname', models.CharField(max_length=20)),
                ('lastname', models.CharField(max_length=20)),
                ('roll', models.IntegerField()),
                ('city', models.CharField(max_length=30)),
            ],
        ),
    ]
| [
"nayangujarati007@gmail.com"
] | nayangujarati007@gmail.com |
d3ae5cfdef823fb94719c274fa350d2e195544e9 | 5c4b4bb19d98170c20593d1613d6d19e59313bad | /jobs/migrations/0003_remove_formation_center_formation.py | c5abe4e142e0ee5c54e7b3ec36512ed4c0911294 | [] | no_license | Ruldane/djangoportfolio | f339288b5265993448eb0131e2dc51699b956343 | 29a10e909dfdceeb2b338a4a3ae3a9ccf1b31c70 | refs/heads/master | 2023-05-04T07:18:34.047799 | 2019-07-09T07:17:32 | 2019-07-09T07:17:32 | 194,525,404 | 0 | 0 | null | 2023-04-21T20:33:19 | 2019-06-30T14:29:21 | JavaScript | UTF-8 | Python | false | false | 338 | py | # Generated by Django 2.2.2 on 2019-07-01 14:20
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the Formation.center_formation field added in an earlier state.
    dependencies = [
        ('jobs', '0002_auto_20190701_0929'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='formation',
            name='center_formation',
        ),
    ]
| [
"l.nizzoli@gmail.com"
] | l.nizzoli@gmail.com |
3449f79d6aacc3a8171e4c6114d0a3787558e126 | 408099135939ccdb7fc52f110792ce651fb6b00a | /test/integration/test_safe_str.py | 33553b62f76336367f32089eddd383ba0c431b46 | [
"BSD-3-Clause"
] | permissive | thomasrockhu/bfg9000 | 757271db484ddcd06e8b391c3b8818882857f66e | 1cd1226eab9bed2fc2ec6acccf7864fdcf2ed31a | refs/heads/master | 2022-11-29T00:07:15.914649 | 2020-07-24T21:12:38 | 2020-07-24T21:12:38 | 282,257,111 | 0 | 0 | BSD-3-Clause | 2020-07-24T15:37:41 | 2020-07-24T15:37:40 | null | UTF-8 | Python | false | false | 757 | py | import os.path
import re
from . import *
class TestSafeStr(IntegrationTest):
    """Integration tests for the ``safe_str`` example project."""

    def __init__(self, *args, **kwargs):
        super().__init__('safe_str', *args, **kwargs)

    def _assert_file_echoed(self, target, filename):
        # Build *target* and check the build output echoes the path of
        # *filename* from the test data directory on its own line
        # (optionally quoted on Windows, where paths may contain spaces).
        # Extracted because test_foo/test_bar previously duplicated this
        # logic verbatim.
        expected = re.escape(os.path.normpath(os.path.join(
            test_data_dir, 'safe_str', filename
        )))
        if env.host_platform.family == 'windows':
            expected = '"?' + expected + '"?'
        self.assertRegex(self.build(target), r"(?m)^\s*{}$".format(expected))

    def test_foo(self):
        self._assert_file_echoed('foo', 'foo.txt')

    def test_bar(self):
        self._assert_file_echoed('bar', 'bar.txt')
| [
"itsjimporter@gmail.com"
] | itsjimporter@gmail.com |
3bd008f64528001d01faec7a3b472224f4ba7860 | 0a691220c5e2c2a9b99d20b2ac62baa1a1f95ca3 | /djangoimposter/blog/migrations/0008_remove_newslettersignup_posts.py | 10a5ff08419047e1068d7a73612c2742ab678690 | [] | no_license | beasyx0/djangoimposter | a1fc945e09148a8cf2ae41d3f96f4c4565a5b94d | b7e06dfe972306179d42b6595e64a461979d32b1 | refs/heads/main | 2023-02-27T10:49:48.087627 | 2021-02-08T18:54:27 | 2021-02-08T18:54:27 | 334,469,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | # Generated by Django 3.0.11 on 2020-11-26 01:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0007_newslettersignup_posts'),
]
operations = [
migrations.RemoveField(
model_name='newslettersignup',
name='posts',
),
]
| [
"b_easyx@protonmail.com"
] | b_easyx@protonmail.com |
066839e96acfce157df95fa049f0f67467660b92 | 4fb5eb0a9a24fa5c112a4ebc854ee2604b04adda | /python/test/test_limit_order_reject_transaction.py | 885822ef43b4f506079cdb563b724d4e81e6d98a | [
"MIT"
] | permissive | KoenBal/OANDA_V20_Client | ed4c182076db62ecf7a216c3e3246ae682300e94 | e67b9dbaddff6ed23e355d3ce7f9c9972799c702 | refs/heads/master | 2020-03-27T20:42:25.777471 | 2019-12-02T15:44:06 | 2019-12-02T15:44:06 | 147,088,130 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | py | # coding: utf-8
"""
OANDA v20 REST API
The full OANDA v20 REST API Specification. This specification defines how to interact with v20 Accounts, Trades, Orders, Pricing and more. To authenticate use the string 'Bearer ' followed by the token which can be obtained at https://www.oanda.com/demo-account/tpa/personal_token # noqa: E501
OpenAPI spec version: 3.0.23
Contact: api@oanda.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import oanda
from oanda.models.limit_order_reject_transaction import LimitOrderRejectTransaction # noqa: E501
from oanda.rest import ApiException
class TestLimitOrderRejectTransaction(unittest.TestCase):
    """LimitOrderRejectTransaction unit test stubs"""

    def setUp(self):
        # Generated scaffolding: no fixtures are needed yet.
        pass

    def tearDown(self):
        pass

    def testLimitOrderRejectTransaction(self):
        """Test LimitOrderRejectTransaction"""
        # FIXME: construct object with mandatory attributes with example values
        # model = oanda.models.limit_order_reject_transaction.LimitOrderRejectTransaction() # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| [
"koen.bal@gmail.com"
] | koen.bal@gmail.com |
68435be95d769e029853b2f74d217a894b9240e1 | b00873d36e44128ce30623da0ee3b556e4e3d7e7 | /solutions/solution460.py | 1d8d54d21de25921a3e7605d259659cf37177b17 | [
"MIT"
] | permissive | Satily/leetcode_python_solution | b4aadfd1998877b5086b5423c670750bb422b2c8 | 3f05fff7758d650469862bc28df9e4aa7b1d3203 | refs/heads/master | 2021-07-18T07:53:10.387182 | 2021-07-17T06:30:09 | 2021-07-17T06:30:09 | 155,074,789 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,090 | py | class DoubleLinkListNode:
def __init__(self, value, previous=None, next=None):
self.value = value
self.previous = previous
self.next = next
class DoubleLinkList:
def __init__(self):
self.head = DoubleLinkListNode(None)
self.tail = DoubleLinkListNode(None)
self.head.next = self.tail
self.tail.previous = self.head
self.size = 0
def push_front(self, value):
p = DoubleLinkListNode(value, self.head, self.head.next)
self.head.next.previous = p
self.head.next = p
self.size += 1
def pop_front(self):
if self.size > 0:
p = self.head.next
self.head.next.next.previous = self.head
self.head.next = p.next
del p
self.size -= 1
def swap_next(self, p):
q = p.next
p.previous.next, q.next.previous = q, p
p.next, q.previous = q.next, p.previous
p.previous, q.next = q, p
def to_list(self):
result = []
p = self.head
for _ in range(self.size):
result.append(p.next.value)
p = p.next
return result
class LFUCache:
def __init__(self, capacity: 'int'):
self.capacity = capacity
self.index = {}
self.pieces = DoubleLinkList()
def __refresh(self, key):
self.index[key].value = (self.index[key].value[0], self.index[key].value[1], self.index[key].value[2] + 1)
while self.index[key].next.value is not None and self.index[key].value[2] >= self.index[key].next.value[2]:
self.pieces.swap_next(self.index[key])
def get(self, key: 'int') -> 'int':
if key in self.index:
self.__refresh(key)
return self.index[key].value[1]
else:
return -1
def put(self, key: 'int', value: 'int') -> 'None':
if self.capacity == 0:
return
if key in self.index:
self.index[key].value = (self.index[key].value[0], value, self.index[key].value[2])
else:
if len(self.index) == self.capacity:
k = self.pieces.head.next.value[0]
del self.index[k]
self.pieces.pop_front()
self.pieces.push_front((key, value, 0))
self.index[key] = self.pieces.head.next
self.__refresh(key)
if __name__ == "__main__":
    # Smoke test: with capacity 2, inserting a fourth key evicts the
    # least-frequently-used entry; key 2 survives and its value prints.
    cache = LFUCache(2)
    cache.put(3, 1)
    cache.put(2, 1)
    cache.put(2, 2)
    cache.put(4, 4)
    print(cache.get(2))
| [
"houjiaxu@xiaomi.com"
] | houjiaxu@xiaomi.com |
8cf5581cbf1e59a44ddd2822450b268a05f55c4a | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1_neat/16_0_1_xiae_qual.py | adf8463a529680787535a7934c89ca6cfe4f813e | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 4,208 | py | from time import sleep
def a (fname):
    """Problem A driver: read <fname>.in (first line = case count, one N
    per following line) and write one sheep() answer per case to
    <fname>.out in Code Jam format."""
    fin = open(fname + ".in")
    lines = fin.readlines()
    T = int(lines[0])
    fout = open(fname + ".out", "w")
    for t in range(1, T+1):
        line = lines[t]
        N = int(line)
        fout.write("Case #" + str(t) + ": " + sheep(N) + "\n")
def sheep(N):
    """Solve "Counting Sheep": return (as a string) the last number said
    before every decimal digit 0-9 has been seen among N, 2N, 3N, ...,
    or "INSOMNIA" when N == 0 (0, 0, 0, ... never shows new digits).

    Also removes the stray per-iteration debug print of the original.
    """
    if N == 0:
        return "INSOMNIA"
    seen = set()
    multiple = 0
    while len(seen) < 10:
        multiple += N
        s = str(multiple)
        seen.update(s)
    return s
def b (fname):
    """Problem B driver: each case is two input lines (an integer d, then
    a whitespace-separated tuple p); writes minutes(d, p) per case and
    echoes the case number to stdout as progress."""
    fin = open(fname + ".in")
    lines = fin.readlines()
    T = int(lines[0])
    fout = open(fname + ".out", "w")
    for t in range(1, T+1):
        d = int(lines[2*t-1])
        p = tuple([int(p_i) for p_i in lines[2*t].split()])
        fout.write("Case #" + str(t) + ": " + str(minutes(d, p)) + "\n")
        print(t)
minute_memos = {}
def minutes(d, p):
    """Minimum number of minutes to empty all plates.

    p is a tuple of pancake counts.  Each minute either every plate
    loses one pancake, or one plate holding the current maximum is split
    into two plates.  Results are memoized in the module-level
    minute_memos, keyed by the plate tuple.

    NOTE(review): parameter d is threaded through the recursion but never
    used in the computation or the memo key — confirm it can be dropped.
    """
    if tuple(p) in minute_memos:
        return minute_memos[tuple(p)]
    p_max = max(p)
    if p_max <= 0:
        # All plates empty: nothing left to do.
        minute_memos[tuple(p)] = 0
        return 0
    # One minute of eating: every positive count drops by one.
    p_next_norm = [max(pi - 1, 0) for pi in p]
    p_next_spec = list(p_next_norm)  # NOTE(review): unused leftover.
    if p_max > 1:
        # Spend the minute splitting the largest plate into (p_max-i, i)
        # for every i up to p_max/2, or just eat (p_next_norm).
        p_next_specs = [list(p) for i in range(1, int(p_max/2)+1)]
        for i in range(int(p_max/2)):
            p_next_specs[i][p.index(p_max)] = p_max - (i+1)
            p_next_specs[i].append(i+1)
        p_next_specs = [pns for pns in p_next_specs]
        p_next_specs.append(p_next_norm)
        minute_memos[tuple(p)] = 1 + min([minutes(d, tuple(pn)) for pn in p_next_specs])
        return minute_memos[tuple(p)]
    minute_memos[tuple(p)] = 1 + minutes(d, tuple(p_next_norm))
    return minute_memos[tuple(p)]
def c (fname):
    """Problem C driver: each case is a line "l x" (length and repeat
    count) followed by the base string qs; writes c_answer per case."""
    fin = open(fname + ".in")
    lines = fin.readlines()
    T = int(lines[0])
    fout = open(fname + ".out", "w")
    for t in range(1, T+1):
        l1 = lines[2*t-1].split()
        l = l1[0]
        x = l1[1]
        qs = lines[2*t].strip()
        fout.write("Case #" + str(t) + ": " + c_answer(l, x, qs) + "\n")
def simplify_quat(sgn, qpart, qs, x):
    """Perform one reduction step on the front of a quaternion product.

    sgn is the accumulated sign (True == positive), qpart the unreduced
    front of the product, qs the repeated base string, and x the number
    of copies of qs still to append.  Returns the updated
    (sign, front, base, copies_left) tuple.
    """
    if len(qpart) == 1:
        # Nothing left to pair up: either stop, or pull in another copy.
        if x == 0:
            return (sgn, qpart, qs, 0)
        return simplify_quat(sgn, qpart + qs, qs, x - 1)
    first, second = qpart[0], qpart[1]
    if first == second:
        # i*i == j*j == k*k == -1: the pair vanishes and the sign flips.
        if len(qpart) == 2:
            if x == 0:
                return (not sgn, "1", qs, 0)
            return simplify_quat(not sgn, qs, qs, x - 1)
        return (not sgn, qpart[2:], qs, x)
    # Distinct units multiply to the third one, possibly flipping sign.
    products = {
        ('i', 'j'): (True, 'k'), ('j', 'k'): (True, 'i'), ('k', 'i'): (True, 'j'),
        ('j', 'i'): (False, 'k'), ('k', 'j'): (False, 'i'), ('i', 'k'): (False, 'j'),
    }
    positive, unit = products[(first, second)]
    return (positive == sgn, unit + qpart[2:], qs, x)
def c_answer(l, orig_x, orig_qs):
    """Return "YES" if orig_qs repeated orig_x times can be cut into
    three consecutive quaternion factors reducing to i, j and k, else
    "NO".  Works by greedily reducing the front of the (virtual)
    repeated string with simplify_quat, tracking the accumulated sign.

    NOTE(review): parameter l (the reported length) is never used.  The
    repetition count is reduced modulo 64 — presumably because the
    reduced product of the repeated block cycles with a period dividing
    64; confirm against the original derivation.
    """
    sgn = True
    qs = orig_qs
    qpart = qs
    x = int(orig_x) - 1
    x = x%64
    # Reduce until the leading factor is i; bail out if too little
    # material remains to still produce i, j and k.
    while qpart[0] != 'i':
        sgn, qpart, qs, x = simplify_quat(sgn, qpart, qs, x)
        if len(qpart) + x * len(qs) < 3:
            return "NO"
    # Consume the i factor, refilling from the repeated base if needed.
    qpart = qpart[1:]
    if len(qpart) == 0:
        qpart = qs
        x = x - 1
    # Same for the j factor.
    while qpart[0] != "j":
        sgn, qpart, qs, x = simplify_quat(sgn, qpart, qs, x)
        if len(qpart) + x * len(qs) < 2:
            return "NO"
    qpart = qpart[1:]
    if len(qpart) == 0:
        qpart = qs
        x = x - 1
    # And the k factor; if k is the very last symbol we are done.
    while qpart[0] != "k":
        sgn, qpart, qs, x = simplify_quat(sgn, qpart, qs, x)
        if len(qpart) + x * len(qs) == 0:
            return "NO"
    if len(qpart) == 1 and x == 0:
        return "YES"
    qpart = qpart[1:]
    if len(qpart) == 0:
        qpart = qs
        x = x - 1
    # Whatever trails the k factor must reduce to +1 overall.
    while (len(qpart) != 1 or x != 0):
        sgn, qpart, qs, x = simplify_quat(sgn, qpart, qs, x)
    if sgn and qpart == "1":
        return "YES"
    return "NO"
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
1452cf4aa652911b776e92964b67bc0db5ef93c9 | 8cc44eb6cc0fd2e74ce80246ad50e9d510591345 | /q104.py | fa0104621751c2e2c6e7c9babc2422c48b924729 | [] | no_license | gitttttt/lc | 8b6521af300c6903f593b209783560b6b880d8ae | fa1a63cb192666fc6aa5c7c72130993818ea58d0 | refs/heads/master | 2020-05-30T07:12:56.955633 | 2016-12-14T03:16:42 | 2016-12-14T03:16:42 | 68,888,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py |
def maxDepth(root):
    """LeetCode 104: maximum depth of the binary tree rooted at *root*."""
    return depth(root)
def depth(p):
    """Number of nodes on the longest path from *p* down to a leaf
    (0 for an empty subtree)."""
    if p is None:
        return 0
    left_height = depth(p.left)
    right_height = depth(p.right)
    return 1 + max(left_height, right_height)
"zhangyifan@neotel.com.cn"
] | zhangyifan@neotel.com.cn |
7b0a2c9636ac9aadd83d2daf106b7cd7bd840f0a | 173ba16165358bdb735ffc275bc9d6c2590df239 | /rootfs/opt/store/componentstore/server.py | 9ed0ed5e1f62a41ba15a5770d724d1d63ca8793a | [
"MIT"
] | permissive | timmo001/custom-component-store | 44c40b687ebb7fca860dbaf6ee69a9f5c35b45d4 | c384e3db99085223cd09c6d95e134882e4810efa | refs/heads/master | 2020-04-18T12:23:27.832985 | 2019-01-24T16:58:23 | 2019-01-24T16:58:23 | 167,531,672 | 1 | 0 | null | 2019-01-25T10:43:09 | 2019-01-25T10:43:09 | null | UTF-8 | Python | false | false | 6,664 | py | """Custom Components componentstore."""
import os
import componentstore.functions.data as data
import componentstore.functions.manager as manager
from aiohttp import web
PATH = '/config'
REASON = None
REDIS_HOST = None
REDIS_PORT = None
NO_CACHE = False
async def error_view(request):  # pylint: disable=W0613
    """Render the generic error page."""
    from componentstore.view.error import view
    client = request.headers.get('X-FORWARDED-FOR', None)
    print("Serving error to", client)
    rendered = await view()
    return web.Response(body=rendered, content_type="text/html", charset="utf-8")
async def about_view(request):  # pylint: disable=W0613
    """Render the about page."""
    from componentstore.view.about import view
    client = request.headers.get('X-FORWARDED-FOR', None)
    print("Serving about to", client)
    rendered = await view()
    return web.Response(body=rendered, content_type="text/html", charset="utf-8")
async def installed_components_view(request):  # pylint: disable=W0613
    """Render the default page listing the installed components."""
    from componentstore.view.component.installed import view
    client = request.headers.get('X-FORWARDED-FOR', None)
    print("Serving default/Installed view to", client)
    rendered = await view()
    return web.Response(body=rendered, content_type="text/html", charset="utf-8")
async def the_store_view(request):  # pylint: disable=W0613
    """Render 'The Store' page of available components."""
    from componentstore.view.component.the_store import view
    client = request.headers.get('X-FORWARDED-FOR', None)
    print("Serving 'The Store' to", client)
    rendered = await view()
    return web.Response(body=rendered, content_type="text/html", charset="utf-8")
async def component_view(request):
    """Render the detail page for a single component."""
    from componentstore.view.component.component import view
    component = request.match_info['component']
    client = request.headers.get('X-FORWARDED-FOR', None)
    print("Serving view for", component, "to", client)
    rendered = await view(component)
    return web.Response(body=rendered, content_type="text/html", charset="utf-8")
async def json(request):
    """Serve component data as a JSON response.

    Handles both ``/json`` (all components) and
    ``/component/{component}/json`` (a single component): when the route
    carries no ``component`` match, ``None`` is forwarded to
    ``data.get_data``.
    """
    requester = request.headers.get('X-FORWARDED-FOR', None)
    print("Serving JSON requested by", requester)
    # match_info is a mapping; .get replaces the previous bare `except:`
    # which silently swallowed every exception type, not just the missing key.
    component = request.match_info.get('component')
    json_data = await data.get_data(component=component)
    return web.json_response(json_data)
async def install_component(request):
    """Install (or update) a component, then redirect to its detail page."""
    name = request.match_info['component']
    client = request.headers.get('X-FORWARDED-FOR', None)
    print("Installing/updating", name, "requested by", client)
    await manager.install_component(name)
    await data.get_data(True)  # presumably forces a refresh of cached data -- confirm
    raise web.HTTPFound('/component/' + name)
async def uninstall_component(request):
    """Uninstall a component, then redirect to its detail page."""
    name = request.match_info['component']
    client = request.headers.get('X-FORWARDED-FOR', None)
    print("Uninstalling", name, "requested by", client)
    await manager.uninstall_component(name)
    await data.get_data(True)  # presumably forces a refresh of cached data -- confirm
    raise web.HTTPFound('/component/' + name)
async def migrate_component(request):
    """Migrate a component, then redirect to its detail page."""
    name = request.match_info['component']
    client = request.headers.get('X-FORWARDED-FOR', None)
    print("Migrating", name, "requested by", client)
    await manager.migrate_component(name)
    await data.get_data(True)  # presumably forces a refresh of cached data -- confirm
    raise web.HTTPFound('/component/' + name)
async def reloadinstalled(request):  # pylint: disable=W0613
    """Refresh the component data, then redirect to the installed view."""
    await data.get_data(True)
    raise web.HTTPFound('/')
async def reloadstore(request):  # pylint: disable=W0613
    """Refresh the component data, then redirect to the store view."""
    await data.get_data(True)
    raise web.HTTPFound('/store')
def run_server(port=9999, redis_host=None, redis_port=None, nocache=None):
    """Configure and run the webserver.

    Arguments left as ``None`` fall back to the ``REDIS_HOST``,
    ``REDIS_PORT`` and ``NO_CACHE`` environment variables. When any
    startup precondition fails, ``REASON`` is set and every route serves
    the error page instead.
    """
    print("Custom-component-store is starting.")
    global REASON  # pylint: disable=W0603
    global REDIS_HOST  # pylint: disable=W0603
    global REDIS_PORT  # pylint: disable=W0603
    global NO_CACHE  # pylint: disable=W0603
    if redis_host is None:
        REDIS_HOST = os.environ.get('REDIS_HOST')
    else:
        REDIS_HOST = redis_host
    # Fixed copy-paste bug: this branch previously re-tested redis_host and
    # assigned the REDIS_PORT environment value to REDIS_HOST, leaving
    # REDIS_PORT unset whenever redis_port was not given.
    if redis_port is None:
        REDIS_PORT = os.environ.get('REDIS_PORT')
    else:
        REDIS_PORT = redis_port
    # NO_CACHE mirrors the redis settings; the default used to be False,
    # which made this environment fallback unreachable.
    if nocache is None:
        NO_CACHE = os.environ.get('NO_CACHE')
    else:
        NO_CACHE = nocache
    directory = PATH + '/custom_components'
    version_path = PATH + '/.HA_VERSION'
    version = 0
    target = 86  # minimum supported Home Assistant minor version -- TODO confirm
    app = web.Application()
    if not os.path.exists(version_path):
        REASON = 'ha_not_found'
    elif not os.path.exists(PATH):
        REASON = 'no_path'
    else:
        with open(version_path) as version_file:
            version = version_file.readlines()
        # .HA_VERSION holds e.g. "0.86.4"; compare only the minor component.
        version = int(version[0].split('.')[1])
        if version < target:
            REASON = 'version'
        print("HA Version", version)
    if not os.path.exists(directory):
        os.makedirs(directory)
    if not NO_CACHE:
        redis = data.redis_connect()
        if not redis:
            REASON = 'redis_conn_error'
    else:
        print('Cache disabled...')
    if REASON is None:
        app.router.add_route(
            'GET', r'/', installed_components_view)
        app.router.add_route(
            'GET', r'/about', about_view)
        app.router.add_route(
            'GET', r'/component/{component}', component_view)
        app.router.add_route(
            'GET', r'/component/{component}/install', install_component)
        app.router.add_route(
            'GET', r'/component/{component}/json', json)
        app.router.add_route(
            'GET', r'/component/{component}/migrate', migrate_component)
        app.router.add_route(
            'GET', r'/component/{component}/uninstall', uninstall_component)
        app.router.add_route(
            'GET', r'/component/{component}/update', install_component)
        app.router.add_route(
            'GET', r'/json', json)
        app.router.add_route(
            'GET', r'/store', the_store_view)
        app.router.add_route(
            'GET', r'/reloadinstalled', reloadinstalled)
        app.router.add_route(
            'GET', r'/reloadstore', reloadstore)
    else:
        # Startup failed: serve the error page on every route.
        print("There was an issue starting", REASON)
        app.router.add_route(
            'GET', r'/', error_view)
        app.router.add_route(
            'GET', r'/{route}', error_view)
    web.run_app(app, port=port, print=None)
| [
"joasoe@gmail.com"
] | joasoe@gmail.com |
b5fc68f98779b68dfd10ec49981fc1e010a89da0 | 999e1806302dbc2cd15664dadcd826f701484e33 | /python/send_test/send_test.py | 4ed4c8ca3c9b5e71f8ac04c6112982e8faea96e2 | [] | no_license | gdiazh/gsmA6 | 1086b75857135b810e822ed28281cd4050dfefcb | b21e5ce34a5862fb7ac211ac5266eb016c332118 | refs/heads/master | 2021-01-22T02:24:38.863297 | 2017-06-09T22:46:43 | 2017-06-09T22:46:43 | 92,358,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,856 | py | #!/usr/bin/python
__author__ = 'gdiaz'
import serial
import syslog
import time
from threading import Timer
#The following line is for serial over GPIO
port = '/dev/ttyACM0'
class GSM(object):
    """Thin wrapper around a serial connection to a GSM modem (Python 2).

    Provides ANSI-colored debug logging plus raw read/write helpers for
    exchanging AT commands over the given serial port.
    """
    def __init__(self, serial, debug = False):
        # Handle to an already-opened serial.Serial-like object.
        self.gsm = serial
        self.sms = "gms test"
        self.config = "AT+"
        self.debug = debug
    def DEBUG_PRINT(self, msg_type, msg):
        """Print *msg* with a colored level tag; no-op unless debug is enabled."""
        if not(self.debug): return
        # chr(27) is ESC; the bracketed sequences select/reset terminal colors.
        if msg_type == "info":
            print chr(27)+"[0;32m"+"[INFO]: "+chr(27)+"[0m" + msg
        elif msg_type == "warn":
            print chr(27)+"[0;33m"+"[WARN]: "+chr(27)+"[0m" + msg
        elif msg_type == "error":
            print chr(27)+"[0;31m"+"[ERROR]: "+chr(27)+"[0m" + msg
        elif msg_type == "alert":
            print chr(27)+"[0;34m"+"[ALERT]: "+chr(27)+"[0m" + msg
        else:
            print "NON implemented Debug print type"
    def read(self):
        """Read up to 50 bytes from the modem and log them at info level."""
        # data = self.gsm.readline()
        data = self.gsm.read(50)
        # self.gsm.flush()
        self.DEBUG_PRINT("info", data)
    def write(self, cmd):
        """Encode *cmd* as UTF-8, send it to the modem and flush the port."""
        self.gsm.write(cmd.encode("utf-8"))
        self.gsm.flush()
if __name__ == '__main__':
    # Interactive AT-command console for the modem attached at `port`.
    # gsm_serial = serial.Serial(port, 115200, timeout=5, stopbits=serial.STOPBITS_ONE)
    gsm_serial = serial.Serial(port, 115200, timeout=5)
    gsm_hw = GSM(gsm_serial, debug =True)
    # Reference AT command sequence for sending an SMS:
    # AT+CSCS="GSM"
    # AT+CMGF=1
    # AT+CMGS="+56964696411"
    # AT+CMGS="56964696411"
    # test read
    while True:
        cmd = raw_input("cmd:")
        if cmd == "end": break
        elif cmd == "send":
            # chr(13) is CR; chr(26) is Ctrl-Z, the SMS body terminator.
            msg_ = "a"+chr(13)+chr(26)
            gsm_hw.write(msg_)
        elif cmd == "read":
            gsm_hw.read()
        else:
            # Any other input is forwarded verbatim as an AT command.
            gsm_hw.DEBUG_PRINT("warn", "Sending:"+cmd)
            gsm_hw.write(cmd+"\r")
            gsm_hw.read()
    gsm_serial.close() | [
"g.hernan.diaz@gmail.com"
] | g.hernan.diaz@gmail.com |
a0a2146a30dad6e1f54fd0d71dd4dcacf54ccb1b | de40d3fa8d8af0030556d27d6833f6a1a0e7700c | /baekjoon/2997py/a.py | aada5f1e5103ceb13d3105a2b27342fdf07e1344 | [] | no_license | NeoMindStd/CodingLife | cd6a627209c0353f4855f09fd5dfef8da4bbfef6 | bcb6c3752f472e6a4f3b8f158d02bc3599dfcda3 | refs/heads/master | 2022-12-24T10:42:45.390085 | 2022-12-11T16:27:16 | 2022-12-11T16:27:16 | 191,797,634 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | l=sorted(map(int,input().split()))
a,b=l[1]-l[0],l[2]-l[1]
if a>b:print((l[1]+l[0])//2)
elif a<b:print((l[2]+l[1])//2)
else:print(l[2]+a)
| [
"dwj1996@naver.com"
] | dwj1996@naver.com |
64231fcd94ba73210d9850308b49dbe60f0018eb | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/compute/azure-mgmt-avs/generated_samples/workload_networks_list_dns_zones.py | 58293e5eda627766a3a6b37e3d2f8b382cabd137 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,572 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.avs import AVSClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-avs
# USAGE
python workload_networks_list_dns_zones.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """List the workload-network DNS zones of a private cloud and print each one."""
    avs_client = AVSClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
    )
    zones = avs_client.workload_networks.list_dns_zones(
        resource_group_name="group1",
        private_cloud_name="cloud1",
    )
    for zone in zones:
        print(zone)
# x-ms-original-file: specification/vmware/resource-manager/Microsoft.AVS/stable/2023-03-01/examples/WorkloadNetworks_ListDnsZones.json
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
b6309c087d80ab58f92700a063c928d82024d4cf | 9b01f7d430f7ee87217618cfa4567f42635e8923 | /22-06-2017/cloudformation/nginx-demo-1/ansible/.env/bin/cq | 0447234638fdac9ed0daa9425fd3da9c7b199de7 | [] | no_license | awsusergroupsantiago/demos | ccb045545d2a407a39d865cf19800d2b6d284b8f | e7f0dc8d9a4e8f2547c33a5a294fd76bf3ac9c9c | refs/heads/master | 2022-04-30T23:43:30.646556 | 2020-08-08T01:35:40 | 2020-08-08T01:35:40 | 95,129,959 | 2 | 0 | null | 2022-03-29T21:54:09 | 2017-06-22T15:29:25 | Python | UTF-8 | Python | false | false | 3,139 | #!/Users/groupon/Desktop/meetups/AWSUserGroup/22-06-2017/cloudformation/nginx-demo/ansible/.env/bin/python
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import getopt, sys
import boto.sqs
from boto.sqs.connection import SQSConnection
from boto.exception import SQSError
def usage():
    """Print the command-line synopsis."""
    print 'cq [-c] [-q queue_name] [-o output_file] [-t timeout] [-r region]'
def main():
    """Inspect, dump or clear SQS queues according to the CLI options (Python 2 / boto)."""
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hcq:o:t:r:',
                                   ['help', 'clear', 'queue=',
                                    'output=', 'timeout=', 'region='])
    except:
        # NOTE(review): bare except -- any error (not only getopt.GetoptError)
        # falls through to the usage message here.
        usage()
        sys.exit(2)
    queue_name = ''
    output_file = ''
    timeout = 30
    region = ''
    clear = False
    for o, a in opts:
        if o in ('-h', '--help'):
            usage()
            sys.exit()
        if o in ('-q', '--queue'):
            queue_name = a
        if o in ('-o', '--output'):
            output_file = a
        if o in ('-c', '--clear'):
            clear = True
        if o in ('-t', '--timeout'):
            timeout = int(a)
        if o in ('-r', '--region'):
            region = a
    # Connect either to an explicit region or to the default SQS endpoint.
    if region:
        c = boto.sqs.connect_to_region(region)
        if c is None:
            print 'Invalid region (%s)' % region
            sys.exit(1)
    else:
        c = SQSConnection()
    # A named queue is created on demand; otherwise operate on every queue.
    if queue_name:
        try:
            rs = [c.create_queue(queue_name)]
        except SQSError as e:
            print 'An Error Occurred:'
            print '%s: %s' % (e.status, e.reason)
            print e.body
            sys.exit()
    else:
        try:
            rs = c.get_all_queues()
        except SQSError as e:
            print 'An Error Occurred:'
            print '%s: %s' % (e.status, e.reason)
            print e.body
            sys.exit()
    for q in rs:
        if clear:
            n = q.clear()
            print 'clearing %d messages from %s' % (n, q.id)
        elif output_file:
            q.dump(output_file)
        else:
            # Default action: show the queue id and its approximate message count.
            print q.id, q.count(vtimeout=timeout)
if __name__ == "__main__":
    main()
| [
"eduardo.miranda.becerra3@gmail.com"
] | eduardo.miranda.becerra3@gmail.com | |
f65f8080650a094b4947977b4b43b37a56e3a82e | 353fb13f4e8a0d45cd0586f26258276731dcdd3a | /src/octras/algorithm.py | 722156a44307e69285d7f3cd988007a607c12ecf | [] | no_license | sebhoerl/octras | b4b450d41e109978119ac74601716e2429b9e79f | 65cbc8530ef65ab210bcc97fe756a12982593dc3 | refs/heads/develop | 2023-08-01T05:37:38.358657 | 2021-07-28T14:15:24 | 2021-07-28T14:15:24 | 326,031,485 | 0 | 2 | null | 2021-07-28T14:15:25 | 2021-01-01T18:12:33 | Python | UTF-8 | Python | false | false | 259 | py | from octras import Evaluator
class Algorithm:
    """Base class for optimization algorithms driven step-by-step by an Evaluator."""

    # State persistence hooks (set_state / get_state) were sketched here
    # but are not part of the interface yet.

    def advance(self, evaluator: Evaluator):
        """Perform one optimization step against *evaluator*.

        Subclasses must override; the base implementation always raises.
        """
        raise NotImplementedError()
| [
"hoerl.sebastian@gmail.com"
] | hoerl.sebastian@gmail.com |
31636f18fe8ab92996ea388d7c22b23f5b10a8df | 5508368df9df1036755aeaa8574bdadcc25359f8 | /AmadoFinance/migrations/0064_auto_20190123_1538.py | 41fd6a6b8c26b19b034b34623f6f52583a6deae7 | [] | no_license | HanSol1994/Amado | 4627a6165009733059e8e87f545244f69d492b91 | 9fbc37250b9974bbf3a3c3a2571a748a300b2d29 | refs/heads/master | 2022-01-23T23:59:30.666584 | 2019-07-23T08:12:19 | 2019-07-23T08:12:19 | 198,373,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | # Generated by Django 2.1.2 on 2019-01-23 12:08
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: alters RecedeImage.cost to a nullable IntegerField
    # (default 0); the verbose_name is the Persian label for
    # "receipt amount (Rial)".
    dependencies = [
        ('AmadoFinance', '0063_auto_20190123_1537'),
    ]
    operations = [
        migrations.AlterField(
            model_name='recedeimage',
            name='cost',
            field=models.IntegerField(default=0, null=True, verbose_name='مبلغ رسید(ریال)'),
        ),
    ]
| [
"haansol@gmail.com"
] | haansol@gmail.com |
9bb099363a94879ff08830e4559b20747ed7f145 | 7162c7fa1433f8bacc666e611241b32232ef3792 | /src/interrogate/__init__.py | 86ad91c9accf6cd0276684aaafa9a042b96137ee | [
"CC-BY-3.0",
"MIT"
] | permissive | econchick/interrogate | ff4e04e9a4a677a8dd694599e39705f80a5c3ad9 | 1e74611fc5296b0572b6bb11b480d43242c4ec49 | refs/heads/master | 2023-04-11T11:54:42.297583 | 2022-07-29T16:12:10 | 2022-07-29T16:12:10 | 258,385,030 | 497 | 49 | MIT | 2023-09-10T13:43:00 | 2020-04-24T02:33:25 | Python | UTF-8 | Python | false | false | 296 | py | # Copyright 2020-2021 Lynn Root
"""Explain yourself! Interrogate a codebase for docstring coverage."""
# Package metadata; presumably consumed by packaging/docs tooling -- TODO confirm.
__author__ = "Lynn Root"
__version__ = "1.5.0"
__email__ = "lynn@lynnroot.com"
__description__ = "Interrogate a codebase for docstring coverage."
__uri__ = "https://interrogate.readthedocs.io"
| [
"lynn@lynnroot.com"
] | lynn@lynnroot.com |
dd3d90043cc6a29286eed3aea2b6a123c267f7b2 | f51c6d0cebb27c377ce9830deec4b727b9b2ee90 | /AI/04_Plot/Sierpinski_Chaos.py | ee836df99875677b3d364b17ac50293452a7e536 | [] | no_license | dbbudd/Python-Experiments | 1c3c1322583aaaf2016a2f2f3061e6d034c5d1c8 | b6d294bf11a5c92b8578d16aa2f63cc27fc47b07 | refs/heads/master | 2020-04-17T02:21:36.693593 | 2019-01-17T00:18:34 | 2019-01-17T00:18:34 | 166,130,283 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,696 | py | #!/usr/bin/env python
%matplotlib inline #!
import random
import numpy as np
import time
import pylab as pl #!
from IPython import display #!
import numpy as np
pylab.rcParams['figure.figsize'] = (10.0, 8.0)
def Sierpinski_Chaos(gp=None, n=None):
    """Chaos game showing how randomness can lead to structure:
    1. Three corner points are drawn in the plane.
    2. Mark a triangle with these points with pl.plot.
    3. Draw a point outside this triangle - the first 'Game-Point' (gp).
    Repeat n times:
    4. Randomly choose one of the three corner points.
    5. Move the Game-Point half way towards the chosen corner and scatter it.

    Returns a flat numpy array with every visited game point
    (x0, y0, x1, y1, ...).
    """
    #1,2.
    pl.plot([0,4,2,0],[0,0,4,0])
    pl.xlim(0,4)
    pl.ylim(0,4)
    base_points = [[0,0],[4,0],[2,4]]
    # `is None` instead of `== None`: with a numpy array argument the
    # elementwise comparison would raise on truth-testing.
    if gp is None:
        gp = np.array([5,5]) #starting game_point
    if n is None:
        n = 500 #number of iterations
    gp_log = gp.copy()  # accumulates every visited point
    # Bug fixes: the loop used to run a hard-coded range(500), ignoring and
    # shadowing the *n* parameter, and gp_log was re-created on every pass so
    # the returned log only ever held the last two coordinates.
    for _ in range(n):
        # NOTE(review): lw='O' looks like a typo for a numeric linewidth -- confirm
        pl.scatter(gp[0], gp[1], lw='O', s=20) #3
        pl.xlim(0,4) #!
        pl.ylim(0,4) #!
        pl.draw() #!
        display.clear_output(wait=True) #!
        display.display(pl.gcf()) #!
        time.sleep(0.0000005) #!
        #4
        fort_wheel = random.choice(base_points)
        rand_base = np.array(fort_wheel)
        #5: (gp-rand_base) is the direction-vector from the chosen corner to
        # gp; stepping back half of it moves gp half way towards the corner.
        gp = gp - 1.0 / 2 * (gp-rand_base)
        gp_log = np.concatenate((gp_log, gp))
    return gp_log
build_hipster = Sierpinski_Chaos(n=1500)
| [
"dbbudd@gmail.com"
] | dbbudd@gmail.com |
c9abcad5cdb6ef69352cbd95248ec7e04972791e | d326cd8d4ca98e89b32e6a6bf6ecb26310cebdc1 | /BioinformaticsTextbookTrack/chap1/ba1a/ba1a.py | 4b37b97fac93445af48b0caa55dc70dc62ed1ea7 | [] | no_license | dswisher/rosalind | d6af5195cdbe03adb5a19ed60fcbf8c05beac784 | 4519740350e47202f7a45ce70e434f7ee15c6afc | refs/heads/master | 2021-08-09T02:58:17.131164 | 2017-11-12T01:26:26 | 2017-11-12T01:26:26 | 100,122,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py |
# Rosalind BA1A: count the (possibly overlapping) occurrences of a pattern
# in a text read from the file named on the command line.
# Python 2 script (print statement / xrange).
import sys
if len(sys.argv) < 2:
    print "You must specify the name of the file to load!"
    sys.exit(1)
with open(sys.argv[1], "r") as file:
    # First line: the sequence; second line: the pattern to count.
    seq = file.readline().strip()
    pat = file.readline().strip()
count = 0
plen = len(pat)
# Slide a window of len(pat) over every start position; a slice running past
# the end yields a short string that simply fails the comparison.
for i in xrange(len(seq)):
    if seq[i:i + plen] == pat:
        count += 1
print count
| [
"big.swish@gmail.com"
] | big.swish@gmail.com |
20243f291eaf9650f369ab3ad68e3f4032d02e33 | 0ba804704961a64781266af01d946432881d2b36 | /TDDTest/lists/migrations/0002_item_text.py | fedb70043dfdb2410ae7f281706cdf8f6d94945e | [] | no_license | aimiliya/spider | 6a57056da8f10498933c4383d4a63d92928e7398 | 86b39439aa50db15743cfa053a16b9c358694bdd | refs/heads/master | 2020-04-07T19:47:24.685812 | 2018-11-14T13:28:25 | 2018-11-14T13:28:25 | 158,662,720 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | # Generated by Django 2.1.2 on 2018-11-14 07:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds the Item.text field (empty-string default)
    # on top of the initial lists migration.
    dependencies = [
        ('lists', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='item',
            name='text',
            field=models.TextField(default=''),
        ),
    ]
| [
"951416267@qq.com"
] | 951416267@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.