blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0a43b98ce7d6db87225964d9ba742eaf038b8d86
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02922/s743734416.py
|
5689127034e497825c363cb031a305c095bb9ae7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 143
|
py
|
# AtCoder p02922: starting from one usable socket, each power strip with `a`
# outlets adds (a - 1) usable sockets.  Count strips needed to reach `b`.
a, b = map(int, input().split())
strips = 0
sockets = 1
while sockets < b:
    strips += 1
    sockets += a - 1
print(strips)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
33cade64ce28179008d72828added401b0e8d66f
|
5bc8468029a6e1dd0cc75d480a34e646bed64ea2
|
/khabarkoi/asgi.py
|
55dbeb1a0b29913165098f6c260591c4e0f53001
|
[] |
no_license
|
ziaurjoy/Basic-API
|
c1804749593fb8b0bab3df28550280e2ffa27999
|
e7a65b4a0778cd21ff08b3a0a529358c3ef288a8
|
refs/heads/master
| 2021-05-19T10:18:43.941612
| 2020-09-07T14:20:25
| 2020-09-07T14:20:25
| 251,534,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
ASGI config for khabarkoi project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'khabarkoi.settings')
application = get_asgi_application()
|
[
"ziaurjoy802@gmail.com"
] |
ziaurjoy802@gmail.com
|
091130c0662fd0844401f41341fb6248ddc1ebe2
|
2e4f1df3835ab1a0eba70a16800a836b05b82404
|
/gst/get_group_info.py
|
c99105cf572285605dcf161865f88b2aa923f19c
|
[
"MIT"
] |
permissive
|
valohai/g-suite-tools
|
a90fbdaf29815ac76e2f6845c40e8fd421bca076
|
cbf69606ed992646e652261187a34f983491db98
|
refs/heads/master
| 2022-12-10T14:14:14.391443
| 2021-02-08T20:36:36
| 2021-02-26T10:45:57
| 222,693,411
| 1
| 0
|
MIT
| 2022-12-08T06:54:43
| 2019-11-19T12:41:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,411
|
py
|
import argparse
import json
import sys
from typing import List
import tqdm
from gst.credentials import get_directory_read_client
from gst.utils import get_paginated
def main():
    """CLI entry point: fetch all groups (and their members) for a domain.

    Requires ``--domain``; optionally writes the collected group dicts as
    JSON to the file given by ``--write-json``.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--domain", required=True)
    ap.add_argument("--write-json", type=argparse.FileType(mode="w"))
    args = ap.parse_args()
    directory_client = get_directory_read_client()
    domain = args.domain
    groups = get_domain_groups(directory_client, domain)
    print(f"{len(groups)} groups.")
    populate_groups_members(directory_client, groups)
    if args.write_json:
        json.dump(groups, args.write_json)
        # Report the destination path; interpolating the file object itself
        # printed its repr (<_io.TextIOWrapper ...>) instead of the filename.
        print(f"Wrote JSON to {args.write_json.name}", file=sys.stderr)
def populate_groups_members(directory_client, groups: List[dict]) -> None:
    """Attach each group's member list under the ``_members`` key, in place.

    Pages through the Directory API ``members()`` collection for every group.
    """
    for group in tqdm.tqdm(groups, desc="retrieving members"):
        group["_members"] = members = []
        for members_resp in get_paginated(
            directory_client.members().list, {"groupKey": group["id"]}
        ):
            # The Directory API omits the "members" key for groups/pages with
            # no members; .get() avoids a KeyError in that case.
            members.extend(members_resp.get("members", []))
def get_domain_groups(directory_client, domain) -> List[dict]:
    """Return all group resource dicts for *domain* via paginated fetch."""
    groups = []
    for groups_resp in tqdm.tqdm(
        get_paginated(directory_client.groups().list, {"domain": domain}),
        desc="retrieving groups",
    ):
        # "groups" is absent from the response when a page has no results;
        # .get() avoids a KeyError for empty domains.
        groups.extend(groups_resp.get("groups", []))
    return groups
if __name__ == "__main__":
main()
|
[
"akx@iki.fi"
] |
akx@iki.fi
|
a2c147f4468186f6fc2e6530df517fe80b15286b
|
3e59c64c78aa3ffc4ca6ee358ee1a3ba61e2d4af
|
/scripts/fb15k/fb15k_v1.py
|
a899647b39d3c73ffe3916529dba454b4a2f075c
|
[
"MIT"
] |
permissive
|
pminervini/DeepKGC
|
de35f75fac9c64ca6e09e4ab244552792669678d
|
ed55d0a28d7607324def7c48ebde98786c11d5e1
|
refs/heads/master
| 2016-09-06T02:36:47.748324
| 2015-07-06T12:35:07
| 2015-07-06T12:35:07
| 38,617,255
| 5
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,911
|
py
|
#!/usr/bin/python -uB
# -*- coding: utf-8 -*-

# Emits one shell command line per hyperparameter combination for the FB15k
# link-prediction experiments.  Each printed line invokes ./learn_parameters.py
# with a unique run name and redirects output to a matching log file; pipe
# stdout to a shell (or a job queue) to launch the grid.

# Classes of methods
u_vers = ['Unstructured']
base_vers = ['TransE', 'ScalE']
scaltrans_vers = ['ScalTransE']
xi_vers = ['XiTransE', 'XiScalE']
semixi_vers = ['XiScalTransSE', 'XiTransScalSE']
xiscaltrans_vers = ['XiScalTransE']

simple_method_set = base_vers + xi_vers

# Hyperparameter grids.
sim_set = ['L1', 'L2', 'dot']
u_sim_set = ['L2_sqr']  # similarity used only by the 'Unstructured' baseline
margin_set = [1, 2, 5, 10]
ndim_set = [20, 50, 100, 200, 300]
nhid_set = [20, 50, 100, 200, 300]

# Fixed training settings.
epochs = 100
nbatches = 10
lr = 0.1
seed = 123

train_path = 'data/fb15k/FB15k-train.pkl'
valid_path = 'data/fb15k/FB15k-valid.pkl'
test_path = 'data/fb15k/FB15k-test.pkl'

# ADAGRAD
# def adagrad(param, rate, epsilon, gradient, updates, param_squared_gradients):
c, method = 0, 'ADAGRAD'  # c: running counter, makes run/log names unique

cmd_adagrad = ('./learn_parameters.py --seed=%d --strategy=%s --totepochs=%d --test_all=%d --lr=%f --name=fb15k/fb15k_%s_%d '
               ' --train=%s --valid=%s --test=%s --nbatches=%d --no_rescaling --filtered '
               ' --op=%s --sim=%s --ndim=%d --nhid=%d --margin=%d' # varying params
               ' > logs/fb15k/fb15k.%s_%s_%d_%d_%d_%d.log 2>&1')

# Grid over the simple (TransE / ScalE / Xi*) methods.
for op in simple_method_set:
    for sim in sim_set:
        for ndim in ndim_set:
            nhid = ndim  # hidden size tied to the embedding size
            for margin in margin_set:
                print(cmd_adagrad % (seed, method, epochs, epochs, lr, op, c, train_path, valid_path, test_path, nbatches, op, sim, ndim, nhid, margin, op, sim, ndim, nhid, margin, c))
                c += 1

# Grid over the 'Unstructured' baseline, which uses its own similarity set.
for op in u_vers:
    for sim in u_sim_set:
        for ndim in ndim_set:
            nhid = ndim
            for margin in margin_set:
                print(cmd_adagrad % (seed, method, epochs, epochs, lr, op, c, train_path, valid_path, test_path, nbatches, op, sim, ndim, nhid, margin, op, sim, ndim, nhid, margin, c))
                c += 1
|
[
"p.minervini@gmail.com"
] |
p.minervini@gmail.com
|
6a9f8fdd88d5f755ba31aad1901de75f491abbc4
|
56ffce29f0d27f83206e11870d95982c38524aae
|
/apweb/database_test.py
|
774897fdb8bf636f8846c81eeb4ba36319442e8b
|
[] |
no_license
|
adamandpaul/apweb
|
cce365085e2ee58cfbc31544c5a7414e67ad56b4
|
b1bb81fa7d7b39f19e187462aa3447ff482b46af
|
refs/heads/master
| 2022-10-19T02:09:52.437906
| 2021-05-21T06:10:08
| 2021-05-21T06:10:08
| 201,398,036
| 0
| 3
| null | 2022-09-21T21:39:41
| 2019-08-09T05:41:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
# -*- coding: utf-8 -*-
from . import database
from unittest import TestCase
from unittest.mock import MagicMock
from unittest.mock import patch
import pyramid.events
class TestDatabaseConfiguration(TestCase):
    """Unit tests for the Pyramid/SQLAlchemy wiring in the ``database`` module."""

    @patch("zope.sqlalchemy.register")
    def test_db_session_from_request(self, zope_register):
        """The request-scoped session is registered with the transaction manager."""
        request = MagicMock()
        expected_db_session = request.registry["db_session_factory"].return_value
        database.db_session_from_request(request)
        zope_register.assert_called_with(
            expected_db_session, transaction_manager=request.tm
        )

    @patch("sqlalchemy.orm.configure_mappers")
    def test_run_orm_configure_mappers(self, configure_mappers):
        """Delegates to sqlalchemy.orm.configure_mappers (event arg ignored)."""
        database.run_orm_configure_mappers(None)
        configure_mappers.assert_called_with()

    # Patches apply bottom-up: sessionmaker (innermost) is the first mock arg.
    @patch("sqlalchemy.engine_from_config")
    @patch("sqlalchemy.orm.sessionmaker")
    def test_includeme(self, sessionmaker, engine_from_config):
        """includeme builds the engine from settings, binds the session
        factory to it, stores both in the registry, and subscribes mapper
        configuration to ApplicationCreated."""
        config = MagicMock()
        config.registry = {}
        database.includeme(config)
        engine_from_config.assert_called_with(
            config.get_settings.return_value, "sqlalchemy."
        )
        db_engine = engine_from_config.return_value
        db_session_factory = sessionmaker.return_value
        db_session_factory.configure.assert_called_with(bind=db_engine)
        self.assertEqual(config.registry["db_engine"], db_engine)
        self.assertEqual(config.registry["db_session_factory"], db_session_factory)
        config.add_subscriber.assert_called_with(
            database.run_orm_configure_mappers, pyramid.events.ApplicationCreated
        )
|
[
"arterrey@gmail.com"
] |
arterrey@gmail.com
|
f12dde725368f102ef5b15f678c55c17078d4208
|
ba80ca143ba35fd481730786a27ebdb1f88ce835
|
/algorithm/codility/3_permMissingElem.py
|
063ff53aa8c43c233ca38ae1c10147b9ceb9eec0
|
[] |
no_license
|
uiandwe/TIL
|
c541020b65adc53578aeb1c3ba4c6770b3b2e8b3
|
186544469374dd0279099c6c6aa7555ee23e42fe
|
refs/heads/master
| 2022-02-15T08:33:07.270573
| 2022-01-01T15:22:54
| 2022-01-01T15:22:54
| 63,420,931
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")


def solution(A):
    """Return the element missing from A, a permutation of [1..n+1] minus one.

    Conventions preserved from the original: an empty array yields 0, and a
    single-element array whose value is not 1 yields 1.
    """
    n = len(A)
    if n <= 0:
        return 0
    if n == 1 and A[0] != 1:
        return 1
    # Sum of 1..(n+1) minus the observed sum is the missing value.
    expected_total = (n + 1) * (n + 2) // 2
    return expected_total - sum(A)


assert solution([1, 3]) == 2
assert solution([2, 3, 1, 5]) == 4
assert solution([]) == 0
assert solution([2]) == 1
|
[
"noreply@github.com"
] |
uiandwe.noreply@github.com
|
f95bb584108c6caacc35789f70cb634acd8bfcee
|
b4d7fbbd5ba7d73e2a0ed183e76c55a1e8f68996
|
/django/db/migrations/optimizer.py
|
9c9613bb370a0f1b0c58bfb3a37e6e9093de4e20
|
[
"BSD-3-Clause"
] |
permissive
|
ikebrown/django
|
a6ae06946c18c39800dfb20d182da2f8fad4df99
|
8be832b262432081be297d0274ef1ab964a9bcea
|
refs/heads/master
| 2021-01-18T05:52:46.780219
| 2013-10-21T18:40:45
| 2013-10-21T18:40:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,205
|
py
|
from django.db import migrations
class MigrationOptimizer(object):
    """
    Powers the optimization process, where you provide a list of Operations
    and you are returned a list of equal or shorter length - operations
    are merged into one if possible.

    For example, a CreateModel and an AddField can be optimised into a
    new CreateModel, and CreateModel and DeleteModel can be optimised into
    nothing.
    """

    def optimize(self, operations):
        """
        Main optimization entry point. Pass in a list of Operation instances,
        get out a new list of Operation instances.

        Unfortunately, due to the scope of the optimisation (two combinable
        operations might be separated by several hundred others), this can't be
        done as a peephole optimisation with checks/output implemented on
        the Operations themselves; instead, the optimizer looks at each
        individual operation and scans forwards in the list to see if there
        are any matches, stopping at boundaries - operations which can't
        be optimized over (RunSQL, operations on the same field/model, etc.)

        The inner loop is run until the starting list is the same as the result
        list, and then the result is returned. This means that operation
        optimization must be stable and always return an equal or shorter list.
        """
        # Internal tracking variable for test assertions about # of loops
        self._iterations = 0
        while True:
            result = self.optimize_inner(operations)
            self._iterations += 1
            # Fixed point reached: a further pass would change nothing.
            if result == operations:
                return result
            operations = result

    def optimize_inner(self, operations):
        """
        Inner optimization loop.

        Performs at most one merge per call: as soon as a pair reduces, the
        rebuilt list is returned and the outer loop re-runs.
        """
        new_operations = []
        for i, operation in enumerate(operations):
            # Compare it to each operation after it
            for j, other in enumerate(operations[i+1:]):
                result = self.reduce(operation, other)
                if result is not None:
                    # Optimize! Add result, then remaining others, then return
                    # `result` replaces operations[i] and operations[i+1+j];
                    # the two slices preserve everything between and after.
                    new_operations.extend(result)
                    new_operations.extend(operations[i+1:i+1+j])
                    new_operations.extend(operations[i+j+2:])
                    return new_operations
                if not self.can_optimize_through(operation, other):
                    # Blocked: keep the operation and stop scanning past it.
                    new_operations.append(operation)
                    break
            else:
                # for/else: no blocking operation found; keep as-is.
                new_operations.append(operation)
        return new_operations

    #### REDUCTION ####

    def reduce(self, operation, other):
        """
        Either returns a list of zero, one or two operations,
        or None, meaning this pair cannot be optimized.
        """
        # Dispatch table: (type of first op, type of second op) -> handler.
        submethods = [
            (migrations.CreateModel, migrations.DeleteModel, self.reduce_model_create_delete),
            (migrations.AlterModelTable, migrations.DeleteModel, self.reduce_model_alter_delete),
            (migrations.AlterUniqueTogether, migrations.DeleteModel, self.reduce_model_alter_delete),
            (migrations.AlterIndexTogether, migrations.DeleteModel, self.reduce_model_alter_delete),
        ]
        for ia, ib, om in submethods:
            if isinstance(operation, ia) and isinstance(other, ib):
                return om(operation, other)
        return None

    def reduce_model_create_delete(self, operation, other):
        """
        Folds a CreateModel and a DeleteModel into nothing.
        """
        if operation.name == other.name:
            return []
        return None

    def reduce_model_alter_delete(self, operation, other):
        """
        Folds an AlterModelSomething and a DeleteModel into nothing.
        """
        if operation.name == other.name:
            return [other]
        return None

    #### THROUGH CHECKS ####

    def can_optimize_through(self, operation, other):
        """
        Returns True if it's possible to optimize 'operation' with something
        the other side of 'other'. This is possible if, for example, they
        affect different models.
        """
        # Base implementation is conservative: nothing can be scanned past.
        return False
|
[
"andrew@aeracode.org"
] |
andrew@aeracode.org
|
5768b4aa944cf0a267a5d51df9863b7719905cde
|
c57376701537dc6969939c3afb51d542d670db61
|
/String/string_4.py
|
e6561f80c80b07d1636fe290b6c22921b0d65b70
|
[] |
no_license
|
dangnam739/Learn_Python_Begin
|
d3f5f24504b3c703de4e981abb432f3734558e5d
|
81764172475f26374a3e21d150395a99e8a183e6
|
refs/heads/master
| 2021-07-15T19:13:58.156215
| 2020-08-21T06:38:04
| 2020-08-21T06:38:04
| 200,081,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,688
|
py
|
### String methods demo ###

# int() conversion returns a new object; type(a) still reports str.
a = '12'
print(int(a))
print(type(a))

# capitalize: first character upper-cased, all remaining letters lower-cased.
a = 'how kteam - free education'
b = a.capitalize()
print(b)
# upper: upper-case every character.
b = a.upper()
print(b)
# lower: lower-case every character.
b = a.lower()
print(b)
# swapcase: lower-case -> upper-case and vice versa.
b = a.swapcase()
print(b)
# title: upper-case the first letter of every word, lower-case the rest.
b = a.title()
print(b)
# center(width[, fillchar]): center within `width`, padded with `fillchar`
# (a string of length 1).
b = a.center(50, '*')
print(b)
# rjust(width[, fillchar]): right-justify within `width`, padded on the left.
b = a.rjust(50, '.')
print(b)
# ljust(width[, fillchar]): left-justify within `width`, padded on the right.
b = a.ljust(50, '.')
print(b)

## Processing methods

# encode: encode the string to bytes.
b = a.encode(encoding='utf-8', errors='strict')
print(b)
b = a.encode()
print(b)
# join: concatenate the items of a list using `a` as the separator.
b = a.join([' -1-', '-2- ', '-3- '])
print(b)
# replace: substitute a substring with a new one.
b = a.replace('o', 'K')  # replaces every occurrence
print(b)
b = a.replace('o', 'K', 1)  # replaces only the first occurrence
print(b)
# strip(): remove whitespace (including escape sequences such as \n)
# from both ends.
# strip('<chars>'): remove the given characters from both ends.
a = ' how kteam \n'
print(a)
b = a.strip()
print(b)
a = 'how kteamh'
b = a.strip('h')  # can also strip a set of characters, e.g. a.strip('ho')
print(b)
# lstrip(): strip from the left (leading) end only.
# rstrip(): strip from the right (trailing) end only.
|
[
"47108512+ChrisZangNam@users.noreply.github.com"
] |
47108512+ChrisZangNam@users.noreply.github.com
|
919567ff88adb1a6e3f2a56cc4b28afd54153228
|
419572051aedc42fec6d1a8ec7b90b3cd0ba6637
|
/pandaharvester/harvestercredmanager/no_voms_cred_manager.py
|
6af30bb92a1925dc5294970415213a96cac0d2d8
|
[
"Apache-2.0"
] |
permissive
|
wyang007/panda-harvester
|
e5c43688be5ebee24860b1a7c7b2c241ae2bc4ac
|
c24d75eabc57b14779e72b00a65162db341c99f9
|
refs/heads/master
| 2021-04-27T03:11:25.832083
| 2018-02-26T08:11:30
| 2018-02-26T08:11:30
| 122,710,771
| 0
| 0
| null | 2018-02-24T06:28:38
| 2018-02-24T06:28:38
| null |
UTF-8
|
Python
| false
| false
| 2,244
|
py
|
import subprocess
from pandaharvester.harvestercore.plugin_base import PluginBase
from pandaharvester.harvestercore import core_utils
# logger
_logger = core_utils.setup_logger('no_voms_cred_manager')
# credential manager with no-voms proxy
class NoVomsCredManager(PluginBase):
    """Harvester credential manager for plain (no-VOMS-extension) proxies.

    Relies on plugin config attributes: ``outCertFile`` (proxy location),
    plus ``voms`` and ``inCertFile`` for renewal.
    """

    # constructor
    def __init__(self, **kwarg):
        PluginBase.__init__(self, **kwarg)

    # check proxy
    def check_credential(self):
        """Return True if the proxy in outCertFile is valid for >= 72 hours."""
        # make logger
        mainLog = core_utils.make_logger(_logger, method_name='check_credential')
        comStr = "voms-proxy-info -exists -hours 72 -file {0}".format(self.outCertFile)
        mainLog.debug(comStr)
        try:
            p = subprocess.Popen(comStr.split(),
                                 shell=False,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            stdOut, stdErr = p.communicate()
            retCode = p.returncode
        except Exception:
            # Was a bare ``except:``, which also swallowed KeyboardInterrupt
            # and SystemExit; narrowed while keeping the best-effort behavior.
            core_utils.dump_error_message(mainLog)
            return False
        mainLog.debug('retCode={0} stdOut={1} stdErr={2}'.format(retCode, stdOut, stdErr))
        return retCode == 0

    # renew proxy
    def renew_credential(self):
        """Renew the proxy; return (success, combined stdout/stderr string)."""
        # make logger
        mainLog = core_utils.make_logger(_logger, method_name='renew_credential')
        comStr = "voms-proxy-init -rfc -voms {0} -out {1} -valid 96:00 -cert={2}".format(self.voms,
                                                                                         self.outCertFile,
                                                                                         self.inCertFile)
        mainLog.debug(comStr)
        try:
            p = subprocess.Popen(comStr.split(),
                                 shell=False,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            stdOut, stdErr = p.communicate()
            retCode = p.returncode
            mainLog.debug('retCode={0} stdOut={1} stdErr={2}'.format(retCode, stdOut, stdErr))
        except Exception:
            # Narrowed from bare ``except:``; failure is reported via the
            # (False, message) return value rather than raised.
            stdOut = ''
            stdErr = core_utils.dump_error_message(mainLog)
            retCode = -1
        return retCode == 0, "{0} {1}".format(stdOut, stdErr)
|
[
"tmaeno@bnl.gov"
] |
tmaeno@bnl.gov
|
ac8b0c1856a22c93b5d665b62610ebd8af083ae9
|
4962f934b0e94505a95ae50903cab5a9327171fc
|
/jsonhash/__init__.py
|
82a550b4de60873d457716166d251054eedbc86c
|
[
"Apache-2.0"
] |
permissive
|
FlorianLudwig/jsonhash
|
f0d86267c94f6ee1de74b22c680baabf28c78dac
|
aae53363c3eba867189ea5ac774bcef251e54dc2
|
refs/heads/master
| 2021-01-10T17:21:32.048467
| 2016-04-07T19:25:00
| 2016-04-07T19:25:00
| 50,778,882
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
import json
import hashlib
def hash(object, algorithm=None):
    """Return a hashlib hash object for *object*.

    *object* may be None, an int, float, str, dict or array; dict keys are
    sorted so logically-equal objects hash identically.  *algorithm* is a
    hashlib constructor (default: hashlib.sha256).

    NOTE: deliberately shadows the built-in ``hash`` -- this is the module's
    public API (``jsonhash.hash``).
    """
    if algorithm is None:
        algorithm = hashlib.sha256
    data_string = json.dumps(object,
                             skipkeys=False,
                             ensure_ascii=False,
                             check_circular=True,
                             allow_nan=True,
                             cls=None,
                             indent=None,
                             separators=(',', ':'),
                             default=None,
                             sort_keys=True)
    # json.dumps returns str on Python 3 and no longer accepts the Python
    # 2-only ``encoding`` keyword (it raised TypeError here); hashlib
    # constructors require bytes, so encode explicitly.
    return algorithm(data_string.encode('utf-8'))
|
[
"f.ludwig@greyrook.com"
] |
f.ludwig@greyrook.com
|
3ce5571b618328f51bd78d5786b8820045c53ba1
|
9620337c5ce9294ebc0e29aafa16aa2545fd3afa
|
/Square.py
|
3b369a9eded8898aebc55ffcf26ea3dad96ec64c
|
[] |
no_license
|
balajimanikandanm/balajib
|
f482840ac3491b26b9315fc9f1e16e6616317638
|
d065305ca7c104f7a140b4a6f09b4f747f9ac90d
|
refs/heads/master
| 2021-06-08T16:51:52.639877
| 2019-07-16T03:59:21
| 2019-07-16T03:59:21
| 95,753,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
# Read an integer and print the sum of the squares of its digits, but only
# when it has exactly two or three digits; any other input prints nothing.
n = int(input())
if 10 <= n < 1000:
    print(sum(int(digit) ** 2 for digit in str(n)))
|
[
"noreply@github.com"
] |
balajimanikandanm.noreply@github.com
|
c510a7d18152ae7d067c151c030d1a80cca96576
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03778/s222149305.py
|
ada8009e48476f017c9593ae7a5970d246338c8b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
#!/usr/bin/env python3
# AtCoder p03778: a ruler of width w starts at position a; a second one at b.
# Print the length of the uncovered gap between them (0 if they touch or
# overlap, including the a == b case).
w, a, b = map(int, input().split())
gap = abs(a - b) - w
print(gap if gap > 0 else 0)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ab518993e7504fd0345f8ff85893401197724473
|
369e260e100db9ab5cc8b1711e99ef5e49aec173
|
/ml/m09_selectModel2.py
|
8500764c686e4ff32f05ffd8638b5b574a0f8beb
|
[] |
no_license
|
HWALIMLEE/study
|
7aa4c22cb9d7f7838634d984df96eed75f7aefea
|
8336adc8999126258fe328d6b985a48e32667852
|
refs/heads/master
| 2023-03-26T09:11:19.606085
| 2021-03-29T23:03:04
| 2021-03-29T23:03:04
| 259,555,730
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
# Boston housing modelling exercise: run every scikit-learn regressor on the
# dataset and print each model's score.
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.utils.testing import all_estimators  # run the ~26 models in one go
import warnings
from sklearn.metrics import r2_score

warnings.filterwarnings('ignore')  # silence sklearn warnings

# header=1: skip the CSV's first metadata row; column names are on row 2.
boston=pd.read_csv('./data/csv/boston_house_prices.csv',header=1)
x=boston.iloc[:,0:13]  # feature columns 0..12
y=boston.iloc[:,13]    # target column
# (with a plain numpy array, ordinary slicing would work directly)
print("x:",x)
print("y:",y)

warnings.filterwarnings('ignore')
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=44)

# 3. model
warnings.filterwarnings('ignore')
allAlgorithms = all_estimators(type_filter='regressor')  # all regressor classes
for (name,algorithm) in allAlgorithms:  # yields (name, estimator class) pairs
    model=algorithm()
    model.fit(x_train,y_train)
    y_pred = model.predict(x_test)
    score=model.score(x_test,y_test)
    print(name,"의 정답률",score)

import sklearn
print(sklearn.__version__)
# all_estimators works as imported here only on older sklearn releases;
# sklearn.utils.testing was removed in newer versions.
|
[
"hwalim9612@gmail.com"
] |
hwalim9612@gmail.com
|
a6d54c808fed3224faeaa67a6f4784fc46e699fd
|
bc441bb06b8948288f110af63feda4e798f30225
|
/flowable_sdk/model/ops_automation/job_tasks_pb2.pyi
|
dc12a5542a2e6c124251065644a1dc8ee7723caf
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,477
|
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from flowable_sdk.model.ops_automation.mail_info_pb2 import (
MailInfo as flowable_sdk___model___ops_automation___mail_info_pb2___MailInfo,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedScalarFieldContainer as google___protobuf___internal___containers___RepeatedScalarFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
# NOTE(review): this stub is generated (see file header); prefer regenerating
# over hand-editing.  The comments below are review annotations only.

# Builtin aliases so proto fields whose names collide with builtins
# (e.g. ``float``/``int``) cannot shadow the types used in annotations below.
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
    # Python 2 only: ``buffer`` and ``unicode`` do not exist on Python 3.
    builtin___buffer = buffer
    builtin___unicode = unicode


class JobTasks(google___protobuf___message___Message):
    # Generated message stub for the ops_automation JobTasks proto.
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    id = ... # type: typing___Text
    jobId = ... # type: typing___Text
    jobName = ... # type: typing___Text
    menuName = ... # type: typing___Text
    execId = ... # type: typing___Text
    resourceType = ... # type: typing___Text
    resourceId = ... # type: typing___Text
    resourceVId = ... # type: typing___Text
    resourceVName = ... # type: typing___Text
    trigger = ... # type: typing___Text
    execUser = ... # type: typing___Text
    hosts = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
    status = ... # type: typing___Text
    successRate = ... # type: builtin___float
    error = ... # type: typing___Text
    createTime = ... # type: typing___Text
    updateTime = ... # type: typing___Text
    creator = ... # type: typing___Text
    org = ... # type: builtin___int

    # Singular message field, exposed read-only as a property.
    @property
    def mail(self) -> flowable_sdk___model___ops_automation___mail_info_pb2___MailInfo: ...

    def __init__(self,
        *,
        id : typing___Optional[typing___Text] = None,
        jobId : typing___Optional[typing___Text] = None,
        jobName : typing___Optional[typing___Text] = None,
        menuName : typing___Optional[typing___Text] = None,
        execId : typing___Optional[typing___Text] = None,
        resourceType : typing___Optional[typing___Text] = None,
        resourceId : typing___Optional[typing___Text] = None,
        resourceVId : typing___Optional[typing___Text] = None,
        resourceVName : typing___Optional[typing___Text] = None,
        trigger : typing___Optional[typing___Text] = None,
        execUser : typing___Optional[typing___Text] = None,
        hosts : typing___Optional[typing___Iterable[typing___Text]] = None,
        status : typing___Optional[typing___Text] = None,
        mail : typing___Optional[flowable_sdk___model___ops_automation___mail_info_pb2___MailInfo] = None,
        successRate : typing___Optional[builtin___float] = None,
        error : typing___Optional[typing___Text] = None,
        createTime : typing___Optional[typing___Text] = None,
        updateTime : typing___Optional[typing___Text] = None,
        creator : typing___Optional[typing___Text] = None,
        org : typing___Optional[builtin___int] = None,
        ) -> None: ...
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> JobTasks: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> JobTasks: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    # HasField/ClearField accept only the literal field names listed.
    def HasField(self, field_name: typing_extensions___Literal[u"mail",b"mail"]) -> builtin___bool: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"createTime",b"createTime",u"creator",b"creator",u"error",b"error",u"execId",b"execId",u"execUser",b"execUser",u"hosts",b"hosts",u"id",b"id",u"jobId",b"jobId",u"jobName",b"jobName",u"mail",b"mail",u"menuName",b"menuName",u"org",b"org",u"resourceId",b"resourceId",u"resourceType",b"resourceType",u"resourceVId",b"resourceVId",u"resourceVName",b"resourceVName",u"status",b"status",u"successRate",b"successRate",u"trigger",b"trigger",u"updateTime",b"updateTime"]) -> None: ...
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
8000e75d46ed9abd3bbb9b77c384719a41849c7a
|
e54993bb28e72a147e513038a6ad938fcaecc5c6
|
/2018/6-2.py
|
a15fe749802ad0ba3658f1173718aea63542470a
|
[] |
no_license
|
desecho/adventofcode
|
84dc4d90bf9aefa82211b1222588897413efed38
|
239527cb57d12eb2ed02a396d74adfc824df5188
|
refs/heads/master
| 2021-06-15T05:13:24.558169
| 2021-03-18T23:36:37
| 2021-03-18T23:36:37
| 160,407,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,058
|
py
|
from collections import defaultdict
def calculate_distance(coord1, coord2):
    """Manhattan distance between two (x, y) coordinate pairs."""
    dx = abs(coord1[0] - coord2[0])
    dy = abs(coord1[1] - coord2[1])
    return dx + dy
def get_coords():
    """Parse '6.txt' into a list of (x, y) int tuples, one per line.

    Expected line format: "x, y" (comma-and-space separated).
    NOTE(review): the file handle is never closed; a ``with`` block would be
    safer -- left unchanged here.
    """
    f = open('6.txt')
    lines = f.read().splitlines()
    coords = []
    for line in lines:
        coord = line.split(', ')
        coord = (int(coord[0]), int(coord[1]))
        coords.append(coord)
    return coords
def get_max_coord_value():
    """Largest x or y value across the global ``coords``.

    Returns 0 when ``coords`` is empty or holds only negative values,
    matching the original accumulator's starting value.
    """
    best = 0
    for x, y in coords:
        best = max(best, x, y)
    return best
# Module-level setup: read the puzzle input once; both values are used as
# globals by the surrounding functions and loops.
coords = get_coords()
max_coord_value = get_max_coord_value()
def calculate_value(coord):
    """Total Manhattan distance from ``coord`` to every point in the global
    ``coords`` list."""
    return sum(calculate_distance(point, coord) for point in coords)
# Part 2: count grid cells whose total distance to all input coordinates is
# below the threshold.
max_value = 10000
canvas = {}  # NOTE(review): assigned but never used in this chunk
area = 0
for x in range(0, max_coord_value):
    for y in range(0, max_coord_value):
        coord = (x, y)
        n = calculate_value(coord)
        if n < max_value:
            area += 1
print(area)
|
[
"desecho@gmail.com"
] |
desecho@gmail.com
|
901724595d4e863dcdff9795aeb24d6f8e20e023
|
989b3499948137f57f14be8b2c77d0610d5975e6
|
/python-package/daily_study/1주차(07.09~07.13)/1주차_파이썬 과제/ii_get_firsts_age.py
|
6f8acc79193e6540ffb53bc5bba8fd4a4015d551
|
[] |
no_license
|
namkiseung/python_BasicProject
|
76b4c070934ad4cb9d16ce844efa05f64fb09ac0
|
460d05248b2d1431624aba960e28bece888643e4
|
refs/heads/master
| 2022-12-13T21:12:06.865241
| 2020-04-23T01:30:08
| 2020-04-23T01:30:08
| 142,980,920
| 1
| 1
| null | 2022-12-08T02:27:40
| 2018-07-31T07:49:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,910
|
py
|
# -*- coding: utf-8 -*-

# Sample address book: one dict per person with 'age', 'fav' and 'name' keys.
name_book = [{'age': 31, 'fav': ['3g', 'chi', 'piz'], 'name': 'ttamna'},
             {'age': 32, 'fav': ['cof', 'greentea'], 'name': 'hope'},
             {'age': 22, 'fav': ['sprite', 'pepsi'], 'name': 'mirae'},
             {'age': 21, 'fav': ['choco', 'freetime'], 'name': 'gunin'},
             {'age': 2, 'fav': ['can', 'godunguh'], 'name': 'mango'}]


def get_firsts_age():
    """Return the value of the 'age' key of name_book's first entry."""
    first_entry = name_book[0]
    return first_entry['age']


if __name__ == "__main__":
    print('age의 키값은? ', get_firsts_age())
# The two triple-quoted blocks below are learner notes kept as no-op string
# literals (in Korean: a [] / () / {} syntax cheat-sheet, then discarded
# scratch code).  They are runtime string expressions, left byte-identical.
'''
[] 는 array를 쓰는 대표 타입 (배열 초기화나 선언시 사용)
arr = [] # 빈 배열을 만들 때 []사용
arr = [1,2,3,4] #원소가 있는 배열을 만들 때 []사용
arr[3] #배열의 3번째 원소에 접근할 때 []사용
() 는 tuple을 선언 초기화시 사용(원소 접근할때)
mytuple = () #빈 튜플 생성할 때 ()사용
mytuple = (1,2,3,4) # 원소가 있는 튜플을 만들 때 ()사용
mytuple[3] # 튜플의 원소에 접근할 때 []사용
{} 는 dictionary의 대표 타입(딕셔너리 선언 및 초기화시 사용. (키에 대응하는 값 할당하거나 접근))
mydictionary = {} #빈 딕셔너리 생성 시 {}사용
mydictionary = {"mouse":3, "penguin":5}
mydictionary["mouse"] # key("mouse")에 대응하는 value(3)에 접근할 때 사용
mydictionary["cat"] = 1 # key("cat")에 대한 value(1) 생성
'''
'''
[쓰레기가 된 코드]
c_name_book=dict()
#print(type(name_book))
#result = name_book["age"]
#for seq in len(name_book):
#c_name_book += name_book[seq]
'''
|
[
"rlzld100@gmail.com"
] |
rlzld100@gmail.com
|
e68623770b137be6b14de90280ff4515b723457a
|
38c10c01007624cd2056884f25e0d6ab85442194
|
/third_party/lzma_sdk/lzma_sdk.gyp
|
a1167d5f71c2b8b7d363c211a41bd59ce400bfd2
|
[
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
zenoalbisser/chromium
|
6ecf37b6c030c84f1b26282bc4ef95769c62a9b2
|
e71f21b9b4b9b839f5093301974a45545dad2691
|
refs/heads/master
| 2022-12-25T14:23:18.568575
| 2016-07-14T21:49:52
| 2016-07-23T08:02:51
| 63,980,627
| 0
| 2
|
BSD-3-Clause
| 2022-12-12T12:43:41
| 2016-07-22T20:14:04
| null |
UTF-8
|
Python
| false
| false
| 2,150
|
gyp
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'variables': {
    # Source list shared by the default and (Windows-only) x64 targets.
    'lzma_sdk_sources': [
      '7z.h',
      '7zAlloc.c',
      '7zAlloc.h',
      '7zBuf.c',
      '7zBuf.h',
      '7zCrc.c',
      '7zCrc.h',
      '7zCrcOpt.c',
      '7zDec.c',
      '7zFile.c',
      '7zFile.h',
      '7zIn.c',
      '7zStream.c',
      'Alloc.c',
      'Alloc.h',
      'Bcj2.c',
      'Bcj2.h',
      'Bra.c',
      'Bra.h',
      'Bra86.c',
      'CpuArch.c',
      'CpuArch.h',
      'LzFind.c',
      'LzFind.h',
      'LzHash.h',
      'Lzma2Dec.c',
      'Lzma2Dec.h',
      'LzmaEnc.c',
      'LzmaEnc.h',
      'LzmaDec.c',
      'LzmaDec.h',
      'LzmaLib.c',
      'LzmaLib.h',
      'Types.h',
    ],
  },
  'targets': [
    {
      # Default static library built for the host's target platform.
      'target_name': 'lzma_sdk',
      'type': 'static_library',
      'defines': [
        '_7ZIP_ST',
        '_LZMA_PROB32',
      ],
      'variables': {
        # Upstream uses self-assignment to avoid warnings.
        'clang_warning_flags': [ '-Wno-self-assign' ]
      },
      'sources': [
        '<@(lzma_sdk_sources)',
      ],
      'include_dirs': [
        '.',
      ],
      'direct_dependent_settings': {
        'include_dirs': [
          '.',
        ],
      },
    },
  ],
  'conditions': [
    # Windows additionally builds an x64 copy of the same library.
    ['OS=="win"', {
      'targets': [
        {
          'target_name': 'lzma_sdk64',
          'type': 'static_library',
          'defines': [
            '_7ZIP_ST',
            '_LZMA_PROB32',
          ],
          'variables': {
            # Upstream uses self-assignment to avoid warnings.
            'clang_warning_flags': [ '-Wno-self-assign' ]
          },
          'include_dirs': [
            '.',
          ],
          'sources': [
            '<@(lzma_sdk_sources)',
          ],
          'configurations': {
            'Common_Base': {
              'msvs_target_platform': 'x64',
            },
          },
          'direct_dependent_settings': {
            'include_dirs': [
              '.',
            ],
          },
        },
      ],
    }],
  ],
}
|
[
"zeno.albisser@hemispherian.com"
] |
zeno.albisser@hemispherian.com
|
fd122e785dcd67f5dfb38d8fb252da42a10b7b2e
|
46357db3b1c1af699384d9cba1ffbc3c732117ad
|
/python_basics/exercises/26_find_hcf_or_gcd.py
|
517945158aebeb2882ab90db0eb4fd6fa7b56cc1
|
[] |
permissive
|
khanhdodang/automation-training-python
|
28fbd70ca4bc84e47cf17d1e4702513863e38c44
|
b16143961cee869c7555b449e2a05abeae2dc3b5
|
refs/heads/master
| 2023-07-11T05:21:34.495851
| 2021-08-18T01:29:37
| 2021-08-18T01:29:37
| 285,208,030
| 0
| 8
|
MIT
| 2020-09-29T07:01:15
| 2020-08-05T07:01:46
|
Python
|
UTF-8
|
Python
| false
| false
| 559
|
py
|
'''
The highest common factor (H.C.F) or greatest common divisor (G.C.D) of two numbers is the largest positive integer that perfectly divides the two given numbers.
For example, the H.C.F of 12 and 14 is 2.
'''
# Python program to find H.C.F of two numbers
# define a function
def compute_hcf(x, y):
    """Return the highest common factor (H.C.F. / G.C.D.) of x and y.

    Uses the Euclidean algorithm, which runs in O(log(min(x, y)))
    instead of trial-dividing every candidate. This also fixes the
    original's UnboundLocalError when min(x, y) == 0: by the usual
    convention gcd(n, 0) == n.
    """
    x, y = abs(x), abs(y)
    # Euclid: gcd(x, y) == gcd(y, x mod y); terminates when y reaches 0.
    while y:
        x, y = y, x % y
    return x
num1 = 54
num2 = 24
print("The H.C.F. is", compute_hcf(num1, num2))
|
[
"khanhdo.pmp@gmail.com"
] |
khanhdo.pmp@gmail.com
|
138e01dd749c37a17258cce7de9bd8b7d0744481
|
0a21d5e72b4afbabcbf4ec0d65ea84cd8d6159c7
|
/Contest/weekly-contest-169/D.py
|
7b093659d1af3a7b5b3878292f1449b79aa09d2b
|
[] |
no_license
|
LuoJiaji/LeetCode-Demo
|
193f27ba36c93f9030435874a145c63a81d3c0f8
|
78e6e87c01848a1dc71b7dc0716029ece5f35863
|
refs/heads/master
| 2020-06-24T03:03:33.366537
| 2020-04-05T02:09:41
| 2020-04-05T02:09:41
| 198,830,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,126
|
py
|
# class Solution(object):
# def isSolvable(self, words, result):
# """
# :type words: List[str]
# :type result: str
# :rtype: bool
# """
# # used = []
# # flag = False
# def check(numlist, alpha, words, result):
# # pass
# flag = False
# nums = []
# for w in words:
# tmp = ''
# for a in w:
# ind = alpha.index(a)
# tmp += str(numlist[ind])
# nums.append(tmp)
# res = ''
# for i in result:
# ind = alpha.index(i)
# res += str(numlist[ind])
# s = 0
# for num in nums:
# s += int(num)
# if s == int(res):
# flag = True
# print(nums, res)
# return flag
# # print(nums, res)
# def dfs(alpha, i, used, numlist, cantzero):
# # numlist = copy.deepcopy(numlist)
# # used = copy.deepcopy(used)
# # print(numlist, used)
# if i == len(alpha):
# # print(i, len(alpha), numlist)
# res = check(numlist, alpha, words, result)
# return res
# for n in range(10):
# # print(n, used)
# if n == 0 and alpha[i] in cantzero:
# continue
# if n not in used:
# # new_used = used + [n]
# # new_numlist = numlist + [n]
# flag = dfs(alpha, i+1, used + [n], numlist + [n], cantzero)
# if flag:
# return True
# return False
# alpha = result
# cantzero = [result[0]]
# for w in words:
# alpha += w
# cantzero += [w[0]]
# alpha = list(set(alpha))
# cantzero = list(set(cantzero))
# print(alpha)
# print(cantzero)
# res = dfs(alpha, 0, [], [], cantzero)
# return res
# class Solution(object):
# def isSolvable(self, words, result):
# """
# :type words: List[str]
# :type result: str
# :rtype: bool
# """
# def reverse(s):
# ret = ""
# for i in range(len(s) - 1, -1, -1):
# ret += s[i]
# return ret
# data = [reverse(w) for w in words]
# data.append(reverse(result))
# print(data)
# n = len(data)
# m = len(result)
# s = {}
# a = set()
# def work(k, n, m, y):
# if k >= n * m:
# return y == 0
# if (k + 1) % n == 0:
# if data[k % n][k // n] in s:
# if y % 10 == s[data[k % n][k // n]]:
# return work(k + 1, n, m, y // 10)
# else:
# return False
# else:
# if y % 10 not in a:
# a.add(y % 10)
# s[data[k % n][k // n]] = y % 10
# if work(k + 1, n, m, y // 10):
# return True
# a.remove(y % 10)
# del s[data[k % n][k // n]]
# else:
# return False
# elif k // n >= len(data[k % n]):
# return work(k + 1, n, m, y)
# elif data[k % n][k // n] in s:
# if k // n == len(data[k % n]) - 1 and s[data[k % n][k // n]] == 0:
# return False
# return work(k + 1, n, m, y + s[data[k % n][k // n]])
# else:
# for i in range(10):
# if k // n == len(data[k % n]) - 1 and i == 0:
# continue
# if i not in a:
# s[data[k % n][k // n]] = i
# a.add(i)
# if work(k + 1, n, m, y + i):
# return True
# a.remove(i)
# del s[data[k % n][k // n]]
# return False
# return work(0, n, m, 0)
class Solution:
    def isSolvable(self, words, result):
        """Return True if letters can map to distinct digits so that sum(words) == result.

        Each letter is reduced to one signed positional weight: the sum of
        10**position over its occurrences in `words`, minus its weight in
        `result`. The puzzle is solvable iff some assignment of distinct
        digits makes the weighted sum zero.

        NOTE(review): the usual leading-zero constraint (first letter of a
        multi-digit word must not map to 0) is NOT enforced here — confirm
        against the intended problem statement.
        """
        # letter -> net positional weight (positive from words, negative from result).
        letter_dict = dict()
        for word in words:
            cnt = 1
            for letter in word[::-1]:
                if letter not in letter_dict:
                    letter_dict[letter] = 0
                letter_dict[letter] += cnt
                cnt *= 10
        cnt = 1
        for letter in result[::-1]:
            if letter not in letter_dict:
                letter_dict[letter] = 0
            letter_dict[letter] -= cnt
            cnt *= 10
        print(letter_dict)  # debug output (left as-is)
        # Try large-magnitude weights first so the bound below prunes early.
        arr = sorted(letter_dict.values(), key=lambda x: abs(x), reverse=True)
        print(arr)  # debug output (left as-is)
        length = len(arr)
        flag_num = [True] * 10  # flag_num[d] is True while digit d is unused
        flag_res = [False]      # boxed bool so the closure can set it

        def dfs(i, s):
            # i: index into arr; s: weighted sum accumulated so far.
            if flag_res[0]:
                return
            if i == length:
                if s == 0:
                    flag_res[0] = True
                return
            # Pruning: find the largest still-unused digit; if even assigning
            # it to every remaining weight cannot reach |s|, give up early.
            # num = 10
            for num in range(10)[::-1]:
                if flag_num[num]:
                    break
            if num * sum([abs(arr[j]) for j in range(i, length)]) < abs(s):
                return
            for num in range(10):
                if not flag_num[num]:
                    continue
                flag_num[num] = False
                dfs(i + 1, s + arr[i] * num)
                flag_num[num] = True  # backtrack: release the digit

        dfs(0, 0)
        return flag_res[0]
words = ["SEND", "MORE"]
result = "MONEY"
res = Solution().isSolvable(words, result)
print(res)
words = ["SIX", "SEVEN", "SEVEN"]
result = "TWENTY"
res = Solution().isSolvable(words, result)
print(res)
words = ["THIS", "IS", "TOO"]
result = "FUNNY"
res = Solution().isSolvable(words, result)
print(res)
words = ["LEET", "CODE"]
result = "POINT"
res = Solution().isSolvable(words, result)
print(res)
|
[
"lt920@126.com"
] |
lt920@126.com
|
42f4bf6c1f20d34b1dff13ff86e5188f43f078a1
|
aa2c3743c265c3db8a246a04f26df8428d23dd06
|
/tacker/api/v1/router.py
|
3f53618e26a630c4111a70c43a13a10fd9edda17
|
[
"Apache-2.0"
] |
permissive
|
SripriyaSeetharam/tacker
|
fb43740de8e791b7bfa121dd16c295dd380f03f0
|
0c5c2eb06fb6112b03b49c05c5cbffb0ba00587f
|
refs/heads/master
| 2021-01-22T01:55:17.327221
| 2015-07-08T21:07:02
| 2015-07-08T21:07:02
| 38,065,799
| 1
| 0
| null | 2015-06-25T18:13:25
| 2015-06-25T18:13:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,312
|
py
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import routes as routes_mapper
import six.moves.urllib.parse as urlparse
import webob
import webob.dec
import webob.exc
from tacker.api import extensions
from tacker.api.v1 import attributes
from tacker.openstack.common import log as logging
from tacker import wsgi
LOG = logging.getLogger(__name__)
class Index(wsgi.Application):
    """WSGI application that renders the API's top-level resource index.

    Serializes the configured name -> collection mapping as a list of
    resource descriptors with self links, in the content type negotiated
    from the request.
    """

    def __init__(self, resources):
        # Mapping of resource name -> collection (URL path segment).
        self.resources = resources

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        metadata = {'application/xml': {'attributes': {
                    'resource': ['name', 'collection'],
                    'link': ['href', 'rel']}}}

        layout = []
        # .items() instead of the Python-2-only .iteritems(): this file
        # already uses six.moves for 2/3 compatibility, and iteritems()
        # would raise AttributeError under Python 3.
        for name, collection in self.resources.items():
            href = urlparse.urljoin(req.path_url, collection)
            resource = {'name': name,
                        'collection': collection,
                        'links': [{'rel': 'self',
                                   'href': href}]}
            layout.append(resource)
        response = dict(resources=layout)
        content_type = req.best_match_content_type()
        body = wsgi.Serializer(metadata=metadata).serialize(response,
                                                            content_type)
        return webob.Response(body=body, content_type=content_type)
class APIRouter(wsgi.Router):
    """Top-level router for the v1 API.

    Builds a routes mapper and lets the extension manager register all
    extension resources before the base Router takes over dispatch.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        # PasteDeploy app_factory entry point; global_config is unused here.
        return cls(**local_config)

    def __init__(self, **local_config):
        mapper = routes_mapper.Mapper()
        ext_mgr = extensions.ExtensionManager.get_instance()
        # Populate RESOURCE_ATTRIBUTE_MAP with extension-provided resources
        # before routing starts.
        ext_mgr.extend_resources("1.0", attributes.RESOURCE_ATTRIBUTE_MAP)
        super(APIRouter, self).__init__(mapper)
|
[
"isaku.yamahata@intel.com"
] |
isaku.yamahata@intel.com
|
b40a2a276b7057d283cc97a08b18fff243b0a820
|
c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd
|
/google/ads/googleads/v4/googleads-py/google/ads/googleads/v4/services/types/ad_group_extension_setting_service.py
|
ef41a0ce26e223a0dc2319488043f0f0ca9dd4e9
|
[
"Apache-2.0"
] |
permissive
|
dizcology/googleapis-gen
|
74a72b655fba2565233e5a289cfaea6dc7b91e1a
|
478f36572d7bcf1dc66038d0e76b9b3fa2abae63
|
refs/heads/master
| 2023-06-04T15:51:18.380826
| 2021-06-16T20:42:38
| 2021-06-16T20:42:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,946
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v4.resources.types import ad_group_extension_setting
from google.protobuf import field_mask_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v4.services',
marshal='google.ads.googleads.v4',
manifest={
'GetAdGroupExtensionSettingRequest',
'MutateAdGroupExtensionSettingsRequest',
'AdGroupExtensionSettingOperation',
'MutateAdGroupExtensionSettingsResponse',
'MutateAdGroupExtensionSettingResult',
},
)
class GetAdGroupExtensionSettingRequest(proto.Message):
r"""Request message for
[AdGroupExtensionSettingService.GetAdGroupExtensionSetting][google.ads.googleads.v4.services.AdGroupExtensionSettingService.GetAdGroupExtensionSetting].
Attributes:
resource_name (str):
Required. The resource name of the ad group
extension setting to fetch.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
class MutateAdGroupExtensionSettingsRequest(proto.Message):
r"""Request message for
[AdGroupExtensionSettingService.MutateAdGroupExtensionSettings][google.ads.googleads.v4.services.AdGroupExtensionSettingService.MutateAdGroupExtensionSettings].
Attributes:
customer_id (str):
Required. The ID of the customer whose ad
group extension settings are being modified.
operations (Sequence[google.ads.googleads.v4.services.types.AdGroupExtensionSettingOperation]):
Required. The list of operations to perform
on individual ad group extension settings.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
"""
customer_id = proto.Field(
proto.STRING,
number=1,
)
operations = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='AdGroupExtensionSettingOperation',
)
partial_failure = proto.Field(
proto.BOOL,
number=3,
)
validate_only = proto.Field(
proto.BOOL,
number=4,
)
class AdGroupExtensionSettingOperation(proto.Message):
r"""A single operation (create, update, remove) on an ad group
extension setting.
Attributes:
update_mask (google.protobuf.field_mask_pb2.FieldMask):
FieldMask that determines which resource
fields are modified in an update.
create (google.ads.googleads.v4.resources.types.AdGroupExtensionSetting):
Create operation: No resource name is
expected for the new ad group extension setting.
update (google.ads.googleads.v4.resources.types.AdGroupExtensionSetting):
Update operation: The ad group extension
setting is expected to have a valid resource
name.
remove (str):
Remove operation: A resource name for the removed ad group
extension setting is expected, in this format:
``customers/{customer_id}/adGroupExtensionSettings/{ad_group_id}~{extension_type}``
"""
update_mask = proto.Field(
proto.MESSAGE,
number=4,
message=field_mask_pb2.FieldMask,
)
create = proto.Field(
proto.MESSAGE,
number=1,
oneof='operation',
message=ad_group_extension_setting.AdGroupExtensionSetting,
)
update = proto.Field(
proto.MESSAGE,
number=2,
oneof='operation',
message=ad_group_extension_setting.AdGroupExtensionSetting,
)
remove = proto.Field(
proto.STRING,
number=3,
oneof='operation',
)
class MutateAdGroupExtensionSettingsResponse(proto.Message):
r"""Response message for an ad group extension setting mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v4.services.types.MutateAdGroupExtensionSettingResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE,
number=3,
message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='MutateAdGroupExtensionSettingResult',
)
class MutateAdGroupExtensionSettingResult(proto.Message):
r"""The result for the ad group extension setting mutate.
Attributes:
resource_name (str):
Returned for successful operations.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
554ab2c2d9d21b5149c32d9c6caf9f65d0dbe509
|
dd08a146a41a8114365a7b11b534935cb96ec0b5
|
/configs/activations/x101_4x4_1x.py
|
978ebdd35999768d449ea74d69c8814e2a657020
|
[
"Apache-2.0"
] |
permissive
|
kostas1515/GOL
|
2132020f97739278afd12fc1e7af2560c50781cb
|
70026b73cacf7ee9c8b209907a83cff7d7e34d65
|
refs/heads/master
| 2023-05-23T10:13:04.671246
| 2022-11-24T10:51:22
| 2022-11-24T10:51:22
| 513,964,766
| 18
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
# mmdetection-style config: Mask R-CNN with a ResNeXt-101 64x4d FPN backbone
# on LVIS v1, "1x" schedule, 4 images per GPU (hence "x101_4x4_1x").
_base_ = [
    '../lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py'
]
# data = dict(train=dict(oversample_thr=0.0))
# oversample_thr=0.0 disables repeat-factor oversampling from the base config.
data = dict(train=dict(oversample_thr=0.0),samples_per_gpu=4)
# Sigmoid (binary) cross-entropy classification head; fc_cls weights start at
# a small constant with bias -6.5 (sigmoid(-6.5) ~ 0.0015, i.e. a low initial
# foreground score).
model = dict(roi_head=dict(bbox_head=dict(loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True),
                                          init_cfg = dict(type='Constant',val=0.001, bias=-6.5, override=dict(name='fc_cls')))))
# Step LR schedule with 1000-iteration linear warmup; decay at epochs 8 and 11.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.001,
    step=[8, 11])
work_dir='./experiments/x101_4x4_1x/'
# work_dir='./experiments/test/'
# Mixed-precision training with a static loss scale.
fp16 = dict(loss_scale=512.)
|
[
"konsa15@liverpool.ac.uk"
] |
konsa15@liverpool.ac.uk
|
d1c83f7397067a5db4849163931e37a5e5a349c0
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/dataprotection/azure-mgmt-dataprotection/generated_samples/get_operation_result_patch.py
|
448ca7b2befea5dda6ba7aebacbd274a18d99b1d
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852
| 2023-02-15T13:30:47
| 2023-02-15T13:30:47
| 157,927,277
| 0
| 0
|
MIT
| 2022-07-19T08:05:23
| 2018-11-16T22:15:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,756
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.dataprotection import DataProtectionClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-dataprotection
# USAGE
python get_operation_result_patch.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Fetch and print the result of a backup-vault PATCH operation.

    Credentials are read from the AZURE_CLIENT_ID / AZURE_TENANT_ID /
    AZURE_CLIENT_SECRET environment variables via DefaultAzureCredential
    (see the module docstring); subscription/resource identifiers are the
    sample values from the service's swagger example.
    """
    client = DataProtectionClient(
        credential=DefaultAzureCredential(),
        subscription_id="04cf684a-d41f-4550-9f70-7708a3a2283b",
    )

    # operation_id is an opaque base64 token identifying the long-running
    # operation whose result we poll.
    response = client.backup_vault_operation_results.get(
        resource_group_name="SampleResourceGroup",
        vault_name="swaggerExample",
        operation_id="YWUzNDFkMzQtZmM5OS00MmUyLWEzNDMtZGJkMDIxZjlmZjgzOzdmYzBiMzhmLTc2NmItNDM5NS05OWQ1LTVmOGEzNzg4MWQzNA==",
    )
    print(response)
# x-ms-original-file: specification/dataprotection/resource-manager/Microsoft.DataProtection/preview/2022-11-01-preview/examples/VaultCRUD/GetOperationResultPatch.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
kurtzeborn.noreply@github.com
|
fcf7d0c7373084d9fb10b07ff25dc125d92190fe
|
36d924baf115884f48cf4e8de3415340b82abda5
|
/atomman/dump/poscar/dump.py
|
7d6ed24a88f0fa17c6717384f61be774816a7250
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain"
] |
permissive
|
plin1112/atomman
|
499a430649a89bc9aa5cb716598d05b2783537d6
|
ced3a4e5a99a95ef582cf3dbb074eb75cad4a0a6
|
refs/heads/master
| 2020-03-17T19:28:04.032877
| 2019-05-24T03:17:46
| 2019-05-24T03:17:46
| 133,863,046
| 0
| 0
|
NOASSERTION
| 2019-05-24T03:17:47
| 2018-05-17T20:06:21
|
Python
|
UTF-8
|
Python
| false
| false
| 3,554
|
py
|
# Standard Python libraries
from __future__ import (absolute_import, print_function,
division, unicode_literals)
# http://www.numpy.org/
import numpy as np
# atomman imports
from ...compatibility import range
def dump(system, f=None, header='', symbols=None, style='direct',
box_scale=1.0, float_format='%.13e'):
"""
Generates a poscar-style coordination file for the system.
Parameters
----------
system : atomman.System
The system whose coordinates you are saving
f : str or file-like object, optional
File path or file-like object to write the content to. If not given,
then the content is returned as a str.
header : str, optional
The comment line to place at the top of the file. Default value is ''.
symbols : tuple, optional
List of the element symbols that correspond to the atom types. If not
given, will use system.symbols if set, otherwise no element content
will be included.
style : str, optional
The poscar coordinate style. Default value is 'direct'.
box_scale : float, optional
A universal scaling constant applied to the box vectors. Default value
is 1.0.
float_format : str, optional
c-style format for printing the floating point numbers. Default value
is '%.13e'.
Returns
-------
poscar_str : str
String of the poscar object (only returned if fname is not given).
"""
assert '\n' not in header, 'header can only be one line'
assert '\n' not in style, 'style can only be one line'
threexf = float_format + ' ' + float_format + ' ' + float_format
# Scale box vectors and write out the values
vects = system.box.vects / box_scale
poscar_string = '\n'.join([header,
float_format % box_scale,
threexf % tuple(vects[0]),
threexf % tuple(vects[1]),
threexf % tuple(vects[2])])
# Use system.symbols if set
if symbols is None:
if None not in system.symbols:
symbols = system.symbols
# Write symbols tags if they are given
if symbols is not None:
if not isinstance(symbols, (list, tuple)):
symbols = [symbols]
assert len(symbols) == system.natypes, 'length of symbols differs from number of atom types'
poscar_string += '\n' + ' '.join(symbols)
# Count how many atoms of each type
atype = system.atoms.atype
poscar_string += '\n'
uatype, counts = np.unique(atype, return_counts=True)
for i in range(1, int(uatype.max()+1)):
count = counts[uatype==i]
if count == []:
count = 0
else:
count = count[0]
poscar_string += '%i ' % count
# Check which coordinate style to use
poscar_string += '\n' + style
if style[0] in 'cCkK':
scale = False
else:
scale = True
# Write out positions
pos = system.atoms_prop(key='pos', scale=scale)
for a in range(1, system.natypes+1):
for p in pos[atype==a]:
poscar_string += '\n'+ threexf % tuple(p)
# Save to the file-like object
if hasattr(f, 'write'):
f.write(poscar_string)
# Save to the file name
elif f is not None:
with open(f, 'w') as fp:
fp.write(poscar_string)
# Return as a string
else:
return poscar_string
|
[
"lucas.hale@nist.gov"
] |
lucas.hale@nist.gov
|
d7be36e6e09482164160575426a42d6f073350ce
|
e17cd40f0a9e2452d685a754458f152a1cfb2c69
|
/open/core/betterself/models/supplement_stack.py
|
a29fbadc0050919745c7366ea59577be8717613f
|
[
"MIT"
] |
permissive
|
Rowlando13/open
|
6faec6f4f048284c2a69b64d7fb3767569dbcf3a
|
6c14c7bf8b915cea94f89b8af209be14489726e8
|
refs/heads/master
| 2022-12-02T00:34:56.987855
| 2020-08-20T22:01:25
| 2020-08-20T22:01:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 758
|
py
|
from open.utilities.fields import DEFAULT_MODELS_CHAR_FIELD
from open.utilities.models import BaseModelWithUserGeneratedContent
class SupplementStack(BaseModelWithUserGeneratedContent):
    """A user's named stack (collection) of supplement compositions."""

    name = DEFAULT_MODELS_CHAR_FIELD

    class Meta:
        unique_together = ("user", "name")
        ordering = ["user", "name"]
        verbose_name = "Supplements Stack"
        verbose_name_plural = "Supplements Stacks"

    def __str__(self):
        return "{} Stack".format(self.name)

    @property
    def description(self):
        # Comma-join the description of every composition in the stack;
        # an empty stack yields an empty string.
        parts = [item.description for item in self.compositions.all()]
        if not parts:
            return ""
        return ", ".join(parts)
|
[
"jeffshek@gmail.com"
] |
jeffshek@gmail.com
|
69bd648e2f22328b05bc72638b69a9862c70e36b
|
7090a13d5a33d2c1e500af239fe2a3f4a9996596
|
/trtools/tools/pload.py
|
ff5d82655582d01ae821a3b001351359dadb89e9
|
[
"MIT"
] |
permissive
|
andrewmhammond/trtools
|
97d382e560e9e303e34431d7a4248ac633b62b09
|
211cdb21377e36086c3d78fe9d711a9c12245359
|
refs/heads/master
| 2020-12-26T01:48:59.728488
| 2014-04-06T23:25:05
| 2014-04-06T23:25:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 931
|
py
|
import trtools.tools.datapanel as datapanel
import multiprocessing
# Module-level handle to the mapping being read. It is set by pload() before
# the workers spawn so that forked worker processes inherit it and _load()
# can reach the store without it being pickled for every task.
_store = None

def _load(key):
    # Worker-side task: fetch one item from the shared store.
    return _store[key]

def pload(store, N=None, num_consumers=None):
    """
    Parallelize the reading of a mapping class.

    This is useful for any IO abstraction where you want to read
    many files at once

    Parameters:
        store : mapping object
        N : int
            number of items to process, mostly for debugging
        num_consumers : int, optional
            number of worker consumers passed through to DataPanel.process

    Returns:
        dict mapping each key of `store` to its loaded value.

    Note: This was built specifically for something like FileCache
    """
    # set global so consumers processes have access
    global _store
    _store = store

    keys = store.keys()
    if N is None:
        N = len(keys)

    results = {}
    # store on process so we aren't pickling it constantly
    pvars = {'store':store}
    loader = datapanel.DataPanel(keys, store=results)
    loader.process(_load, num=N, num_consumers=num_consumers, process_vars=pvars)
    return results
|
[
"dale@dalejung.com"
] |
dale@dalejung.com
|
37b3647bb50ae28ed528e2bd7ffccfe2adbf6942
|
ac47074bcf749273941ab01213bb6d1f59c40c99
|
/project/fund/fund_factor/alpha_factor/morningstar_risk_adjusted_return.py
|
ac09c255fb010636db5553366eb00d9f13b56a5e
|
[] |
no_license
|
xuzhihua95/quant
|
c5561e2b08370610f58662f2871f1f1490681be2
|
c7e312c70d5f400b7e777d2ff4c9f6f223eabfee
|
refs/heads/master
| 2020-05-19T17:04:08.796981
| 2019-04-24T02:50:29
| 2019-04-24T02:50:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,434
|
py
|
from datetime import datetime
import os
import pandas as pd
import numpy as np
from quant.fund.fund_pool import FundPool
from quant.fund.fund import Fund
from quant.stock.date import Date
from quant.param.param import Parameter
from quant.stock.macro import Macro
import calendar
class MorningStarRiskAdjustedReturn(object):
    """Compute the Morningstar Risk-Adjusted Return (MRAR) factor for funds.

    MRAR penalises return variability by averaging (1 + excess return)**(-r)
    over T months and annualising; results are written to a CSV per (r, T).
    """

    def __init__(self):
        # Output directory for the factor CSV files.
        self.path = r"E:\3_Data\4_fund_data\2_fund_factor\alpha_factor"

    def cal_factor_mrar(self, fund, T, r, end_date, fund_data, macro_data):
        """Return the MRAR value for one fund at end_date.

        fund: fund code; T: number of months in the window; r: risk-aversion
        exponent; fund_data: daily return (%) panel; macro_data: monthly
        risk-free rate (%, already divided by 12). Returns np.nan when fewer
        than T months of data are available.
        """
        # T = 12
        # r = 2

        def fun_date(x):
            # 'YYYYMM' -> 'YYYYMMDD' for the last calendar day of that month.
            year = int(x[0:4])
            month = int(x[4:6])
            day = calendar.monthrange(year, month)[1]
            date = datetime(year, month, day).strftime("%Y%m%d")
            return date

        end_date = Date().get_normal_date_last_month_end_day(end_date)
        fund_data = pd.DataFrame(fund_data.loc[:end_date, fund])
        fund_data = fund_data.dropna()
        # Aggregate daily percentage returns into calendar-month sums.
        fund_data["Month"] = fund_data.index.map(lambda x: x[0:6])
        fund_month = fund_data.groupby(by=["Month"]).sum()
        fund_month.index = fund_month.index.map(fun_date)
        concat_data = pd.concat([fund_month, macro_data], axis=1)
        concat_data.columns = ["FundReturn", "FreeRiskReturn"]
        concat_data = concat_data.dropna()
        # NOTE(review): "Rerurn" typo is kept — it is a live column name.
        concat_data["ExcessMonthRerurn"] = concat_data["FundReturn"] - concat_data["FreeRiskReturn"]
        # Last T months of excess return, converted from percent to fraction.
        excess_return = pd.DataFrame(concat_data.loc[concat_data.index[-T:], "ExcessMonthRerurn"])
        excess_return /= 100.0
        if len(excess_return) == T:
            # MRAR(r) = [mean of (1+ER)^(-r)]^(-12/r): annualised, risk-penalised.
            excess_return["R"] = excess_return["ExcessMonthRerurn"].map(lambda x: (1+x)**(-r))
            res = excess_return["R"].mean() ** (-12/r)
        else:
            res = np.nan
        return res

    def cal_factor_mrar_all(self, T, r, beg_date, end_date):
        """Compute MRAR for every fund in the pools at each quarter-end and
        write the resulting fund-by-date matrix to a CSV."""
        date_series = Date().get_normal_date_series(beg_date, end_date, "Q")
        result = pd.DataFrame([], index=date_series)

        def fun_date(x):
            # 'YYYYMM' -> 'YYYYMMDD' for the last calendar day of that month.
            year = int(x[0:4])
            month = int(x[4:6])
            day = calendar.monthrange(year, month)[1]
            date = datetime(year, month, day).strftime("%Y%m%d")
            return date

        # Risk-free proxy: 1Y Chinese government bond yield, averaged per
        # month and divided by 12 to get a monthly rate (%).
        macro_code = "S0059744"
        macro_name = "中债国债到期收益率-1年"
        macro_data = Macro().get_macro_data(macro_code, None, None)
        macro_data.columns = [macro_name]
        macro_data['YearMonth'] = macro_data.index.map(lambda x: x[0:6])
        macro_data = macro_data.groupby(by=['YearMonth']).mean()[macro_name]
        macro_data.index = macro_data.index.map(fun_date)
        macro_data = pd.DataFrame(macro_data)
        macro_data.columns = [macro_name]
        macro_data /= 12.0

        # Daily repaired NAV percentage-change panel for all funds.
        fund_data = Fund().get_fund_factor("Repair_Nav_Pct", None, None)

        for i in range(len(date_series)):
            # Report date
            ######################################################################################################
            report_date = date_series[i]

            # Fund pool membership for this date (pool names are data keys,
            # kept in the original language)
            ######################################################################################################
            fund_code_list = FundPool().get_fund_pool_code(date=report_date, name="基金持仓基准基金池")
            fund_code_list3 = FundPool().get_fund_pool_code(date=report_date, name="量化基金")
            fund_code_list2 = FundPool().get_fund_pool_code(date="20180630", name="东方红基金")
            fund_code_list.extend(fund_code_list2)
            fund_code_list.extend(fund_code_list3)
            fund_code_list = list(set(fund_code_list))
            fund_code_list.sort()

            for i_fund in range(len(fund_code_list)):
                fund = fund_code_list[i_fund]
                print(report_date, fund)
                try:
                    res = self.cal_factor_mrar(fund, T, r, end_date, fund_data, macro_data)
                    result.loc[report_date, fund] = res
                except Exception as e:
                    # Best-effort: any per-fund failure is recorded as NaN.
                    result.loc[report_date, fund] = np.nan

        result = result.T
        file = os.path.join(self.path, "MorningStar_MRAR_" + str(r) + "_" + str(T) + '.csv')
        result.to_csv(file)
|
[
"1119332482@qq.com"
] |
1119332482@qq.com
|
31ee51f9fb901a1a711c8792108497d4f80f3159
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/41/usersdata/67/24524/submittedfiles/gravitacional.py
|
e231a12429dd729284c1d39d911cd4e681053dbb
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import funcoes
#ENTRADA
dimensao = input('Digite a dimensao das matrizes: ')
matrizA = input('Digite a Matriz A como uma única linha entre aspas: ')
matrizD = input('Digite a Matriz D como uma única linha entre aspas: ')
alfa = input('Digite o valor de alfa: ')
#PREPARANDO A ENTRADA
T = np.zeros((dimensao,dimensao))
A = np.fromstring(matrizA, sep=' ').reshape(dimensao, dimensao)
d = np.fromstring(matrizD, sep=' ').reshape(dimensao, dimensao)
#comece aqui...
#INÍCIO
somal=[]
ss=0
for i in range (0,a.shape[1],1):
for j in range(0,a.shape[0],1):
ss=a[i,j]+ss
somal.append=ss
#SAÍDA
somatorio = sum(sum(T))
print('%.4f' % somatorio)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
0bf0bdcbca713d816b6bf06d52c9ea4f4fc29158
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/tags/2009.1/programming/language/python/pysqlite/actions.py
|
d4b2fc36f873b9ceae3f7fec989ac524da571a25
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356
| 2013-07-23T17:57:58
| 2013-07-23T17:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (C) TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import pythonmodules
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def install():
    """PISI build step: install the Python module and prune unwanted docs."""
    # Standard distutils-based install via the PISI actions API.
    pythonmodules.install()
    # Source-install instructions are irrelevant once packaged; drop them.
    pisitools.remove("usr/pysqlite2-doc/install-source.txt")
# needs sphinx package for documentation
|
[
"yusuf.aydemir@istanbul.com"
] |
yusuf.aydemir@istanbul.com
|
4d2b5f48e997fed08bc8c45f8f7918ba868a9710
|
e71b6d14fbdbc57c7234ca45a47329d7d02fc6f7
|
/flask_api/venv/lib/python3.7/site-packages/vsts/task_agent/v4_1/models/service_endpoint_request.py
|
84e317a726ed5a2c330d46f34c75ccca9feae9e4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
u-blavins/secret_sasquatch_society
|
c36993c738ab29a6a4879bfbeb78a5803f4f2a57
|
0214eadcdfa9b40254e331a6617c50b422212f4c
|
refs/heads/master
| 2020-08-14T00:39:52.948272
| 2020-01-22T13:54:58
| 2020-01-22T13:54:58
| 215,058,646
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,877
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ServiceEndpointRequest(Model):
"""ServiceEndpointRequest.
:param data_source_details:
:type data_source_details: :class:`DataSourceDetails <task-agent.v4_1.models.DataSourceDetails>`
:param result_transformation_details:
:type result_transformation_details: :class:`ResultTransformationDetails <task-agent.v4_1.models.ResultTransformationDetails>`
:param service_endpoint_details:
:type service_endpoint_details: :class:`ServiceEndpointDetails <task-agent.v4_1.models.ServiceEndpointDetails>`
"""
_attribute_map = {
'data_source_details': {'key': 'dataSourceDetails', 'type': 'DataSourceDetails'},
'result_transformation_details': {'key': 'resultTransformationDetails', 'type': 'ResultTransformationDetails'},
'service_endpoint_details': {'key': 'serviceEndpointDetails', 'type': 'ServiceEndpointDetails'}
}
def __init__(self, data_source_details=None, result_transformation_details=None, service_endpoint_details=None):
super(ServiceEndpointRequest, self).__init__()
self.data_source_details = data_source_details
self.result_transformation_details = result_transformation_details
self.service_endpoint_details = service_endpoint_details
|
[
"usama.blavins1@gmail.com"
] |
usama.blavins1@gmail.com
|
71f9fd478ec198cda1f30d14323c68bd03250659
|
ba916d93dfb8074241b0ea1f39997cb028509240
|
/kickstart/2020/RoundD/record_breaker.py
|
270cc231f01a90a63cf18de06cdfa30e3bce0042
|
[] |
no_license
|
satojkovic/algorithms
|
ecc1589898c61d2eef562093d3d2a9a2d127faa8
|
f666b215bc9bbdab2d2257c83ff1ee2c31c6ff8e
|
refs/heads/master
| 2023-09-06T08:17:08.712555
| 2023-08-31T14:19:01
| 2023-08-31T14:19:01
| 169,414,662
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
T = int(input())
for t in range(1, T + 1):
N = int(input())
visitors = list(map(int, input().split(' ')))
prev_record = 0
res = 0
for i in range(N):
greater_than_prev = i == 0 or visitors[i] > prev_record
greater_than_next = i == N - 1 or visitors[i] > visitors[i + 1]
res = res + 1 if greater_than_prev and greater_than_next else res
prev_record = max(prev_record, visitors[i])
print('Case #{}: {}'.format(t, res))
|
[
"satojkovic@gmail.com"
] |
satojkovic@gmail.com
|
a2e53cd61b75aa7768b090cc1df2d8557dfba982
|
45eb1b25bf72d7c88a57fec5bb4bc5336c04f5ba
|
/reckon/loc.py
|
7f151997499dd7b720cb8586a16c4597bc41d887
|
[
"MIT"
] |
permissive
|
seandstewart/reckon
|
b10faece45e4c1ede5fa1c7e416179e7d1e68142
|
ddddb2b0d881e1226075d9eefdcef580826da750
|
refs/heads/master
| 2021-06-21T06:38:33.649537
| 2021-03-06T21:28:32
| 2021-03-06T21:28:32
| 193,951,846
| 2
| 0
|
MIT
| 2021-03-06T21:28:33
| 2019-06-26T17:44:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,632
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import collections
import threading
from typing import Callable
try:
from reprlib import repr
except ImportError:
pass
from . import protos
class LocalCache(protos.ProtoCache):
"""A Localized cache.
Can be implemented as a globalized cache by initializing at the top-level of a module.
"""
def __init__(
self,
*,
target_usage: float = None,
strategy: protos.CacheStrategy = protos.CacheStrategy.DYN
):
self._lock = threading.RLock()
with self._lock:
self.TARGET_RATIO = (
target_usage if target_usage is not None else self.TARGET_RATIO
)
self._cache = dict()
self._locks = collections.defaultdict(threading.RLock)
self._hits = 0
self._misses = 0
self.strategy = strategy
__getitem__ = protos.cache_getitem
get = protos.cache_get
keys = protos.cache_keys
values = protos.cache_values
items = protos.cache_items
info = protos.cache_info
clear = protos.clear_cache
size = protos.cache_size
usage = protos.memory_usage_ratio
memoize = protos.memoize
set_target_usage = protos.set_target_memory_use_ratio
# Assigned on init.
shrink = protos.shrink
def memoize(
_func: Callable = None,
*,
target_usage: float = LocalCache.TARGET_RATIO,
strategy: protos.CacheStrategy = protos.CacheStrategy.DYN
) -> Callable:
cache = LocalCache(target_usage=target_usage, strategy=strategy)
return cache.memoize(_func) if _func else cache.memoize
|
[
"sean_stewart@me.com"
] |
sean_stewart@me.com
|
774ff8553c11fe3a6fd04dbb2d0f46c015f512b3
|
97be97cfc56fb2170b60b91063dbfe5f1449e3c0
|
/python/ABC179/B.py
|
53752a55a98e2fa669ae9c197b305f57a1d95a7f
|
[] |
no_license
|
iWonder118/atcoder
|
73d965a0a9ade189733808e47634f2b7776aad4b
|
3ab7271e838a2903ff0e07f94015ef13c59577e1
|
refs/heads/master
| 2022-01-25T10:10:55.007340
| 2021-12-31T14:04:54
| 2021-12-31T14:04:54
| 245,155,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
n = int(input())
results = [list(map(int, input().split())) for _ in range(n)]
ans = 0
for i in range(n):
if ans >= 3:
break
if results[i][0] == results[i][1]:
ans += 1
else:
ans = 0
if ans >= 3:
print("Yes")
else:
print("No")
|
[
"52240372+iWonder118@users.noreply.github.com"
] |
52240372+iWonder118@users.noreply.github.com
|
b4b369b2625b316d54996745d9eab2a7ccae7b52
|
73145f3548feb0812dde986242773f7d446e487f
|
/tests/tests.py
|
9da95dc88c2c111c8459f6c5975e43edaa44c135
|
[
"BSD-3-Clause"
] |
permissive
|
lookup/django-redis-sessions
|
9d4c31d71d1fb5d552b702e961066848e6443b9f
|
c9a1d3712d59d0fc972c9463e7718f7202cab41b
|
refs/heads/master
| 2021-01-17T23:28:53.249603
| 2013-03-11T22:40:33
| 2013-03-11T22:40:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,878
|
py
|
import time
from nose.tools import eq_
from django.utils.importlib import import_module
from django.conf import settings
redis_session = import_module(settings.SESSION_ENGINE).SessionStore()
def test_modify_and_keys():
eq_(redis_session.modified, False)
redis_session['test'] = 'test_me'
eq_(redis_session.modified, True)
eq_(redis_session['test'], 'test_me')
def test_save_and_delete():
redis_session['key'] = 'value'
redis_session.save()
eq_(redis_session.exists(redis_session.session_key), True)
redis_session.delete(redis_session.session_key)
eq_(redis_session.exists(redis_session.session_key), False)
def test_flush():
redis_session['key'] = 'another_value'
redis_session.save()
key = redis_session.session_key
redis_session.flush()
eq_(redis_session.exists(key), False)
def test_items():
redis_session['item1'], redis_session['item2'] = 1, 2
redis_session.save()
# Python 3.* fix
eq_(sorted(list(redis_session.items())), [('item1', 1), ('item2', 2)])
def test_expiry():
redis_session.set_expiry(1)
# Test if the expiry age is set correctly
eq_(redis_session.get_expiry_age(), 1)
redis_session['key'] = 'expiring_value'
redis_session.save()
key = redis_session.session_key
eq_(redis_session.exists(key), True)
time.sleep(2)
eq_(redis_session.exists(key), False)
def test_save_and_load():
redis_session.set_expiry(60)
redis_session.setdefault('item_test', 8)
redis_session.save()
session_data = redis_session.load()
eq_(session_data.get('item_test'), 8)
# def test_load():
# redis_session.set_expiry(60)
# redis_session['item1'], redis_session['item2'] = 1,2
# redis_session.save()
# session_data = redis_session.server.get(redis_session.session_key)
# expiry, data = int(session_data[:15]), session_data[15:]
|
[
"hellysmile@gmail.com"
] |
hellysmile@gmail.com
|
b13efcc972876375f804eb0816b1849ab2f0fd26
|
00ef8e1eb57b73427508b20aadf0266da6b1f900
|
/examples/gdev/any.py
|
7e9c6d05c4ea3e8b94d091aebc5b144a1fdb9e4d
|
[] |
no_license
|
amy12xx/rl-toolkit
|
f4643935cc8afd960356bfeae74c233d2596dea9
|
8254df8346752ea0226ae2064cc1eabc839567b0
|
refs/heads/master
| 2023-08-14T00:56:52.270642
| 2021-09-28T15:59:32
| 2021-09-28T15:59:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,167
|
py
|
import sys
sys.path.insert(0, './')
import os
import os.path as osp
import torch
import numpy as np
import argparse
import string
import random
import datetime
from garage import wrap_experiment
from garage.experiment.deterministic import set_seed
from garage.torch.algos import PPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
from garage.trainer import Trainer
import pybullet_envs # noqa: F401 # pylint: disable=unused-import
import pybulletgym
from garage.torch import set_gpu_mode
from garage.sampler import LocalSampler
from garage.sampler import VecWorker
from garage.sampler import DefaultWorker
from garage.sampler import MultiprocessingSampler
from garage.envs import GymEnv, normalize
from garage.torch.algos import SAC
from garage.torch.policies import TanhGaussianMLPPolicy
from garage.torch.q_functions import ContinuousMLPQFunction
from garage.replay_buffer import PathBuffer
from garage.torch.optimizers import OptimizerWrapper
from torch import nn
from torch.nn import functional as F
from rlf.garage.auto_arg import convert_to_args, convert_kwargs
from rlf.args import str2bool
from rlf.exp_mgr import config_mgr
from dowel import logger
from rlf.garage.wb_logger import WbOutput
from rlf.garage.std_logger import StdLogger
def setup_def_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n-epochs', type=int, default=1000)
parser.add_argument('--batch-size', type=int, default=1000)
parser.add_argument('--hidden-dim', type=int, default=256)
parser.add_argument('--log-interval', type=int, default=10)
parser.add_argument('--depth', type=int, default=2)
parser.add_argument('--env-name', type=str, required=True)
parser.add_argument('--prefix', type=str, default='debug')
parser.add_argument('--env-norm', type=str2bool, default=False)
parser.add_argument('--cuda', type=str2bool, default=False)
parser.add_argument('--no-wb', action='store_true', default=False)
parser.add_argument('--alg', type=str, required=True)
return parser
def ppo_args(parser):
convert_to_args(PPO, parser)
parser.add_argument('--policy-lr', type=float, default=3e-4)
parser.add_argument('--vf-lr', type=float, default=3e-4)
parser.add_argument('--n-minibatches', type=float, default=10)
parser.add_argument('--minibatch-size', type=float, default=None)
def ppo_setup(env, trainer, args):
policy = GaussianMLPPolicy(env.spec,
hidden_sizes=[args.hidden_dim]*args.depth,
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=[args.hidden_dim]*args.depth,
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
algo = PPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
policy_optimizer=OptimizerWrapper(
(torch.optim.Adam, dict(lr=args.policy_lr)),
policy,
max_optimization_epochs=args.n_minibatches,
minibatch_size=args.minibatch_size),
vf_optimizer=OptimizerWrapper(
(torch.optim.Adam, dict(lr=args.vf_lr)),
value_function,
max_optimization_epochs=args.n_minibatches,
minibatch_size=args.minibatch_size),
**convert_kwargs(args, PPO))
trainer.setup(algo, env, sampler_cls=LocalSampler, worker_class=VecWorker,
worker_args={'n_envs': 8})
return algo
def sac_args(parser):
convert_to_args(SAC, parser)
parser.add_argument('--buffer-size', type=float, default=1e6)
parser.add_argument('--gradient-steps-per-itr', type=int, default=1000)
def sac_setup(env, trainer, args):
policy = TanhGaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=[args.hidden_dim]*args.depth,
hidden_nonlinearity=nn.ReLU,
output_nonlinearity=None,
min_std=np.exp(-20.),
max_std=np.exp(2.),
)
qf1 = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[args.hidden_dim]*args.depth,
hidden_nonlinearity=F.relu)
qf2 = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[args.hidden_dim]*args.depth,
hidden_nonlinearity=F.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(args.buffer_size))
sac = SAC(env_spec=env.spec,
policy=policy,
qf1=qf1,
qf2=qf2,
**convert_kwargs(args, SAC))
trainer.setup(algo=sac, env=env, sampler_cls=LocalSampler)
return sac
USE_FNS = {
'ppo': (ppo_args, ppo_setup),
'sac': (sac_args, sac_setup),
}
def get_env_id(args):
upper_case = [c for c in args.env_name if c.isupper()]
if len(upper_case) == 0:
return ''.join([word[0] for word in args.env_name.split(".")])
else:
return ''.join(upper_case)
def create_prefix(args):
assert args.prefix is not None and args.prefix != '', 'Must specify a prefix'
d = datetime.datetime.today()
date_id = '%i%i' % (d.month, d.day)
env_id = get_env_id(args)
rnd_id = ''.join(random.sample(
string.ascii_uppercase + string.digits, k=2))
before = ('%s-%s-%s-%s-' %
(date_id, env_id, args.seed, rnd_id))
if args.prefix != 'debug' and args.prefix != 'NONE':
prefix = before + args.prefix
print('Assigning full prefix %s' % prefix)
else:
prefix = args.prefix
return prefix
def setup_launcher():
config_dir = osp.dirname(osp.realpath(__file__))
config_path = osp.join(config_dir, 'config.yaml')
config_mgr.init(config_path)
parser = setup_def_parser()
# First parse the regular args
base_args, _ = parser.parse_known_args()
get_args, get_algo = USE_FNS[base_args.alg]
use_prefix = create_prefix(base_args)
@wrap_experiment(archive_launch_repo=False, snapshot_mode='none', name=use_prefix)
def alg_train(ctxt=None):
get_args(parser)
args = parser.parse_args()
args.prefix = use_prefix
set_seed(args.seed)
env = GymEnv(args.env_name)
if args.env_norm:
env = normalize(env)
trainer = Trainer(ctxt)
logger.remove_all()
logger.add_output(StdLogger(args.log_interval))
if not args.no_wb:
wb_logger = WbOutput(args.log_interval, base_args)
logger.add_output(wb_logger)
algo = get_algo(env, trainer, args)
if args.cuda:
set_gpu_mode(True)
algo.to()
else:
set_gpu_mode(False)
trainer.train(n_epochs=args.n_epochs, batch_size=args.batch_size)
return alg_train
launcher = setup_launcher()
launcher()
|
[
"me@andrewszot.com"
] |
me@andrewszot.com
|
19ffe8a397c4cba7402ef7600fb331973f268134
|
6b6e20004b46165595f35b5789e7426d5289ea48
|
/release.py
|
a0439d9a8953d67b942543862779c0ee771d69e8
|
[
"Apache-2.0"
] |
permissive
|
anwarchk/quay
|
2a83d0ab65aff6a1120fbf3a45dd72f42211633b
|
23c5120790c619174e7d36784ca5aab7f4eece5c
|
refs/heads/master
| 2020-09-12T18:53:21.093606
| 2019-11-15T19:29:02
| 2019-11-15T19:29:02
| 222,517,145
| 0
| 0
|
Apache-2.0
| 2019-11-18T18:32:35
| 2019-11-18T18:32:35
| null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
import os
_GIT_HEAD_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'GIT_HEAD')
SERVICE = 'quay'
GIT_HEAD = None
REGION = os.environ.get('QUAY_REGION')
# Load git head if available
if os.path.isfile(_GIT_HEAD_PATH):
with open(_GIT_HEAD_PATH) as f:
GIT_HEAD = f.read().strip()
|
[
"jimmy.zelinskie+git@gmail.com"
] |
jimmy.zelinskie+git@gmail.com
|
d75c9bd6b13a1685e997c9bff89e619dfbad9617
|
4b41a76c5c366ba2daa30843acea16609b8f5da7
|
/2017/19/AoC17_19_2.py
|
63d0bff264fc8eed2403e85ab2ba25522ce454cd
|
[] |
no_license
|
grandfoosier/AdventOfCode
|
c4706cfefef61e80060cca89b0433636e42bf974
|
a43fdd72fe4279196252f24a4894500a4e272a5d
|
refs/heads/master
| 2020-06-11T12:36:48.699811
| 2019-01-14T23:44:44
| 2019-01-14T23:44:44
| 75,665,958
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,251
|
py
|
import copy
class Packet(object):
def __init__(self):
fname = "AoC17_19_1.txt"
self.paths = [line.rstrip("\n") for line in open(fname)]
self.dir = 'D'
self.pos = (0, self.paths[0].find('|'))
self.markers = ''
self.steps = 1; self.last = copy.copy(self.pos)
def _turn(self):
(y, x) = self.pos; (b, a) = self.last
self.steps += abs(y - b) + abs(x - a)
self.last = copy.copy(self.pos)
try:
if self.dir != 'D' and self.paths[y-1][x] != ' ': return 'U'
except: pass
try:
if self.dir != 'L' and self.paths[y][x+1] != ' ': return 'R'
except: pass
try:
if self.dir != 'U' and self.paths[y+1][x] != ' ': return 'D'
except: pass
try:
if self.dir != 'R' and self.paths[y][x-1] != ' ': return 'L'
except: pass
return 'X'
def _check(self):
(y, x) = self.pos
try:
if self.dir == 'U' and self.paths[y-1][x] == ' ':
return self._turn()
except: return self._turn()
try:
if self.dir == 'R' and self.paths[y][x+1] == ' ':
return self._turn()
except: return self._turn()
try:
if self.dir == 'D' and self.paths[y+1][x] == ' ':
return self._turn()
except: return self._turn()
try:
if self.dir == 'L' and self.paths[y][x-1] == ' ':
return self._turn()
except: return self._turn()
return self.dir
def _move(self):
(y, x) = self.pos
if self.dir == 'U': self.pos = (y-1, x)
elif self.dir == 'R': self.pos = (y, x+1)
elif self.dir == 'D': self.pos = (y+1, x)
elif self.dir == 'L': self.pos = (y, x-1)
if self.paths[y][x] not in ['|','-','+']:
self.markers += self.paths[y][x]
def follow_path(self):
while self.dir != 'X':
self.dir = self._check()
self._move()
(y, x) = self.pos; (b, a) = self.last
self.steps += abs(y - b) + abs(x - a)
self.last = copy.copy(self.pos)
return self.steps
P = Packet()
print ""
print P.follow_path()
print "\n"
|
[
"noreply@github.com"
] |
grandfoosier.noreply@github.com
|
a388e444b8e3f5e81960dffe79afb5a395d44c5c
|
a25e2aa102ffe9c2d9b553252a1882fe5a9d7ec9
|
/SprityBird/spritybird/python3.5/lib/python3.5/site-packages/openpyxl/formatting/formatting.py
|
ca562b09e2a12ddee21fe8ca8c472413569d9744
|
[
"MIT"
] |
permissive
|
MobileAnalytics/iPython-Framework
|
f96ebc776e763e6b4e60fb6ec26bb71e02cf6409
|
da0e598308c067cd5c5290a6364b3ffaf2d2418f
|
refs/heads/master
| 2020-03-22T06:49:29.022949
| 2018-07-04T04:22:17
| 2018-07-04T04:22:17
| 139,660,631
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,501
|
py
|
from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
from openpyxl.compat import iteritems, OrderedDict, deprecated
from openpyxl.styles.differential import DifferentialStyle
from .rule import Rule
def unpack_rules(cfRules):
for key, rules in iteritems(cfRules):
for idx,rule in enumerate(rules):
yield (key, idx, rule.priority)
class ConditionalFormatting(object):
"""Conditional formatting rules."""
def __init__(self):
self.cf_rules = OrderedDict()
self.max_priority = 0
def add(self, range_string, cfRule):
"""Add a rule such as ColorScaleRule, FormulaRule or CellIsRule
The priority will be added automatically.
"""
if not isinstance(cfRule, Rule):
raise ValueError("Only instances of openpyxl.formatting.rule.Rule may be added")
rule = cfRule
self.max_priority += 1
rule.priority = self.max_priority
self.cf_rules.setdefault(range_string, []).append(rule)
def _fix_priorities(self):
rules = unpack_rules(self.cf_rules)
rules = sorted(rules, key=lambda x: x[2])
for idx, (key, rule_no, prio) in enumerate(rules, 1):
self.cf_rules[key][rule_no].priority = idx
self.max_priority = len(rules)
@deprecated("Always use Rule objects")
def update(self, cfRules):
pass
@deprecated("Conditionl Formats are saved automatically")
def setDxfStyles(self, wb):
pass
|
[
"909889261@qq.com"
] |
909889261@qq.com
|
92258abe93cc42c815b9fcfd2422f11e6f2e3c37
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/edifact/D09B/MOVINSD09BUN.py
|
5baaa0f135f7639211744f4c0fe1978fec2ced42
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 1,740
|
py
|
#Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD09BUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 1},
{ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
]},
{ID: 'NAD', MIN: 0, MAX: 9, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 9},
]},
]},
{ID: 'TDT', MIN: 1, MAX: 3, LEVEL: [
{ID: 'LOC', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 99},
{ID: 'RFF', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'HAN', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LOC', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'RFF', MIN: 1, MAX: 99},
{ID: 'FTX', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 9},
{ID: 'DIM', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9},
{ID: 'NAD', MIN: 1, MAX: 99},
{ID: 'TMP', MIN: 0, MAX: 1, LEVEL: [
{ID: 'RNG', MIN: 0, MAX: 1},
]},
{ID: 'EQD', MIN: 0, MAX: 99, LEVEL: [
{ID: 'EQN', MIN: 0, MAX: 1},
]},
{ID: 'EQA', MIN: 0, MAX: 99, LEVEL: [
{ID: 'EQN', MIN: 0, MAX: 1},
]},
{ID: 'GID', MIN: 0, MAX: 9999, LEVEL: [
{ID: 'GDS', MIN: 0, MAX: 1},
]},
{ID: 'RFF', MIN: 0, MAX: 999, LEVEL: [
{ID: 'DGS', MIN: 1, MAX: 99, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
]},
]},
]},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
f19254d602637fe894fafb4102e6d9b28bd124df
|
19d83ef36909a6d830e2e41af05102b19186ebbd
|
/memory.py
|
15a7965920fc4d8558bf68344a4db9f3ed4f597c
|
[] |
no_license
|
ChrisProgramming2018/BTD3_Implemenation
|
93637b2262b86e7ad19d048127d83da57c9c7508
|
9409cd472ca406c118a45ab60414a070f1a5f709
|
refs/heads/master
| 2023-01-06T15:30:56.171425
| 2020-10-28T06:58:06
| 2020-10-28T06:58:06
| 295,357,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,049
|
py
|
import numpy as np
import random
class ReplayBuffer(object):
def __init__(self, max_size=1e6):
self.storage = []
self.max_size = max_size
self.ptr = 0
self.k = 0
def add(self, transition):
self.k += 1
if len(self.storage) == self.max_size:
self.storage[int(self.ptr)] = transition
self.ptr = (self.ptr + 1) % self.max_size
else:
self.storage.append(transition)
def sample(self, batch_size):
ind = np.random.randint(0, len(self.storage), size=batch_size)
batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = [], [], [], [], []
for i in ind:
state, next_state, action, reward, done = self.storage[i]
batch_states.append(np.array(state, copy=False))
batch_next_states.append(np.array(next_state, copy=False))
batch_actions.append(np.array(action, copy=False))
batch_rewards.append(np.array(reward, copy=False))
batch_dones.append(np.array(done, copy=False))
return np.array(batch_states), np.array(batch_next_states), batch_actions, np.array(batch_rewards).reshape(-1, 1), np.array(batch_dones).reshape(-1, 1)
def get_last_k_trajectories(self):
ind = [x for x in range(self.ptr - self.k, self.ptr)]
batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = [], [], [], [], []
for i in ind:
state, next_state, action, reward, done = self.storage[i]
batch_states.append(np.array(state, copy=False))
batch_next_states.append(np.array(next_state, copy=False))
batch_actions.append(np.array(action, copy=False))
batch_rewards.append(np.array(reward, copy=False))
batch_dones.append(np.array(done, copy=False))
return np.array(batch_states), np.array(batch_next_states), np.array(batch_actions), np.array(batch_rewards).reshape(-1, 1), np.array(batch_dones).reshape(-1, 1)
|
[
"noreply@github.com"
] |
ChrisProgramming2018.noreply@github.com
|
d0a55f6475bbbb29dcd260910386e57a71f8243f
|
0316925e2bad29d60f0dcccdf91277fd8f03ef09
|
/q034.py
|
87da5609fd58d81457cd3687dc67218378f6df5f
|
[] |
no_license
|
nomadlife/project-euler
|
426614df7b2c9368a4db59954dc1df2902a44d6b
|
9bc09843637a361fa93c7abb20ac990f973b08e5
|
refs/heads/master
| 2021-07-03T00:52:07.760948
| 2021-06-26T14:28:32
| 2021-06-26T14:28:32
| 100,214,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
# Q034 Digit factorials, all,
import time
start = time.time()
print("expected calculation time : 2 min")
def factorial(num):
total=1
while num>1:
total = total * num
num-=1
return total
for i in range(1,10000000):
total=0
for j in str(i):
total += factorial(int(j))
if total == i:
print("loop:{} factorial_sum:{} True".format(i,total))
# maximum range proof
for i in range(1,9):
print("9"*i, factorial(9)*i)
print("Calculation time:",time.time()-start)
|
[
"joonwoo3816@gmail.com"
] |
joonwoo3816@gmail.com
|
f2f503660cb416b4276951b9021d361ae00bc5f8
|
a56a74b362b9263289aad96098bd0f7d798570a2
|
/venv/bin/jupyter-serverextension
|
ee3c04b05b9c0eeb104333a5e6409bd21d07588f
|
[
"MIT"
] |
permissive
|
yoonkt200/ml-theory-python
|
5812d06841d30e1068f6592b5730a40e87801313
|
7643136230fd4f291b6e3dbf9fa562c3737901a2
|
refs/heads/master
| 2022-12-21T14:53:21.624453
| 2021-02-02T09:33:07
| 2021-02-02T09:33:07
| 132,319,537
| 13
| 14
|
MIT
| 2022-12-19T17:23:57
| 2018-05-06T08:17:45
|
Python
|
UTF-8
|
Python
| false
| false
| 283
|
#!/Users/A202009066/Documents/private-github/ml-theory-python/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from notebook.serverextensions import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"kitae.yoon@deliveryhero.co.kr"
] |
kitae.yoon@deliveryhero.co.kr
|
|
2c2e336ab747dbdb1f5feca82e9d6644c1bdbe5d
|
90f52d0348aa0f82dc1f9013faeb7041c8f04cf8
|
/wxPython3.0 Docs and Demos/demo/PenAndBrushStyles.py
|
62c8f033f0917a085babb249479e55f1e22d50ef
|
[] |
no_license
|
resource-jason-org/python-wxPythonTool
|
93a25ad93c768ca8b69ba783543cddf7deaf396b
|
fab6ec3155e6c1ae08ea30a23310006a32d08c36
|
refs/heads/master
| 2021-06-15T10:58:35.924543
| 2017-04-14T03:39:27
| 2017-04-14T03:39:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,093
|
py
|
import wx
import images
pen_styles = ["wx.SOLID", "wx.TRANSPARENT", "wx.DOT", "wx.LONG_DASH",
"wx.SHORT_DASH", "wx.DOT_DASH", "wx.BDIAGONAL_HATCH",
"wx.CROSSDIAG_HATCH", "wx.FDIAGONAL_HATCH", "wx.CROSS_HATCH",
"wx.HORIZONTAL_HATCH", "wx.VERTICAL_HATCH", "wx.USER_DASH"]
if 'wxMSW' in wx.PlatformInfo:
pen_styles.append("wx.STIPPLE")
brush_styles = ["wx.SOLID", "wx.TRANSPARENT", "wx.STIPPLE", "wx.BDIAGONAL_HATCH",
"wx.CROSSDIAG_HATCH", "wx.FDIAGONAL_HATCH", "wx.CROSS_HATCH",
"wx.HORIZONTAL_HATCH", "wx.VERTICAL_HATCH"]
class BasePanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent, style=wx.SUNKEN_BORDER|wx.WANTS_CHARS)
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_PAINT, self.OnPaint)
def OnSize(self, event):
event.Skip()
self.Refresh()
class PenPanel(BasePanel):
def __init__(self, parent, pen_name):
BasePanel.__init__(self, parent)
self.pen_name = pen_name
def OnPaint(self, event):
width, height = self.GetClientSize()
dc = wx.AutoBufferedPaintDC(self)
dc.SetBackground(wx.WHITE_BRUSH)
dc.Clear()
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
font.MakeSmaller()
dc.SetFont(font)
w, labelHeight = dc.GetTextExtent('Wy')
name = self.pen_name
if "STIPPLE" in name:
bmp = images.Smiles.GetBitmap()
penWidth = 8 #bmp.GetHeight()
pen = wx.Pen(wx.BLUE, penWidth, eval(name))
pen.SetStipple(bmp)
else:
penWidth = 3
if 'HATCH' in name:
penWidth = 8
pen = wx.Pen(wx.BLUE, penWidth, eval(name))
if "USER" in name:
# dash values represent units on, off, on. off...
pen.SetDashes([2, 5, 2, 2])
name += " ([2, 5, 2, 2])"
dc.SetTextForeground(wx.BLACK)
dc.DrawText(name, 1, 1)
dc.SetPen(pen)
y = labelHeight + (height - labelHeight)/2
dc.DrawLine(5, y, width-5, y)
class BrushPanel(BasePanel):
def __init__(self, parent, brush_name):
BasePanel.__init__(self, parent)
self.brush_name = brush_name
def OnPaint(self, event):
width, height = self.GetClientSize()
dc = wx.AutoBufferedPaintDC(self)
dc.SetBackground(wx.WHITE_BRUSH)
dc.Clear()
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
font.MakeSmaller()
dc.SetFont(font)
w, labelHeight = dc.GetTextExtent('Wy')
dc.SetPen(wx.TRANSPARENT_PEN)
name = self.brush_name
if "STIPPLE" in name:
bmp = images.Smiles.GetBitmap()
bmp.SetMask(None)
brush = wx.BrushFromBitmap(bmp)
else:
brush = wx.Brush(wx.BLUE, eval(name))
dc.SetTextForeground(wx.BLACK)
dc.DrawText(name, 1, 1)
dc.SetBrush(brush)
dc.DrawRectangle(5, labelHeight+2, width-10, height-labelHeight-5-2)
class TestPanel(wx.Panel):
def __init__(self, *args, **kw):
wx.Panel.__init__(self, *args, **kw)
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
font.SetWeight(wx.BOLD)
mainSizer = wx.BoxSizer(wx.VERTICAL)
label1 = wx.StaticText(self, -1, "Pen Styles:")
label1.SetFont(font)
mainSizer.Add(label1, 0, wx.EXPAND|wx.ALL, 10)
gs1 = wx.GridSizer(4, 4, 3, 3) # rows, cols, vgap, hgap
for pen_name in pen_styles:
small = PenPanel(self, pen_name)
gs1.Add(small, 0, wx.EXPAND)
mainSizer.Add(gs1, 1, wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, 10)
label2 = wx.StaticText(self, -1, "Brush Styles:")
label2.SetFont(font)
mainSizer.Add(label2, 0, wx.EXPAND|wx.ALL, 10)
gs2 = wx.GridSizer(3, 3, 3, 3) # rows, cols, vgap, hgap
for brush_name in brush_styles:
small = BrushPanel(self, brush_name)
gs2.Add(small, 0, wx.EXPAND)
mainSizer.Add(gs2, 1, wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, 10)
self.SetSizer(mainSizer)
#----------------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb)
return win
#----------------------------------------------------------------------
overview = """<html><body>
<h2><center>Pen and Brush Styles</center></h2>
This sample shows an e3xample of drawing with each of the available
wx.Pen and wx.Brush styles.
</body></html>
"""
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
|
[
"869999860@qq.com"
] |
869999860@qq.com
|
1e17f23fd4fc3919a1165407f95e07c0435195be
|
abad82a1f487c5ff2fb6a84059a665aa178275cb
|
/Codewars/8kyu/freudian-translator/Python/test.py
|
ccece49a500303752883fdcb55bc12ed42a76600
|
[
"MIT"
] |
permissive
|
RevansChen/online-judge
|
8ae55f136739a54f9c9640a967ec931425379507
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
refs/heads/master
| 2021-01-19T23:02:58.273081
| 2019-07-05T09:42:40
| 2019-07-05T09:42:40
| 88,911,035
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
# Python - 2.7.6
Test.describe('Basic tests')
Test.assert_equals(to_freud('test'), 'sex')
Test.assert_equals(to_freud('sexy sex'), 'sex sex')
Test.assert_equals(to_freud('This is a test'), 'sex sex sex sex')
Test.assert_equals(to_freud('This is a longer test'), 'sex sex sex sex sex')
Test.assert_equals(to_freud("You're becoming a true freudian expert"), 'sex sex sex sex sex sex')
|
[
"d79523@hotmail.com"
] |
d79523@hotmail.com
|
774ac47a0f6c5172d745e86ec2e211a1d8970ad4
|
c97d3c8848e4f03edb6c64b6abff530a6e74d616
|
/apps/__init__.py
|
9dc47c4de6093e3511f7fe4e63949ac44c74b162
|
[
"Apache-2.0"
] |
permissive
|
simhaonline/Django_web
|
eeb80d8f32a460258fceb30ecececd7410949f72
|
f7df1a7b101d41835a334b78cddf3570968799e4
|
refs/heads/master
| 2023-04-24T23:33:51.535515
| 2021-04-02T15:20:29
| 2021-04-02T15:20:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# __author__ : stray_camel
# __description__ :
# __REFERENCES__ :
# __date__: 2020/05/25 10
from .constants import system_name, description
__all__ = [
'description', 'system_name',
]
|
[
"aboyinsky@outlook.com"
] |
aboyinsky@outlook.com
|
cfb20ae27607364005a23d811d3836639a73b19b
|
afbcee5187c88b52b416fa742baa825c14cd9d7c
|
/CarelinkUploadDownload/CheckForUSB.py
|
05a26260d92f1453d9ac28d1a045f75d741e33a6
|
[] |
no_license
|
brendlin/BGSuggest
|
7fae8665e8c2ea9d60980f3f3e5d57be406dd290
|
c2ad6019e323d22358f5a1af35f6683a96a6366d
|
refs/heads/master
| 2021-01-17T07:08:12.729631
| 2019-07-29T17:47:35
| 2019-07-29T17:47:35
| 15,495,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,734
|
py
|
import sys
import usb.core
# Put the following lines in your bash_profile:
# export PYTHONPATH=$PYTHONPATH:$HOME/pyusb-1.0.0rc1
# export DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH:$HOME/libusb/lib
def CheckForUSB() :
found = False
var_usb_device = usb.core.find(find_all = True)
for var_dev in var_usb_device:
var_usb = usb.core.find(idVendor=var_dev.idVendor, idProduct=var_dev.idProduct)
if not getattr(var_usb,'langids') :
continue
var_manu = usb.util.get_string(var_usb,var_usb.iManufacturer,langid=getattr(var_usb,'langids')[0])
if 'Bayer HealthCare LLC' not in var_manu :
continue
found = True
if True :
continue
var_product = usb.util.get_string(var_usb,var_dev.iProduct ,langid=getattr(var_usb,'langids')[0])
var_serial = usb.util.get_string(var_usb,var_dev.iSerialNumber,langid=getattr(var_usb,'langids')[0])
var_drv = var_usb.is_kernel_driver_active(0)
var_cfg = var_usb.get_active_configuration()
var_int = var_cfg[(0,0)].bInterfaceNumber
print "iManufacturer: ", var_dev.iManufacturer, hex(var_dev.iManufacturer)
print "IdVendor: ", var_dev.idVendor, hex(var_dev.idVendor)
print "IdProduct: ", var_dev.idProduct, hex(var_dev.idProduct)
print "Manufacturer: ", var_manu
print "Product: ", var_product
print "Serial: ", var_serial
print "Interface #: ", var_int
print "Kernel Driver: ", var_drv
for var_config in var_usb:
for var_i in var_config:
for var_e in var_i:
print " - Endpoint Address: ", var_e.bEndpointAddress
return found
|
[
"kurt.brendlinger@cern.ch"
] |
kurt.brendlinger@cern.ch
|
e184433261654f1e09efb557b3037e57f2b7a13e
|
dca232d51f508edbb37e85f6744e22fb1c9a5a20
|
/lifesaver/bot/exts/health.py
|
4f04ccc66e0aa4c4da0b593ea6ec31633e0aaa48
|
[
"MIT"
] |
permissive
|
Gorialis/lifesaver
|
835d0fda04b7d8a436f37184b6419a7ab46fe885
|
3a91c5e5ec60dce0c076d83d984c3a270113e484
|
refs/heads/master
| 2021-08-31T10:13:25.219124
| 2017-12-21T01:53:22
| 2017-12-21T01:53:22
| 114,949,865
| 0
| 0
| null | 2017-12-21T01:52:07
| 2017-12-21T01:52:06
| null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
from discord.ext.commands import command
from lifesaver.bot import Cog
class Health(Cog):
@command()
async def ping(self, ctx):
"""Pings the bot."""
ping_time = round(ctx.bot.latency * 1000, 2)
await ctx.send(f'Pong! Heartbeat latency: `{ping_time}ms`')
def setup(bot):
bot.add_cog(Health(bot))
|
[
"cheesy.fried.bacon@gmail.com"
] |
cheesy.fried.bacon@gmail.com
|
215ae92a7fe987232180e57debe40043791f172c
|
386cf667134c1db3242823b33bd5537a462ce986
|
/app/user/urls.py
|
63d01ec10c4268b0bd2c2b07d1cf5c2bdea21b1d
|
[
"MIT"
] |
permissive
|
mukulkkumar/docker-travis-django
|
795aaff47e54a211e16cc32104693e80c3634210
|
de1d6471538ff2b86f97ee7742bb548875c0b39b
|
refs/heads/main
| 2023-04-22T04:07:01.554172
| 2021-05-15T04:52:36
| 2021-05-15T04:52:36
| 364,581,976
| 1
| 0
|
MIT
| 2021-05-15T04:52:37
| 2021-05-05T13:15:13
|
Python
|
UTF-8
|
Python
| false
| false
| 285
|
py
|
from django.urls import path
from user import views
app_name = 'user'
urlpatterns = [
path('create/', views.CreateUserView.as_view(), name='create'),
path('token/', views.CreateTokenView.as_view(), name='token'),
path('me/', views.ManageUserView.as_view(), name='me'),
]
|
[
"mukulkkumarr@gmail.com"
] |
mukulkkumarr@gmail.com
|
f1833a5283b95fdf289b1c1117fb0e652ec8a137
|
77a8581bb042b1164a2aee2c581ebaeba1cd571e
|
/manager/migrations/0001_initial.py
|
ea059195deb4dda13ebe62bb251ec602d5ab19c1
|
[
"Apache-2.0"
] |
permissive
|
EruDev/eru_manager
|
fb6f00d49c40113cf0a0871bc1aa0f771c23cb08
|
9bc7fb3af361c7de734bfa6c4e1562dd7f978500
|
refs/heads/master
| 2022-12-18T03:14:45.830681
| 2021-06-11T05:42:16
| 2021-06-11T05:42:16
| 139,976,816
| 18
| 5
|
Apache-2.0
| 2022-12-08T02:15:21
| 2018-07-06T11:34:19
|
SCSS
|
UTF-8
|
Python
| false
| false
| 896
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2018-06-10 23:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='UseInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True, verbose_name='用户名')),
('password', models.CharField(max_length=128, verbose_name='密码')),
('email', models.CharField(max_length=128, verbose_name='邮箱')),
],
options={
'verbose_name': '用户表',
'verbose_name_plural': '用户表',
},
),
]
|
[
"1027926875@qq.com"
] |
1027926875@qq.com
|
f5e58843cb02368ad358651d4056459f7b2f17f0
|
0b32ba2b4537e024f7edb7682446ca948366111c
|
/pygly/GlycanFactory.py
|
8903265d3abe74822b21e0909187ad315f744b4e
|
[] |
no_license
|
alternativeTime/PyGly
|
b459c737b2b24918314ad0e7ebc1696a7d4c5314
|
68675e6896bc7bfc625cda422d08b7ec102e74e5
|
refs/heads/master
| 2022-04-19T16:12:34.421317
| 2020-04-14T23:37:33
| 2020-04-14T23:37:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,082
|
py
|
from ReferenceTable import ReferenceTable
from GlycanFormatter import GlycoCTFormat
from MonoFactory import MonoFactory
from Monosaccharide import Anomer, Linkage
# Should structures specified by their oxford abbreviation use
# undetermined linkages for their antennae? Currently no, but
# perhaps. Right now we just get one linkage instantiation.
class GlycanFactory(ReferenceTable):
def __init__(self):
self.fmt = GlycoCTFormat()
self.mf = MonoFactory()
super(GlycanFactory,self).__init__()
def new(self,key):
return self[key].clone()
def parseSection(self,name,kv):
aliases = [name]
g = self.fmt.toGlycan('\n'.join(kv['GlycoCT'].split()))
aliases.extend(map(str.strip,kv.get('Aliases','').split(';')))
return [(a,g) for a in aliases]
def add_mono(self, parent, name, parent_pos,
child_pos=1, anomer=Anomer.beta,
parent_type=Linkage.oxygenPreserved,
child_type=Linkage.oxygenLost):
m = self.mf.new(name)
m.set_anomer(anomer)
parent.add_child(m,parent_pos=parent_pos,
child_pos=child_pos,
parent_type=parent_type,
child_type=child_type)
return m
def oxford2Glycan(self,name):
if name in self:
return self.new(name)
p = 0
if name[p] == 'F':
g = self.new('FM3')
p += 1
else:
g = self.new('M3')
# print repr(g)
# print self.fmt.toStr(g)
# print self
r = g.root()
glcnac2 = filter(lambda m: m.compatible(self.mf['GlcNAc']), r.children())[0]
man1 = glcnac2.children()[0]
man16 = [l.child() for l in man1.links() if l.parent_pos() == 6][0]
man13 = [l.child() for l in man1.links() if l.parent_pos() == 3][0]
assert name[p] == 'A'
nant = int(name[p+1])
ant = [None]
if nant in (1,2,3,4):
ant.append(self.add_mono(man13,'GlcNAc',parent_pos=2))
if nant in (2,3,4):
ant.append(self.add_mono(man16,'GlcNAc',parent_pos=2))
if nant in (3,4):
ant.append(self.add_mono(man13,'GlcNAc',parent_pos=4))
if nant in (4,):
ant.append(self.add_mono(man16,'GlcNAc',parent_pos=6))
p += 2
if p >= len(name):
return g
if name[p] == 'B':
b = self.add_mono(man1,'GlcNAc',4)
name[p] += 1
if p >= len(name):
return g
if name[p] == 'F':
nfuc = int(name[p+1])
assert (nfuc <= nant)
for fi in range(1,nfuc+1):
self.add_mono(ant[fi],'Fuc',parent_pos=6,anomer=Anomer.alpha)
p += 2
if p >= len(name):
return g
assert(name[p] == 'G')
ngal = int(name[p+1])
gal = [None]
assert (ngal <= nant)
for gi in range(1,ngal+1):
gal.append(self.add_mono(ant[gi],'Gal',parent_pos=4))
p += 2
if p >= len(name):
return g
assert(name[p] == 'S')
nsia = int(name[p+1])
sia = [None]
assert (nsia <= ngal)
for si in range(1,nsia+1):
sia.append(self.add_mono(gal[si],'Neu5Ac',parent_pos=6,child_pos=2,anomer=Anomer.alpha))
return g
|
[
"edwardsnj@gmail.com"
] |
edwardsnj@gmail.com
|
3caf6a3c0c2152578cd3ce56cabc211fa6d8b6a5
|
3e1b46a7b4b71d24c40a53c9ceda310e4114ad91
|
/allennlp_models/rc/transformer_qa/transformer_qa_predictor.py
|
530ee5b38f8ae8a782dc3bff3c59453e24ecfd27
|
[
"Apache-2.0"
] |
permissive
|
codehunk628/allennlp-models
|
dc1de94ec4607a05ddcb31a2e5a8af7bfaf9686e
|
83a14c4f4bef0c3e99f47dd1f380b48cbbba0ba6
|
refs/heads/master
| 2022-07-06T18:41:42.443241
| 2020-05-08T21:55:27
| 2020-05-08T21:55:27
| 262,493,726
| 1
| 0
|
Apache-2.0
| 2020-05-09T05:10:17
| 2020-05-09T05:10:17
| null |
UTF-8
|
Python
| false
| false
| 3,662
|
py
|
from typing import List, Dict, Any
from allennlp.models import Model
from overrides import overrides
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance, DatasetReader
from allennlp.predictors.predictor import Predictor
@Predictor.register("transformer_qa")
class TransformerQAPredictor(Predictor):
"""
Predictor for the :class:`~allennlp_rc.models.TransformerQA` model, and any
other model that takes a question and passage as input.
"""
def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:
super(TransformerQAPredictor, self).__init__(model, dataset_reader)
self._next_qid = 1
def predict(self, question: str, passage: str) -> JsonDict:
"""
Make a machine comprehension prediction on the supplied input.
See https://rajpurkar.github.io/SQuAD-explorer/ for more information about the machine comprehension task.
Parameters
----------
question : ``str``
A question about the content in the supplied paragraph. The question must be answerable by a
span in the paragraph.
passage : ``str``
A paragraph of information relevant to the question.
Returns
-------
A dictionary that represents the prediction made by the system. The answer string will be under the
"best_span_str" key.
"""
return self.predict_json({"context": passage, "question": question})
def predict_json(self, inputs: JsonDict) -> JsonDict:
results = self.predict_batch_json([inputs])
assert len(results) == 1
return results[0]
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
raise NotImplementedError(
"This predictor maps a question to multiple instances. "
"Please use _json_to_instances instead."
)
def _json_to_instances(self, json_dict: JsonDict) -> List[Instance]:
result = list(
self._dataset_reader.make_instances(
qid=str(self._next_qid),
question=json_dict["question"],
answers=[],
context=json_dict["context"],
first_answer_offset=None,
)
)
self._next_qid += 1
return result
@overrides
def _batch_json_to_instances(self, json_dicts: List[JsonDict]) -> List[Instance]:
instances = []
for json_dict in json_dicts:
instances.extend(self._json_to_instances(json_dict))
return instances
@overrides
def predict_batch_json(self, inputs: List[JsonDict]) -> List[JsonDict]:
instances = self._batch_json_to_instances(inputs)
result = self.predict_batch_instance(instances)
assert len(result) == len(inputs)
return result
@overrides
def predict_batch_instance(self, instances: List[Instance]) -> List[JsonDict]:
outputs = self._model.forward_on_instances(instances)
# group outputs with the same question id
qid_to_output: Dict[str, Dict[str, Any]] = {}
for instance, output in zip(instances, outputs):
qid = instance["metadata"]["id"]
output["id"] = qid
output["answers"] = instance["metadata"]["answers"]
if qid in qid_to_output:
old_output = qid_to_output[qid]
if old_output["best_span_scores"] < output["best_span_scores"]:
qid_to_output[qid] = output
else:
qid_to_output[qid] = output
return [sanitize(o) for o in qid_to_output.values()]
|
[
"dirkg@allenai.org"
] |
dirkg@allenai.org
|
eb8e6e115f2490e2dbf8a872be81ed5f9929010a
|
f097c3488bcfd1e5d0a566f41cbac8980795aa0f
|
/tests/academics/models/logistics/test_instructor.py
|
6aa08e826353df34aba76235d9be2a7a1f5e23d5
|
[
"MIT"
] |
permissive
|
compserv/hknweb
|
fdce5d5a8e5402ce64f6d93adcea9b43fc920874
|
76f91d2c118bd017d3b714b805d08b5c49c5693e
|
refs/heads/master
| 2023-04-29T12:58:00.253960
| 2023-02-16T01:47:17
| 2023-02-16T01:47:17
| 110,480,397
| 21
| 113
|
MIT
| 2023-09-03T01:46:27
| 2017-11-13T00:10:26
|
Python
|
UTF-8
|
Python
| false
| false
| 330
|
py
|
from django.test import TestCase
from tests.academics.utils import ModelFactory
class InstructorModelTests(TestCase):
def setUp(self):
instructor_id = "my instructor id"
instructor = ModelFactory.create_instructor(instructor_id)
self.instructor = instructor
def test_basic(self):
pass
|
[
"noreply@github.com"
] |
compserv.noreply@github.com
|
b96f9396c26d6c99d8a20b9366d3bccc8f4187df
|
a034d4ba39789e4a351112c46dd04a38180cd06c
|
/appengine/findit/infra_api_clients/swarming/swarming_task_request.py
|
278735e0edeb5230cee5099b1d451c223d7ae774
|
[
"BSD-3-Clause"
] |
permissive
|
asdfghjjklllllaaa/infra
|
050ad249ab44f264b4e2080aa9537ce74aafb022
|
8f63af54e46194cd29291813f2790ff6e986804d
|
refs/heads/master
| 2023-01-10T21:55:44.811835
| 2019-07-01T14:03:32
| 2019-07-01T14:03:32
| 194,691,941
| 1
| 0
|
BSD-3-Clause
| 2023-01-07T07:12:37
| 2019-07-01T14:45:29
|
Python
|
UTF-8
|
Python
| false
| false
| 5,119
|
py
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from libs.list_of_basestring import ListOfBasestring
from libs.structured_object import StructuredObject
class SwarmingTaskInputsRef(StructuredObject):
"""Contains information on the locations of the binaries to run against."""
# A hash represented the ioslated input pointing to the binaries to test.
isolated = basestring
# The url to the server the isolated inputs reside on.
isolatedserver = basestring
namespace = basestring
class SwarmingTaskProperties(StructuredObject):
"""Fields populated in swarming task requests."""
caches = list
command = basestring
env_prefixes = list
dimensions = list
env = list
# The maximum amount of time the swarming task is allowed to run before being
# terminated returned as a string representation of an int.
execution_timeout_secs = basestring
extra_args = ListOfBasestring
# String representation of int.
grace_period_secs = basestring
idempotent = bool
# Information pointing to the location of the test binaries.
inputs_ref = SwarmingTaskInputsRef
# String representaiton of int.
io_timeout_secs = basestring
class SwarmingTaskRequest(StructuredObject):
"""Represents a task request on Swarming server."""
# The created timestamp according to Swarming, returned as a string
# representation of a timestamp.
created_ts = basestring
# String representation of int.
expiration_secs = basestring
# The name of the swarming task.
name = basestring
parent_task_id = basestring
# The priority of the swarming task. The lower the number, the higher the
# priority, represented as a string.
priority = basestring
service_account = basestring
tags = ListOfBasestring
user = basestring
properties = SwarmingTaskProperties
# Pub/Sub parameters
pubsub_topic = basestring
pubsub_auth_token = basestring
pubsub_userdata = basestring
@staticmethod
def GetSwarmingTaskRequestTemplate():
"""Returns a template SwarmingTaskRequest object with default values."""
return SwarmingTaskRequest(
created_ts=None,
expiration_secs='3600',
name='',
parent_task_id='',
priority='150',
properties=SwarmingTaskProperties(
caches=[],
command=None,
dimensions=[],
env=[],
env_prefixes=[],
execution_timeout_secs='3600',
extra_args=ListOfBasestring(),
grace_period_secs='30',
io_timeout_secs='1200',
idempotent=True,
inputs_ref=SwarmingTaskInputsRef(
isolated=None, isolatedserver=None, namespace=None)),
pubsub_auth_token=None,
pubsub_topic=None,
pubsub_userdata=None,
service_account=None,
tags=ListOfBasestring(),
user='')
@classmethod
def FromSerializable(cls, data):
"""Deserializes the given data into a SwarmingTaskRequest.
Because Swarming frequently adds new fields to task requests, maintaining
a strict 1:1 mapping between Findit and Swarming is not feasible. Instead
when deserializing a swarming task request, only consider the fields that
are necessary.
Args:
data (dict): The dict mapping from defined attributes to their values.
Returns:
An instance of the given class with attributes set to the given data.
"""
properties = data.get('properties', {})
inputs_ref = properties.get('inputs_ref', {})
return SwarmingTaskRequest(
created_ts=data.get('created_ts'),
expiration_secs=str(data.get('expiration_secs')),
name=data.get('name'),
parent_task_id=data.get('parent_task_id'),
priority=str(data.get('priority')),
properties=SwarmingTaskProperties(
caches=properties.get('caches'),
command=properties.get('command'),
dimensions=properties.get('dimensions') or [],
env=properties.get('env') or [],
env_prefixes=properties.get('env_prefixes') or [],
execution_timeout_secs=str(
properties.get('execution_timeout_secs')),
extra_args=ListOfBasestring.FromSerializable(
properties.get('extra_args') or []),
grace_period_secs=str(properties.get('grace_period_secs')),
io_timeout_secs=str(properties.get('io_timeout_secs')),
idempotent=properties.get('idempotent'),
inputs_ref=SwarmingTaskInputsRef(
isolated=inputs_ref.get('isolated'),
isolatedserver=inputs_ref.get('isolatedserver'),
namespace=inputs_ref.get('namespace'))),
pubsub_auth_token=data.get('pubsub_auth_token'),
pubsub_topic=data.get('pubsub_topic'),
pubsub_userdata=data.get('pubsub_userdata'),
service_account=data.get('service_account'),
tags=ListOfBasestring.FromSerializable(data.get('tags') or []),
user=data.get('user'))
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
ad346a7474716451000c9a1b097d3ada0ac109a9
|
d13a069fda8ce2e0a202eb43266af7558355cdc6
|
/ROJASCUBAS/app24.py
|
24c86c1d8f04cbe20399b5b3f06f7386c6fc1e6f
|
[] |
no_license
|
CARLOSC10/T09_LIZA.DAMIAN_ROJAS.CUBAS
|
879aba53c1db9ed2bfc4c37da3bf1cbd9df14d97
|
357ec4a3c266fa4ddf8d13b1ecb2af0feb604755
|
refs/heads/master
| 2020-11-27T04:52:52.004005
| 2019-12-20T18:10:12
| 2019-12-20T18:10:12
| 229,309,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
import os
import libreria
cliente=os.sys.argv[1]
total_pagar=float(os.sys.argv[2])
consumo_energia=float(os.sys.argv[3])
libreria.MOSTRAR_RECIBO(cliente,total_pagar,consumo_energia)
|
[
"clizad@unprg.edu.pe"
] |
clizad@unprg.edu.pe
|
39d800e56c5d45069919499cce94f111c88d636d
|
22eea10f9b62ad742be7a0ee9109a913e0ddb1bb
|
/StInt/EPI/Arrays/Merged-Intervals/merge_detect.py
|
85969ec6b2e6d9edc6af203e1682077cb92d5dc9
|
[] |
no_license
|
mihirkelkar/languageprojects
|
f991610476fd64aabfec5a8cc60c263908085e17
|
4439fcf843a964fccf14a1c4dba6d054ca35d048
|
refs/heads/master
| 2021-01-01T20:16:22.322304
| 2020-12-20T19:03:40
| 2020-12-20T19:03:40
| 20,080,717
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 850
|
py
|
class Event(object):
def __init__(self, start, end):
self.start = start
self.end = end
def print_times(self):
print self.start,
print self.end
def find_merge_intervals(list_of_intervals):
sorted_list = sorted(list_of_intervals, key = lambda x : x.start)
for ii in range(1, len(sorted_list)):
if sorted_list[ii - 1].end >= sorted_list[ii].start:
sorted_list[ii].start = sorted_list[ii - 1].start
if sorted_list[ii - 1].end > sorted_list[ii].end:
sorted_list[ii].end = sorted_list[ii - 1].end
sorted_list[ii - 1] = None
return [ii for ii in sorted_list if ii != None]
def main():
a = Event(1, 3)
b = Event(2, 6)
c = Event(8, 10)
d = Event(15, 18)
temp_list = find_merge_intervals([a, b, c, d])
for ii in temp_list:
ii.print_times()
if __name__ == "__main__":
main()
|
[
"kelkarmhr@gmail.com"
] |
kelkarmhr@gmail.com
|
f1b87c3c3ede937152c85642a188029018b7f37e
|
8575ccf9e7e6b2257ec7aee1539c91afa90d65a5
|
/nlp/_02_textcluster/utils.py
|
620fa8b64c54d929715f4fe7148b0dfb2c4d06a3
|
[] |
no_license
|
oaifaye/pyfirst
|
86b8765751175f0be0fe3f95850ff018eacf51d3
|
e8661b5adf53afd47fa5cb6f01cd76535d8fc8b9
|
refs/heads/master
| 2021-12-12T00:33:39.523597
| 2021-08-13T08:32:10
| 2021-08-13T08:32:10
| 160,138,715
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,902
|
py
|
# -*- coding: utf-8 -*-
import pickle
import re
def readbunchobj(path):
file_obj = open(path,'rb')
bunch = pickle.load(file_obj)
file_obj.close()
return bunch
def writebunchobj(path,bunchobj):
file_obj = open(path,'wb')
pickle.dump(bunchobj,file_obj)
file_obj.close()
def readfile(savepath,encoding='UTF-8'):
fp = open(savepath,'r',encoding=encoding )
content = fp.read()
fp.close()
return content
def savefile(savepath,content,encoding='UTF-8'):
fp = open(savepath,'w',encoding=encoding)
fp.write(content)
fp.close()
def removeHTML(content):
content = re.sub('<\s*head[^>]*>[^<]*<\s*/\s*head\s*>','',content)
content = re.sub('<\s*style[^>]*>[^<]*<\s*/\s*style\s*>','',content)
content = re.sub('<\s*script[^>]*>[^<]*<\s*/\s*script\s*>','',content)
content = re.sub('<\s*HEAD[^>]*>[^<]*<\s*/\s*HEAD\s*>','',content)
content = re.sub('<\s*STYLE[^>]*>[^<]*<\s*/\s*STYLE\s*>','',content)
content = re.sub('<\s*SCRIPT[^>]*>[^<]*<\s*/\s*SCRIPT\s*>','',content)
content = re.sub('<[^>]+>','',content)
content = re.sub('%!.*!%','',content)
content = content.replace("\r\n","").strip()
content = content.replace("\n","").strip()
content = content.replace("\t","").strip()
content = content.replace(" ","").strip()
content = content.replace(" ","").strip()
content = content.replace(" ","").strip()
content = content.replace("“","").strip()
content = content.replace("•","").strip()
content = content.replace("”","").strip()
content = re.sub("[\s+\.\!\/_,$%^*(+\"\')]+|[+——()?【】“”!,。?、~@#¥%……&*():《》「」•●]+", "",content)
return content
# str = readfile("D:\\pythonCode\\First\\nlp\\_01_textclassify\\fastpredict\\content.txt")
# print(removeHTML(str))
|
[
"slf_work@hotmail.com"
] |
slf_work@hotmail.com
|
0e5702ddf50c99377a738187f828539c6537451a
|
dfaf0169a799d81535c952a5c284d2ff6b8f2265
|
/asgn_1/asgn_1_8.py
|
bd61dc0116ae78b066e394a8aa166f9998c72e61
|
[] |
no_license
|
RahulBantode/Python_Task_-OOPS-
|
9f10cff655691518ed7147abe6503dee6013ff96
|
ea7ad00e109349b999ec97588c59fb3b03a69bff
|
refs/heads/main
| 2023-05-25T07:29:57.714579
| 2021-06-14T06:42:49
| 2021-06-14T06:42:49
| 346,242,290
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
''' statement : write a program which accept number from user and print that number of * on screen'''
def PrintStar(value):
i=1
while i <= value:
print("*\t",end=" ")
i = i+1
def main():
no = int(input("How many times you want print \"*\" on screen : "))
PrintStar(no)
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
RahulBantode.noreply@github.com
|
e51e5b3ae82fc2479ec96ebc8b9d4e43855b4b4f
|
fed6c6bdb6276d195bc565e527c3f19369d22b74
|
/test/separation_angle_test/cal_sep_ang_astropy.py
|
c0b80cc508f5c2ebba633e20ab73f9197f6aba4f
|
[] |
no_license
|
hekunlie/astrophy-research
|
edbe12d8dde83e0896e982f08b463fdcd3279bab
|
7b2b7ada7e7421585e8993192f6111282c9cbb38
|
refs/heads/master
| 2021-11-15T05:08:51.271669
| 2021-11-13T08:53:33
| 2021-11-13T08:53:33
| 85,927,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
from astropy.coordinates import SkyCoord
from astropy import units
from sys import argv
ra1, dec1 = float(argv[1]), float(argv[2])
ra2, dec2 = float(argv[3]), float(argv[4])
c1 = SkyCoord(ra=ra1*units.deg, dec=dec1*units.deg,frame="fk5")
c2 = SkyCoord(ra=ra2*units.deg, dec=dec2*units.deg,frame="fk5")
sep = c1.separation(c2)
print("(%10.5f,%10.5f) <-- %10.5f rad (%10.5f deg) --> (%10.5f,%10.5f)"%(ra1, dec1,sep.radian, sep.deg,ra2, dec2))
|
[
"hekun_lee@sjtu.edu.cn"
] |
hekun_lee@sjtu.edu.cn
|
b98b5daca8f6e76fde1e08f8c2ad2abf8451feeb
|
d7390fea6c7f712ee32be6d3478835d965d795e0
|
/py26_24day/py26_api_test/testcases/test_add.py
|
fe1e0c2cdd31c1995b8b91963f0cff637e38ac2d
|
[] |
no_license
|
luwenchun/Automated_Test
|
2f424655d80127e3ed98657869021a775beca868
|
79b9937cfc0841b0a80d4fd45d8ff467654b5b55
|
refs/heads/master
| 2021-02-10T15:23:08.446463
| 2020-03-26T10:39:38
| 2020-03-26T10:39:38
| 244,393,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,037
|
py
|
"""
============================
Author:柠檬班-木森
Time:2020/2/28 21:21
E-mail:3247119728@qq.com
Company:湖南零檬信息技术有限公司
============================
"""
import os
import unittest
import jsonpath
from py26_24day.py26_api_test.common.readexcel import ReadExcel
from py26_24day.py26_api_test.common.handlepath import DATADIR
from py26_24day.py26_api_test.library.ddt import ddt, data
from py26_24day.py26_api_test.common.handleconfig import conf
from py26_24day.py26_api_test.common.handlerequests import SendRequest
from py26_24day.py26_api_test.common.handle_data import CaseDate, replace_data
from py26_24day.py26_api_test.common.handlelog import log
file_path = os.path.join(DATADIR, "apicases.xlsx")
@ddt
class TESTAdd(unittest.TestCase):
excel = ReadExcel(file_path, "add")
cases = excel.read_data()
request = SendRequest()
@classmethod
def setUpClass(cls):
"""管理员账户登录"""
url = conf.get("env", "url") + "/member/login"
data = {
"mobile_phone": conf.get("test_data", "admin_phone"),
"pwd": conf.get("test_data", "admin_pwd")
}
headers = eval(conf.get("env", "headers"))
response = cls.request.send(url=url, method="post", json=data, headers=headers)
res = response.json()
token = jsonpath.jsonpath(res, "$..token")[0]
token_type = jsonpath.jsonpath(res, "$..token_type")[0]
member_id = str(jsonpath.jsonpath(res, "$..id")[0])
# 将提取的数据保存到CaseData的属性中
CaseDate.admin_token_value = token_type + " " + token
CaseDate.admin_member_id = member_id
@data(*cases)
def test_add(self, case):
# 第一步:准备数据
url = conf.get("env", "url") + case["url"]
headers = eval(conf.get("env", "headers"))
headers["Authorization"] = getattr(CaseDate, "admin_token_value")
data = eval(replace_data(case["data"]))
expected = eval(case["expected"])
method = case["method"]
row = case["case_id"] + 1
# 第二步:发请求获取实际结果
response = self.request.send(url=url, method=method, json=data, headers=headers)
res = response.json()
# 第三步:断言(比对预期结果和实际结果)
try:
self.assertEqual(expected["code"], res["code"])
self.assertEqual(expected["msg"], res["msg"])
# 数据库校验
except AssertionError as e:
print("预期结果:", expected)
print("实际结果:", res)
self.excel.write_data(row=row, column=8, value="未通过")
log.error("用例:{},执行未通过".format(case["title"]))
log.exception(e)
raise e
else:
self.excel.write_data(row=row, column=8, value="通过")
log.info("用例:{},执行未通过".format(case["title"]))
|
[
"luwenchun@users.noreply.github.com"
] |
luwenchun@users.noreply.github.com
|
3d058190a3b777ae763a5449f9e672a762accbc5
|
5a01774b1815a3d9a5b02b26ca4d6ba9ecf41662
|
/Module 2/Chapter04/django-myproject-04/likes/views.py
|
8ee413f495c99889e0eb88da77a52dc41aa3f015
|
[
"MIT"
] |
permissive
|
PacktPublishing/Django-Web-Development-with-Python
|
bf08075ff0a85df41980cb5e272877e01177fd07
|
9f619f56553b5f0bca9b5ee2ae32953e142df1b2
|
refs/heads/master
| 2023-04-27T22:36:07.610076
| 2023-01-30T08:35:11
| 2023-01-30T08:35:11
| 66,646,080
| 39
| 41
|
MIT
| 2023-04-17T10:45:45
| 2016-08-26T12:30:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,718
|
py
|
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
import json
from django.http import HttpResponse
from django.views.decorators.cache import never_cache
from django.contrib.contenttypes.models import ContentType
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from .models import Like
from .templatetags.likes_tags import get_likes_count
@never_cache
@csrf_exempt
def json_set_like(request, content_type_id, object_id):
"""
Sets the object as a favorite for the current user
"""
result = {
"success": False,
}
if request.user.is_authenticated() and request.method == "POST":
content_type = ContentType.objects.get(id=content_type_id)
obj = content_type.get_object_for_this_type(pk=object_id)
like, is_created = Like.objects.get_or_create(
content_type=ContentType.objects.get_for_model(obj),
object_id=obj.pk,
user=request.user,
)
if not is_created:
like.delete()
result = {
"success": True,
"obj": unicode(obj),
"action": is_created and "added" or "removed",
"count": get_likes_count(obj),
}
json_str = json.dumps(result, ensure_ascii=False, encoding="utf8")
return HttpResponse(json_str, content_type="application/json; charset=utf-8")
@login_required(login_url=reverse_lazy("admin:login"))
def liked_object_list(request):
likes = Like.objects.filter(user=request.user)
return render(request, "likes/liked_object_list.html", {"object_list": likes})
|
[
"bhavinsavalia@packtpub.com"
] |
bhavinsavalia@packtpub.com
|
afa4af83ece4b4704127d44f4e5527dced662658
|
dd204762b0a9cdd93634e15ec981b5f868ec4315
|
/apps/documents/migrations/0004_invoice.py
|
6d174b1c1c2357bab34e08fcb0a6a92b8b9b6634
|
[] |
no_license
|
pannkotsky/sales_outlet
|
49713b39759d8c3dbedfce96953ba9c47db3d521
|
15753582f8413a98ad7259bb6a3d62e32415f632
|
refs/heads/master
| 2021-08-30T11:05:23.589421
| 2017-12-17T16:19:56
| 2017-12-17T16:19:56
| 112,077,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,425
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-29 20:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('products', '0004_auto_20171129_2118'),
('documents', '0003_auto_20171129_2210'),
]
operations = [
migrations.CreateModel(
name='Invoice',
fields=[
('number', models.CharField(max_length=15, primary_key=True, serialize=False, verbose_name='Number')),
('date', models.DateField(verbose_name='Date')),
('product_quantity', models.IntegerField(verbose_name='Product quantity')),
('contract', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='documents.Contract', verbose_name='Contract')),
('packaging', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invoices', to='products.Packaging', verbose_name='Packaging')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='products.Product', verbose_name='Product')),
],
options={
'verbose_name': 'Invoice',
'verbose_name_plural': 'Invoices',
},
),
]
|
[
"kovvalole@gmail.com"
] |
kovvalole@gmail.com
|
23e5189244ab17f8795a0d2d136873de29c91f73
|
74482894c61156c13902044b4d39917df8ed9551
|
/cryptoapis/model/address_tokens_transaction_unconfirmed_ethereumerc721token.py
|
78e6e8cccb380fb74e25368b6c6d05792ff6fb74
|
[
"MIT"
] |
permissive
|
xan187/Crypto_APIs_2.0_SDK_Python
|
bb8898556ba014cc7a4dd31b10e24bec23b74a19
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
refs/heads/main
| 2023-06-22T15:45:08.273635
| 2021-07-21T03:41:05
| 2021-07-21T03:41:05
| 387,982,780
| 1
| 0
|
NOASSERTION
| 2021-07-21T03:35:29
| 2021-07-21T03:35:29
| null |
UTF-8
|
Python
| false
| false
| 7,724
|
py
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class AddressTokensTransactionUnconfirmedEthereumerc721token(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'name': (str,), # noqa: E501
'symbol': (str,), # noqa: E501
'token_id': (str,), # noqa: E501
'contract_address': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
'symbol': 'symbol', # noqa: E501
'token_id': 'tokenId', # noqa: E501
'contract_address': 'contractAddress', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, name, symbol, token_id, contract_address, *args, **kwargs): # noqa: E501
"""AddressTokensTransactionUnconfirmedEthereumerc721token - a model defined in OpenAPI
Args:
name (str): Specifies the name of the token.
symbol (str): Specifies an identifier of the token, where up to five alphanumeric characters can be used for it.
token_id (str): Specifies the unique ID of the token.
contract_address (str): Specifies the address of the contract.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
self.symbol = symbol
self.token_id = token_id
self.contract_address = contract_address
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
[
"kristiyan.ivanov@menasoftware.com"
] |
kristiyan.ivanov@menasoftware.com
|
3ebe10cba243e3e9beff7ce90b952a15a1e05a57
|
7bcec8a9c6a240ec0888bec4179f536046464005
|
/moviesys/moviesys/.history/library/views_20210324175438.py
|
b6544870318d275779993f680f8eb2a059283e1a
|
[] |
no_license
|
yifanzhang13/MovieManagementSystem_group5
|
c64e5810914c3d33ae6cd94e8eed5dc5a3962181
|
4cca1a4299311681d69b2347ca8d7b02e0846ebc
|
refs/heads/main
| 2023-03-29T08:30:26.655108
| 2021-04-01T15:42:52
| 2021-04-01T15:42:52
| 344,417,874
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,616
|
py
|
from django.shortcuts import render
from .models import Movies, Users, Ratings, Links, Tags
from django.db import connection
from django.views import generic
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
from library.forms import SearchMovieForm
# Create your views here.
def index(request):
    """Render the home page with the total movie count and per-score rating counts.

    NOTE(review): the return value of ``cursor.execute(...)`` is used below as
    if it were a row count.  The DB-API does not guarantee that (many backends
    return the cursor or ``None``) — confirm against the configured backend,
    or count fetched rows / use ``SELECT COUNT(*)`` instead.
    """
    cursor = connection.cursor()
    try:
        num_movies = cursor.execute('SELECT * FROM library_movies')
        rating_5 = cursor.execute('SELECT * FROM library_ratings WHERE RatingScore = 5')
        rating_4 = cursor.execute('SELECT * FROM library_ratings WHERE RatingScore = 4')
        rating_3 = cursor.execute('SELECT * FROM library_ratings WHERE RatingScore = 3')
        rating_2 = cursor.execute('SELECT * FROM library_ratings WHERE RatingScore = 2')
        rating_1 = cursor.execute('SELECT * FROM library_ratings WHERE RatingScore = 1')
    finally:
        # Release the cursor even if a query raises.
        cursor.close()
    context = {
        'num_movies':num_movies,
        'rating_5':rating_5,
        'rating_4':rating_4,
        'rating_3':rating_3,
        'rating_2':rating_2,
        'rating_1':rating_1,
    }
    return render(request, 'index.html', context=context)
def MoviesView(request):
    """Render the movie list page.

    Fetches every row from ``library_movies`` with a raw SQL cursor and passes
    the rows to the ``Movies.html`` template as dictionaries.

    Improvements over the previous version:
    * the result list is no longer named ``all`` (which shadowed the builtin);
    * the manual append loop is a comprehension;
    * the unused ``movies = cursor.execute(...)`` assignment is removed.
    """
    cursor = connection.cursor()
    try:
        cursor.execute('SELECT * FROM library_movies')
        results = cursor.fetchall()
    finally:
        # Release the cursor even if the query raises.
        cursor.close()
    # Map raw (MovieID, MovieTitle, MovieGenres, ...) tuples to
    # template-friendly dicts.
    movie_dicts = [
        {
            'MovieID': row[0],
            'MovieTitle': row[1],
            'MovieGenres': row[2],
        }
        for row in results
    ]
    context = {
        'movies': movie_dicts,
    }
    return render(request, 'Movies.html', context=context)
class MovieDetailView(generic.DetailView):
    """Generic detail page for a single Movies record (Django DetailView)."""
    # DetailView resolves the template name from the model by convention.
    model = Movies
def MovieDetail(request):
    """Placeholder view for the movie search/detail flow.

    Bug fix: the previous version's body consisted only of commented-out
    lines, which is a syntax error in Python (a ``def`` must contain at least
    one statement), so the whole module failed to import.  The intended
    form-handling logic is preserved below as a TODO.
    """
    # TODO: restore the search-form flow once SearchMovieForm is wired up:
    #   form = SearchMovieForm()
    #   if request.method == 'POST':
    #       form = SearchMovieForm(request.POST)
    #       if form.is_valid():
    #           return HttpResponseRedirect('http://127.0.0.1:8000/library/movies/' + str(2))
    #   return render(request, 'library/movies_list.html', {'form': form})
    return None
class MoviesListView(generic.ListView):
    """Generic list page for all Movies records (Django ListView)."""
    # The generic view will query the database to get all records for the specified model
    # (Movies) then render a template located
    # at /locallibrary/catalog/templates/catalog/Movies_list.html (which we will create below).
    # Within the template you can access the list of books with the
    # template variable named object_list OR book_list (i.e. generically "the_model_name_list").
    model = Movies
|
[
"yifancheung13@gmail.com"
] |
yifancheung13@gmail.com
|
0d3b8f95ece7e037e2f572adc7c258d76b25e936
|
6418c60849119c2e956bf534c4118ec4858de648
|
/ax/modelbridge/transforms/one_hot.py
|
f16d151519ced8ad0ce026918093c5e33cc43449
|
[
"MIT"
] |
permissive
|
MalkeshDalia/Ax
|
c1595871871bd18183ad03692487f33df760bfaa
|
f458275d96c858cddc835dfefd34114de34d8b28
|
refs/heads/master
| 2023-04-16T00:34:19.726437
| 2019-05-01T05:16:22
| 2019-05-01T05:16:23
| 184,374,837
| 1
| 0
|
MIT
| 2023-03-21T21:47:24
| 2019-05-01T05:51:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,387
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from typing import Dict, List, Optional, TypeVar
import numpy as np
from ax.core.observation import ObservationData, ObservationFeatures
from ax.core.parameter import ChoiceParameter, Parameter, ParameterType, RangeParameter
from ax.core.search_space import SearchSpace
from ax.core.types import TConfig, TParameterization
from ax.modelbridge.transforms.base import Transform
from ax.modelbridge.transforms.rounding import (
randomized_onehot_round,
strict_onehot_round,
)
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
OH_PARAM_INFIX = "_OH_PARAM_"
T = TypeVar("T")
class OneHotEncoder:
    """Composes a label-to-integer encoder with a binarizer for one-hot coding."""

    int_encoder: LabelEncoder
    label_binarizer: LabelBinarizer

    def __init__(self, values: List[T]) -> None:
        # Fit the integer encoder first, then binarize the integer codes.
        self.int_encoder = LabelEncoder().fit(values)
        int_codes = self.int_encoder.transform(values)
        self.label_binarizer = LabelBinarizer().fit(int_codes)

    def transform(self, labels: List[T]) -> np.ndarray:
        """Return the one-hot encoding of ``labels``."""
        int_codes = self.int_encoder.transform(labels)
        return self.label_binarizer.transform(int_codes)

    def inverse_transform(self, encoded_labels: List[T]) -> List[T]:
        """Map one-hot rows back to the original label values."""
        int_codes = self.label_binarizer.inverse_transform(encoded_labels)
        return self.int_encoder.inverse_transform(int_codes)

    @property
    def classes(self) -> np.ndarray:
        """Classes discovered while fitting the transform."""
        return self.label_binarizer.classes_  # pyre-ignore[16]: missing attribute classes_
class OneHot(Transform):
    """Convert categorical parameters (unordered ChoiceParameters) to
    one-hot-encoded parameters.
    Does not convert task parameters.
    Parameters will be one-hot-encoded, yielding a set of RangeParameters,
    of type float, on [0, 1]. If there are two values, one single RangeParameter
    will be yielded, otherwise there will be a new RangeParameter for each
    ChoiceParameter value.
    In the reverse transform, floats can be converted to a one-hot encoded vector
    using one of two methods:
    Strict rounding: Choose the maximum value. With levels ['a', 'b', 'c'] and
    float values [0.2, 0.4, 0.3], the restored parameter would be set to 'b'.
    Ties are broken randomly, so values [0.2, 0.4, 0.4] is randomly set to 'b'
    or 'c'.
    Randomized rounding: Sample from the distribution. Float values
    [0.2, 0.4, 0.3] are transformed to 'a' w.p.
    0.2/0.9, 'b' w.p. 0.4/0.9, or 'c' w.p. 0.3/0.9.
    Type of rounding can be set using transform_config['rounding'] to either
    'strict' or 'randomized'. Defaults to strict.
    Transform is done in-place.
    """
    def __init__(
        self,
        search_space: SearchSpace,
        observation_features: List[ObservationFeatures],
        observation_data: List[ObservationData],
        config: Optional[TConfig] = None,
    ) -> None:
        # Identify parameters that should be transformed
        self.rounding = "strict"
        if config is not None:
            self.rounding = config.get("rounding", "strict")
        # Per-parameter encoder, and the names of the derived [0, 1] params.
        self.encoder: Dict[str, OneHotEncoder] = {}
        self.encoded_parameters: Dict[str, List[str]] = {}
        for p in search_space.parameters.values():
            # Only unordered, non-task ChoiceParameters are one-hot encoded.
            if isinstance(p, ChoiceParameter) and not p.is_ordered and not p.is_task:
                self.encoder[p.name] = OneHotEncoder(p.values)
                nc = len(self.encoder[p.name].classes)
                if nc == 2:
                    # Two levels handled in one parameter
                    self.encoded_parameters[p.name] = [p.name + OH_PARAM_INFIX]
                else:
                    # One derived parameter per class: "<name>_OH_PARAM__<i>".
                    self.encoded_parameters[p.name] = [
                        "{}{}_{}".format(p.name, OH_PARAM_INFIX, i) for i in range(nc)
                    ]
    def transform_observation_features(
        self, observation_features: List[ObservationFeatures]
    ) -> List[ObservationFeatures]:
        # Replace each categorical value with its one-hot float components.
        for obsf in observation_features:
            for p_name, encoder in self.encoder.items():
                if p_name in obsf.parameters:
                    vals = encoder.transform(labels=[obsf.parameters.pop(p_name)])[0]
                    updated_parameters: TParameterization = {
                        self.encoded_parameters[p_name][i]: v
                        for i, v in enumerate(vals)
                    }
                    obsf.parameters.update(updated_parameters)
        return observation_features
    def transform_search_space(self, search_space: SearchSpace) -> SearchSpace:
        # Swap each encoded ChoiceParameter for float RangeParameters on [0, 1];
        # all other parameters pass through unchanged.
        transformed_parameters: Dict[str, Parameter] = {}
        for p in search_space.parameters.values():
            if p.name in self.encoded_parameters:
                for new_p_name in self.encoded_parameters[p.name]:
                    transformed_parameters[new_p_name] = RangeParameter(
                        name=new_p_name,
                        parameter_type=ParameterType.FLOAT,
                        lower=0,
                        upper=1,
                    )
            else:
                transformed_parameters[p.name] = p
        return SearchSpace(
            parameters=list(transformed_parameters.values()),
            parameter_constraints=[
                pc.clone() for pc in search_space.parameter_constraints
            ],
        )
    def untransform_observation_features(
        self, observation_features: List[ObservationFeatures]
    ) -> List[ObservationFeatures]:
        # Round the float components back to a single one-hot vector (strict
        # or randomized, per self.rounding), then decode the original level.
        for obsf in observation_features:
            for p_name in self.encoder.keys():
                x = np.array(
                    [obsf.parameters.pop(p) for p in self.encoded_parameters[p_name]]
                )
                if self.rounding == "strict":
                    x = strict_onehot_round(x)
                else:
                    x = randomized_onehot_round(x)
                val = self.encoder[p_name].inverse_transform(encoded_labels=x[None, :])[
                    0
                ]
                if isinstance(val, np.bool_):
                    val = bool(val)  # Numpy bools don't serialize
                obsf.parameters[p_name] = val
        return observation_features
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
e843d418d54ba8d491c42c79498e47d2b6f448d8
|
9b422078f4ae22fe16610f2ebc54b8c7d905ccad
|
/xlsxwriter/test/comparison/test_chart_axis15.py
|
6fa48ec645b2b45ff9c7624ac1320000ad10b66f
|
[
"BSD-2-Clause-Views"
] |
permissive
|
projectsmahendra/XlsxWriter
|
73d8c73ea648a911deea63cb46b9069fb4116b60
|
9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45
|
refs/heads/master
| 2023-07-21T19:40:41.103336
| 2023-07-08T16:54:37
| 2023-07-08T16:54:37
| 353,636,960
| 0
| 0
|
NOASSERTION
| 2021-04-01T08:57:21
| 2021-04-01T08:57:20
| null |
UTF-8
|
Python
| false
| false
| 1,360
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename('chart_axis15.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'column'})
        chart.axis_ids = [45705856, 54518528]
        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]
        # Write each data set into its own worksheet column.
        for cell, column_data in zip(('A1', 'B1', 'C1'), data):
            worksheet.write_column(cell, column_data)
        # One chart series per data column.
        for col_letter in 'ABC':
            chart.add_series(
                {'values': '=Sheet1!${0}$1:${0}$5'.format(col_letter)}
            )
        chart.set_y_axis({'minor_unit': 0.4, 'major_unit': 2})
        worksheet.insert_chart('E9', chart)
        workbook.close()
        self.assertExcelEqual()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
5502baa5078bace5351800777e9e71092e41e9ec
|
5779d964d5ee42b586697a640ff0f977e0fa1e55
|
/test/test_access_approval_services_api.py
|
1e9f8cd053340a2548c2d46ca89069e1132a2cae
|
[] |
no_license
|
thomasyu888/synpy-sdk-client
|
03db42c3c8411c8c1f8808e1145d7c2a8bcc3df1
|
d1e19e26db5376c78c4ce0ff181ac3c4e0709cbb
|
refs/heads/main
| 2023-02-28T09:33:12.386220
| 2021-02-02T15:09:59
| 2021-02-02T15:09:59
| 333,744,741
| 3
| 0
| null | 2021-01-30T12:10:50
| 2021-01-28T11:57:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,014
|
py
|
# coding: utf-8
"""
Platform Repository Service
Platform Repository Service - Sage Bionetworks Platform # noqa: E501
The version of the OpenAPI document: develop-SNAPSHOT
Contact: thomas.yu@sagebionetworks.org
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import synclient
from synclient.api.access_approval_services_api import AccessApprovalServicesApi # noqa: E501
from synclient.rest import ApiException
class TestAccessApprovalServicesApi(unittest.TestCase):
    """AccessApprovalServicesApi unit test stubs"""

    def setUp(self):
        # Use the name imported at the top of the module rather than the
        # fully-qualified path; a fresh client instance per test.
        self.api = AccessApprovalServicesApi()

    def tearDown(self):
        # Nothing to clean up for these generated stubs.
        pass

    def test_get_team_access_approvals(self):
        """Test case for get_team_access_approvals
        Retrieve the Access Approvals for the given Team. # noqa: E501
        """
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"thomas.yu@sagebase.org"
] |
thomas.yu@sagebase.org
|
ab0a941410dac6aa32f28e272e64bd732c1ef4af
|
93dd86c8d0eceaee8276a5cafe8c0bfee2a315d3
|
/python/paddle/fluid/tests/unittests/test_input_spec.py
|
e329a37488a2cb8234532cd0a9beb7a1a25e72a6
|
[
"Apache-2.0"
] |
permissive
|
hutuxian/Paddle
|
f8b7693bccc6d56887164c1de0b6f6e91cffaae8
|
a1b640bc66a5cc9583de503e7406aeba67565e8d
|
refs/heads/develop
| 2023-08-29T19:36:45.382455
| 2020-09-09T09:19:07
| 2020-09-09T09:19:07
| 164,977,763
| 8
| 27
|
Apache-2.0
| 2023-06-16T09:47:39
| 2019-01-10T02:50:31
|
Python
|
UTF-8
|
Python
| false
| false
| 4,615
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.static import InputSpec
from paddle.fluid.framework import core, convert_np_dtype_to_dtype_
class TestInputSpec(unittest.TestCase):
    """Unit tests for ``paddle.static.InputSpec`` construction and helpers."""

    def test_default(self):
        # Default dtype is float32 and name is unset.
        tensor_spec = InputSpec([3, 4])
        self.assertEqual(tensor_spec.dtype,
                         convert_np_dtype_to_dtype_('float32'))
        self.assertEqual(tensor_spec.name, None)

    def test_from_tensor(self):
        x_bool = fluid.layers.fill_constant(shape=[1], dtype='bool', value=True)
        bool_spec = InputSpec.from_tensor(x_bool)
        self.assertEqual(bool_spec.dtype, x_bool.dtype)
        self.assertEqual(bool_spec.shape, x_bool.shape)
        self.assertEqual(bool_spec.name, x_bool.name)
        bool_spec2 = InputSpec.from_tensor(x_bool, name='bool_spec')
        # Bug fix: previously compared bool_spec2.name with itself, which is
        # always true; assert the explicitly supplied name instead.
        self.assertEqual(bool_spec2.name, 'bool_spec')

    def test_from_numpy(self):
        x_numpy = np.ones([10, 12])
        x_np_spec = InputSpec.from_numpy(x_numpy)
        self.assertEqual(x_np_spec.dtype,
                         convert_np_dtype_to_dtype_(x_numpy.dtype))
        self.assertEqual(x_np_spec.shape, x_numpy.shape)
        self.assertEqual(x_np_spec.name, None)
        x_numpy2 = np.array([1, 2, 3, 4]).astype('int64')
        x_np_spec2 = InputSpec.from_numpy(x_numpy2, name='x_np_int64')
        self.assertEqual(x_np_spec2.dtype,
                         convert_np_dtype_to_dtype_(x_numpy2.dtype))
        self.assertEqual(x_np_spec2.shape, x_numpy2.shape)
        self.assertEqual(x_np_spec2.name, 'x_np_int64')

    def test_shape_with_none(self):
        # None dims are normalized to -1 (dynamic dimension).
        tensor_spec = InputSpec([None, 4, None], dtype='int8', name='x_spec')
        self.assertEqual(tensor_spec.dtype, convert_np_dtype_to_dtype_('int8'))
        self.assertEqual(tensor_spec.name, 'x_spec')
        self.assertEqual(tensor_spec.shape, (-1, 4, -1))

    def test_shape_raise_error(self):
        # 1. shape should only contain int and None.
        with self.assertRaises(ValueError):
            tensor_spec = InputSpec(['None', 4, None], dtype='int8')
        # 2. shape should be type `list` or `tuple`
        with self.assertRaises(TypeError):
            tensor_spec = InputSpec(4, dtype='int8')
        # 3. len(shape) should be greater than 0.
        with self.assertRaises(ValueError):
            tensor_spec = InputSpec([], dtype='int8')

    def test_batch_and_unbatch(self):
        tensor_spec = InputSpec([10])
        # insert batch_size
        batch_tensor_spec = tensor_spec.batch(16)
        self.assertEqual(batch_tensor_spec.shape, (16, 10))
        # unbatch
        unbatch_spec = batch_tensor_spec.unbatch()
        self.assertEqual(unbatch_spec.shape, (10, ))
        # 1. `unbatch` requires len(shape) > 1
        with self.assertRaises(ValueError):
            unbatch_spec.unbatch()
        # 2. `batch` requires len(batch_size) == 1
        with self.assertRaises(ValueError):
            tensor_spec.batch([16, 12])
        # 3. `batch` requires type(batch_size) == int
        with self.assertRaises(TypeError):
            tensor_spec.batch('16')

    def test_eq_and_hash(self):
        tensor_spec_1 = InputSpec([10, 16], dtype='float32')
        tensor_spec_2 = InputSpec([10, 16], dtype='float32')
        tensor_spec_3 = InputSpec([10, 16], dtype='float32', name='x')
        tensor_spec_4 = InputSpec([16], dtype='float32', name='x')
        # override ``__eq__`` according to [shape, dtype, name]
        self.assertTrue(tensor_spec_1 == tensor_spec_2)
        self.assertTrue(tensor_spec_1 != tensor_spec_3)  # different name
        self.assertTrue(tensor_spec_3 != tensor_spec_4)  # different shape
        # override ``__hash__`` according to [shape, dtype]
        self.assertTrue(hash(tensor_spec_1) == hash(tensor_spec_2))
        self.assertTrue(hash(tensor_spec_1) == hash(tensor_spec_3))
        self.assertTrue(hash(tensor_spec_3) != hash(tensor_spec_4))


if __name__ == '__main__':
    unittest.main()
|
[
"noreply@github.com"
] |
hutuxian.noreply@github.com
|
51e616e51e931fd86dabb8d7893a8226d10e3bb7
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_minion.py
|
043f4450fe5016c580399725d455d2292bd254fe
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
#calss header
class _MINION():
def __init__(self,):
self.name = "MINION"
self.definitions = [u'a person who is not important and who has to do what another person of higher rank orders them to do: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
01546daeac217a5c225258efff378b7d9a7da2b1
|
e41651d8f9b5d260b800136672c70cb85c3b80ff
|
/Notification_System/temboo/Library/Yelp/SearchForBusiness.py
|
1c3fc99c24d7c29ab3d39126b3e66458540807c7
|
[] |
no_license
|
shriswissfed/GPS-tracking-system
|
43e667fe3d00aa8e65e86d50a4f776fcb06e8c5c
|
1c5e90a483386bd2e5c5f48f7c5b306cd5f17965
|
refs/heads/master
| 2020-05-23T03:06:46.484473
| 2018-10-03T08:50:00
| 2018-10-03T08:50:00
| 55,578,217
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,654
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# SearchForBusiness
# Retrieves information for a given business id or name.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class SearchForBusiness(Choreography):
    """Choreo wrapper: retrieves information for a given Yelp business id or name."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the SearchForBusiness Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(SearchForBusiness, self).__init__(temboo_session, '/Library/Yelp/SearchForBusiness')
    def new_input_set(self):
        # Factory for this Choreo's input container.
        return SearchForBusinessInputSet()
    def _make_result_set(self, result, path):
        # Internal factory used by the framework to wrap raw results.
        return SearchForBusinessResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Internal factory used by the framework for async executions.
        return SearchForBusinessChoreographyExecution(session, exec_id, path)
class SearchForBusinessInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the SearchForBusiness
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter simply records a named input; validation happens server-side.
    def set_BusinessId(self, value):
        """
        Set the value of the BusinessId input for this Choreo. ((conditional, string) The business id to return results for. This can be found in the URL when you're on the business page on yelp.com (i.e. "yelp-san-francisco"). This is required unless using the BusinessName input.)
        """
        super(SearchForBusinessInputSet, self)._set_input('BusinessId', value)
    def set_BusinessName(self, value):
        """
        Set the value of the BusinessName input for this Choreo. ((conditional, string) A business name to search for. This is required unless using the BusinessId input.)
        """
        super(SearchForBusinessInputSet, self)._set_input('BusinessName', value)
    def set_Category(self, value):
        """
        Set the value of the Category input for this Choreo. ((optional, string) The category to filter search results with when searching by BusinessName. This can be a list of comma delimited categories. For example, "bars,french". This can used when searching by BusinessName.)
        """
        super(SearchForBusinessInputSet, self)._set_input('Category', value)
    def set_City(self, value):
        """
        Set the value of the City input for this Choreo. ((conditional, string) The name of the city in which to search for businesses. This is required when searching by BusinessName.)
        """
        super(SearchForBusinessInputSet, self)._set_input('City', value)
    def set_ConsumerKey(self, value):
        """
        Set the value of the ConsumerKey input for this Choreo. ((required, string) The Consumer Key provided by Yelp.)
        """
        super(SearchForBusinessInputSet, self)._set_input('ConsumerKey', value)
    def set_ConsumerSecret(self, value):
        """
        Set the value of the ConsumerSecret input for this Choreo. ((required, string) The Consumer Secret provided by Yelp.)
        """
        super(SearchForBusinessInputSet, self)._set_input('ConsumerSecret', value)
    def set_Count(self, value):
        """
        Set the value of the Count input for this Choreo. ((optional, integer) The number of business results to return when searching by BusinessName. The maximum is 20.)
        """
        super(SearchForBusinessInputSet, self)._set_input('Count', value)
    def set_CountryCode(self, value):
        """
        Set the value of the CountryCode input for this Choreo. ((optional, string) The ISO 3166-1 2-digit country code to use when parsing the location field. United States = US, Canada = CA, United Kingdom = GB. This can be used when searching by BusinessName.)
        """
        super(SearchForBusinessInputSet, self)._set_input('CountryCode', value)
    def set_Deals(self, value):
        """
        Set the value of the Deals input for this Choreo. ((optional, string) Set to "true" to exclusively search for businesses with deals. This can used when searching by BusinessName.)
        """
        super(SearchForBusinessInputSet, self)._set_input('Deals', value)
    def set_LanguageCode(self, value):
        """
        Set the value of the LanguageCode input for this Choreo. ((optional, string) The ISO 639 language code. Default to "en". Reviews and snippets written in the specified language will be returned. This can be used when searching by BusinessName.)
        """
        super(SearchForBusinessInputSet, self)._set_input('LanguageCode', value)
    def set_Offset(self, value):
        """
        Set the value of the Offset input for this Choreo. ((optional, integer) Offsets the list of returned business results by this amount when searching by BusinessName.)
        """
        super(SearchForBusinessInputSet, self)._set_input('Offset', value)
    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, multiline) The format of the response from Yelp, either XML or JSON (the default).)
        """
        super(SearchForBusinessInputSet, self)._set_input('ResponseFormat', value)
    def set_Sort(self, value):
        """
        Set the value of the Sort input for this Choreo. ((optional, integer) The sort mode: 0 = Best matched, 1 = Distance (default), 2 = Highest Rated. This can be used when searching by BusinessName.)
        """
        super(SearchForBusinessInputSet, self)._set_input('Sort', value)
    def set_Token(self, value):
        """
        Set the value of the Token input for this Choreo. ((required, string) The Token provided by Yelp.)
        """
        super(SearchForBusinessInputSet, self)._set_input('Token', value)
    def set_TokenSecret(self, value):
        """
        Set the value of the TokenSecret input for this Choreo. ((required, string) The Token Secret provided by Yelp.)
        """
        super(SearchForBusinessInputSet, self)._set_input('TokenSecret', value)
class SearchForBusinessResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the SearchForBusiness Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin ``str``; kept
        # as-is for backward compatibility with existing keyword callers.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Yelp. Corresponds to the input value for ResponseFormat (defaults to JSON).)
        """
        return self._output.get('Response', None)
class SearchForBusinessChoreographyExecution(ChoreographyExecution):
    """Tracks an asynchronous execution of the SearchForBusiness Choreo."""
    def _make_result_set(self, response, path):
        # Wrap the raw response in the Choreo-specific ResultSet.
        return SearchForBusinessResultSet(response, path)
|
[
"shriswissfed@gmail.com"
] |
shriswissfed@gmail.com
|
cf01f1e148b59200162b21c0e689315de517efa4
|
e8878773d36892c74d6bbe4c257a57e23fb9e7e8
|
/backend/location/admin.py
|
6988b9097daa65c15498009d314ee94c58f8cb9d
|
[] |
no_license
|
crowdbotics-apps/logintest-29319
|
8831f9a4f85292518b794406c4082b7212b736a5
|
45e9ec4fa8fce8e04caa98e5bc13ebfed81baaf3
|
refs/heads/master
| 2023-06-30T11:25:57.766718
| 2021-08-01T17:19:28
| 2021-08-01T17:19:28
| 391,687,043
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
from django.contrib import admin
from .models import TaskerLocation, MapLocation, CustomerLocation, TaskLocation
# Expose every location model in the Django admin (same registration order
# as before: Map, Tasker, Customer, Task).
for _location_model in (MapLocation, TaskerLocation, CustomerLocation, TaskLocation):
    admin.site.register(_location_model)
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
2df038e4cf1c2ec2746999fc1b87d64d610b8b04
|
f846b6997562165df3a0463bad9728395db5ec8e
|
/app/pages/base/page.py
|
3e373cfaa83b3e524d2f0c6abd32e23c5efaa536
|
[] |
no_license
|
gonza56d/burner
|
9f687485a4728bcbc85e1fc5918be2eeed4fc8f8
|
c21f4d17215b47c3a8bfba460daf71505185b568
|
refs/heads/master
| 2023-08-04T17:27:11.883836
| 2021-07-04T21:26:08
| 2021-07-04T21:26:08
| 408,569,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,077
|
py
|
# Python
from abc import ABC, abstractmethod
from typing import Generator
class BasePage(ABC):
    """Abstract class that implements how to get and store the data regarding
    subclass attributes.

    NOTE(review): the four ``get_product_*_lookup`` methods below take
    ``soup_product`` as their first argument with no ``self`` — they read as
    implicit static methods.  Confirm whether they should be decorated with
    ``@staticmethod`` (or given ``self``); subclasses must currently match
    this exact signature.
    """
    @abstractmethod
    def get_page_name(self) -> str:
        """Implement the name of the page to store results from.
        Return
        ------
        str : Name of the page.
        """
        pass
    @abstractmethod
    def get_categories_storage_filename(self) -> str:
        """Implement the name under which the categories CSV files are stored.
        Return
        ------
        str : Filename for categories CSVs.
        """
        pass
    @abstractmethod
    def get_products_storage_filename(self) -> str:
        """Implement the name under which the products CSV files are stored.
        Return
        ------
        str : Filename for products CSVs.
        """
        pass
    @property
    @abstractmethod
    def furnitures_categories(self) -> Generator:
        """Implement how to get furnitures categories from page.
        Return
        ------
        Generator : yield from furnitures found.
        """
        pass
    @abstractmethod
    def get_product_id_lookup(soup_product) -> str:
        """Implement lookup to find product id.
        Return
        ------
        str : ID of the given BS4 product.
        """
        pass
    @abstractmethod
    def get_product_url_lookup(soup_product) -> str:
        """Implement lookup to find product url.
        Return
        ------
        str : URL of the given BS4 product.
        """
        pass
    @abstractmethod
    def get_product_name_lookup(soup_product) -> str:
        """Implement lookup to find product name.
        Return
        ------
        str : Name of the given BS4 product.
        """
        pass
    @abstractmethod
    def get_product_price_lookup(soup_product) -> float:
        """Implement lookup to find product price.
        Return
        ------
        float : Price of the given BS4 product.
        """
        pass
|
[
"gonza56d@gmail.com"
] |
gonza56d@gmail.com
|
6e8c4b912b4d24612e702fb71d21bd1cb9d4d22d
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_248/ch153_2020_04_13_20_48_31_775267.py
|
e5e5b914943c693b478c062703701e840b52b37b
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
def agrupa_por_idade(dic):
    """Classify each age value found in *dic* into an age-group label.

    Bug fix: the previous version immediately overwrote the ``dic`` parameter
    with a hard-coded dictionary, so the caller's argument was silently
    ignored.  The classification rules are unchanged:

    * 0-11   -> 'criança'
    * 12-17  -> 'adolescente'
    * 18-59  -> 'adulto'
    * 60+    -> 'idoso'

    Returns a dict mapping each age value to its group label (same age-keyed
    output shape as the original).
    """
    grupos = {}
    for idade in dic.values():
        if idade <= 11:
            grupos[idade] = 'criança'
        elif idade < 18:
            grupos[idade] = 'adolescente'
        elif idade < 60:
            grupos[idade] = 'adulto'
        else:
            grupos[idade] = 'idoso'
    return grupos
|
[
"you@example.com"
] |
you@example.com
|
a13090e72f2f244bcb3ccd776e7dee54df3c55e2
|
707287238a36b8e5f3e26c347cca580549b441e5
|
/combgen/gray_graph/multiradix.py
|
9128fc33cef7e9711baa3cb63173e8f51a80e05a
|
[] |
no_license
|
sahands/coroutine-generation
|
2a01e3c5a36fc6b82d8087a15591a452e4bca636
|
f0b318016b8925b2ab16640a588210548f7989db
|
refs/heads/master
| 2016-09-06T04:54:02.453166
| 2015-01-06T21:32:58
| 2015-01-06T21:32:58
| 17,954,406
| 8
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
from combgen.multiradix_gray.coroutine import gen_all
from .grapher import generate_pgf_gray_graph
def dist(a, b):
    """Manhattan (L1) distance between two equal-length digit sequences."""
    total = 0
    for x, y in zip(a, b):
        total += abs(x - y)
    return total


def neighbour(u, v):
    """True iff u and v are adjacent in the Gray-code graph (L1 distance 1)."""
    d = dist(u, v)
    return d == 1


def to_str(a):
    """Render digit sequence *a* as a quoted TeX math label, e.g. '"$012$"'."""
    digits = ''.join(str(x) for x in a)
    return '"${}$"'.format(digits)
def main():
    """Emit the PGF Gray-code graph for a small mixed-radix example."""
    # Radices for each digit position of the mixed-radix Gray code.
    M = [3, 2, 3]
    generate_pgf_gray_graph(gen_all, neighbour, to_str, M)
    # print()
    # print()
    # generate_pgf_gray_graph(multiradix_gray_coroutine, neighbour, to_str, M)
if __name__ == '__main__':
    main()
|
[
"sahands@gmail.com"
] |
sahands@gmail.com
|
7bd3f3f1fd8443f552fec39bb8e367be7c3eb367
|
15c3ee205f83854a219f4893c5b78a872b6717e3
|
/liquepy/num/o3.py
|
a1f78597692fea653156816a0458cb06911a2570
|
[
"MIT"
] |
permissive
|
geosharma/liquepy
|
7def9e793915268ce9365e474749bdf8d0aa3166
|
05d810fe643128b60430bf7f91be899cab9542c6
|
refs/heads/master
| 2022-02-18T17:24:44.446980
| 2022-02-13T16:57:47
| 2022-02-13T16:57:47
| 219,091,135
| 4
| 0
|
MIT
| 2021-03-20T13:52:20
| 2019-11-02T02:11:33
| null |
UTF-8
|
Python
| false
| false
| 2,492
|
py
|
from liquepy.num.models import PM4Sand as PM4SandBase
from liquepy.num.models import StressDensityModel as StressDensityModelBase
from liquepy.num import models
class PM4Sand(PM4SandBase):
    """O3 (OpenSees) variant of the PM4Sand soil model."""
    type = "pm4sand"
    o3_type = 'pm4sand'
    def __init__(self, wmd=None, liq_mass_density=None, liq_sg=1.0, g=9.8, p_atm=101000.0, **kwargs):
        """Forward construction to the base model (see liquepy.num.models)."""
        PM4SandBase.__init__(self, wmd=wmd, liq_mass_density=liq_mass_density, liq_sg=liq_sg, g=g, p_atm=p_atm, **kwargs)
        self._extra_class_inputs = []
        # Maps application-side parameter names to model attribute names
        # (presumably consumed when building the O3 numerical model — confirm).
        self.app2mod = {
            'd_r': 'relative_density',
            'g_o': 'g0_mod',
            'den': 'unit_moist_mass',
            'nu': 'poissons_ratio'
        }
    def __repr__(self):
        return "PM4SandO3 Soil model, id=%i, phi=%.1f, Dr=%.2f" % (self.id, self.phi, self.relative_density)
    def __str__(self):
        return "PM4SandO3 Soil model, id=%i, phi=%.1f, Dr=%.2f" % (self.id, self.phi, self.relative_density)
class ManzariDafaliasModel(models.ManzariDafaliasModel):
    """O3 (OpenSees) variant of the Manzari-Dafalias soil model."""
    o3_type = 'manzaridafalias_model'
    def __init__(self, wmd=None, liq_mass_density=None, liq_sg=1.0, g=9.8, p_atm=101000.0, **kwargs):
        """Forward construction to the base model (see liquepy.num.models)."""
        models.ManzariDafaliasModel.__init__(self, wmd=wmd, liq_mass_density=liq_mass_density, liq_sg=liq_sg, g=g, p_atm=p_atm, **kwargs)
        self._extra_class_inputs = []
        # Maps application-side parameter names to model attribute names.
        self.app2mod = {
            'den': 'unit_moist_mass',
            'nu': 'poissons_ratio'
        }
    def __repr__(self):
        return f"ManzariDafaliasModelO3 Soil model, id={self.id}, m_c={self.m_c:.1f}, e_curr={self.e_curr:.2f}"
    def __str__(self):
        return f"ManzariDafaliasModelO3 Soil model, id={self.id}, m_c={self.m_c:.1f}, e_curr={self.e_curr:.2f}"
class StressDensityModel(StressDensityModelBase):
    """O3 (OpenSees) variant of the stress-density soil model.

    Follows the same pattern as ``PM4Sand`` and ``ManzariDafaliasModel``
    above: tag the model for O3 and declare the app-name -> model-attribute
    mapping used when building the numerical model.
    """
    type = "stress_density_model"
    # Added for consistency: the sibling classes each expose an ``o3_type``
    # tag alongside ``type``; this class was missing it.
    o3_type = 'stress_density_model'
    def __init__(self, wmd=None, liq_mass_density=None, liq_sg=1.0, g=9.8, p_atm=101000.0, **kwargs):
        """Forward construction to the base model (see liquepy.num.models)."""
        super(StressDensityModel, self).__init__(wmd=wmd, liq_mass_density=liq_mass_density, liq_sg=liq_sg, g=g, p_atm=p_atm, **kwargs)
        self._extra_class_inputs = []
        # Maps application-side parameter names to model attribute names.
        self.app2mod = {
            'e_init': 'e_curr',
            'den': 'unit_moist_mass',
            'nu': 'poissons_ratio',
            'n': 'a'
        }
    def __repr__(self):
        # Bug fix: previously labelled itself "PM4SandO3" — a copy-paste from
        # the PM4Sand class.  NOTE(review): assumes the base class provides
        # ``id``, ``phi`` and ``relative_density``; confirm, since the other
        # sibling reports its own native attributes instead.
        return "StressDensityModelO3 Soil model, id=%i, phi=%.1f, Dr=%.2f" % (self.id, self.phi, self.relative_density)
    def __str__(self):
        return "StressDensityModelO3 Soil model, id=%i, phi=%.1f, Dr=%.2f" % (self.id, self.phi, self.relative_density)
|
[
"maxim.millen@gmail.com"
] |
maxim.millen@gmail.com
|
e3bd554ad63149e3043d6a8c0f9163f73ce18252
|
1625edfe28b4b0979fd32b4a3c5e55249a993fd5
|
/baekjoon4504.py
|
487d5335911e971092814510b9c7ce504d665229
|
[] |
no_license
|
beOk91/baekjoon2
|
b8bf504c506c6278899d4107ecfe51974ef13f5e
|
39569f8effb8e32405a7d74d98bdabcab783ec56
|
refs/heads/master
| 2023-05-11T20:11:19.015113
| 2020-09-14T23:58:49
| 2020-09-14T23:58:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
# Read a base n, then report for each subsequent number whether it is a
# multiple of n, stopping at 0.
n = int(input())
while True:
    m = int(input())
    if m == 0:
        break
    verdict = "is" if m % n == 0 else "is NOT"
    print(f"{m} {verdict} a multiple of {n}.")
[
"be_ok91@naver.com"
] |
be_ok91@naver.com
|
7d43725ec9a45d83dc444544244da257a3c11c20
|
38fe7f92d33a6091e5bfa42b1539bb7409eee0cd
|
/Solutions/MissingNumber/missingNo.py
|
96df0889669e11b4dd505034eb5994ddca5036de
|
[] |
no_license
|
phibzy/InterviewQPractice
|
bbda3fb1eb068d7870b6e8880cea16e342368bd7
|
0c8641ffb48c862ebb4e5471ae0915780b0a9f98
|
refs/heads/master
| 2023-03-28T08:12:47.146313
| 2021-03-30T23:54:13
| 2021-03-30T23:54:13
| 259,839,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
#!/usr/bin/python3
"""
@author : Chris Phibbs
@created : Thursday Mar 04, 2021 13:24:11 AEDT
@file : missingNo
"""
class Solution:
    def missingNumber(self, nums):
        """Return the single integer in [0, len(nums)] absent from nums.

        Uses the closed-form Gauss sum n*(n+1)//2 for the expected total
        instead of summing range(n+1), removing one O(n) pass.
        TC: O(n) (one pass over nums); SC: O(1).
        """
        n = len(nums)
        return n * (n + 1) // 2 - sum(nums)
|
[
"phibzy@gmail.com"
] |
phibzy@gmail.com
|
796e614f4db6b151308e0cdcec154a1c3036ce39
|
247c5a57c5014c135a24455298fdcea2f2e59a40
|
/pretreatment/barrages_prepro.py
|
4261af6ff1a1762da9c34023c53962a4d6db77df
|
[
"Apache-2.0"
] |
permissive
|
gdh756462786/transformer_barrages
|
231d06939c0624ddeaf8c7de204d4dfa56e580c7
|
08d4de27cda354a13d9a7e50ddc8cee2f6cd348c
|
refs/heads/master
| 2022-04-12T12:09:34.286761
| 2020-02-24T08:07:46
| 2020-02-24T08:07:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,925
|
py
|
# -*- coding: utf-8 -*-
"""
@date: 2020.1.10
@author: liluoqin
@function:
process pretreatment data
"""
import os
import errno
import sentencepiece as spm
import re
import logging
import jieba
import sys
from sklearn.model_selection import train_test_split
sys.path.append("..")
from hparams import Hparams
logging.basicConfig(level=logging.INFO)
file_path = os.path.dirname(__file__)
def prepro(hp):
    """Split the barrage corpus, train a joint BPE model and segment the splits.

    Side effects: writes raw train/test splits under ../barrages_data/prepro
    and BPE-segmented copies under ../barrages_data/segmented, and trains a
    sentencepiece BPE model with ``hp.vocab_size`` tokens.
    """
    barrages_data = os.path.join(file_path, '..', hp.barrages_data)

    # train
    # Keep only the first tab-separated column of each line; the header row
    # starts with "barrage" and is skipped.
    _prepro = lambda x: [line.split("\t")[0] for line in open(x, 'r', encoding="utf-8").readlines()
                         if not line.startswith("barrage")]

    def _write(sents, fname):
        # One sentence per line.
        with open(fname, 'w', encoding="utf-8") as fout:
            fout.write("\n".join(sents))

    logging.info("# Preprocessing")
    prepro_sents = _prepro(barrages_data)

    logging.info("# write preprocessed files to disk")
    os.makedirs("../barrages_data/prepro", exist_ok=True)

    # split data
    # NOTE(review): this is an autoencoding setup -- x and y are the same
    # sentence lists, split 80/20.
    train_x, test_x, train_y, test_y = train_test_split(prepro_sents, prepro_sents, test_size=0.2)

    _write(prepro_sents, "../barrages_data/prepro/all_sents.txt")
    _write(train_x, "../barrages_data/prepro/train_x.txt")
    _write(train_y, "../barrages_data/prepro/train_y.txt")
    _write(test_x, "../barrages_data/prepro/test_x.txt")
    _write(test_y, "../barrages_data/prepro/test_y.txt")

    logging.info("# Train a joint BPE model with sentencepiece")
    os.makedirs("../barrages_data/segmented", exist_ok=True)
    # sentencepiece takes its configuration as a single flag string.
    train = '--input=../barrages_data/prepro/all_sents.txt --pad_id=0 --unk_id=1 \
             --bos_id=2 --eos_id=3\
             --model_prefix=../barrages_data/segmented/bpe --vocab_size={} \
             --model_type=bpe'.format(hp.vocab_size)
    spm.SentencePieceTrainer.Train(train)

    logging.info("# Load trained bpe model")
    sp = spm.SentencePieceProcessor()
    sp.Load("../barrages_data/segmented/bpe.model")

    logging.info("# Segment")
    def _segment_and_write(sents, fname):
        # Encode each sentence into BPE pieces, space-joined, one per line.
        with open(fname, "w", encoding="utf-8") as fout:
            for sent in sents:
                pieces = sp.EncodeAsPieces(sent)
                fout.write(" ".join(pieces) + "\n")

    _segment_and_write(train_x, "../barrages_data/segmented/train_x.bpe")
    _segment_and_write(train_y, "../barrages_data/segmented/train_y.bpe")
    _segment_and_write(test_x, "../barrages_data/segmented/test_x.bpe")
    _segment_and_write(test_y, "../barrages_data/segmented/test_y.bpe")

    logging.info("# Let's see how segmented data look like")
    print("train:", open("../barrages_data/segmented/train_x.bpe", 'r', encoding="utf-8").readline())
    print("test:", open("../barrages_data/segmented/test_x.bpe", 'r', encoding="utf-8").readline())
if __name__ == "__main__":
    # Parse hyperparameters from the command line and run the pipeline.
    hparams = Hparams()
    parser = hparams.parser
    hp = parser.parse_args()
    prepro(hp)
    logging.info("# Done")
|
[
"1182953475@qq.com"
] |
1182953475@qq.com
|
b7a5e69b7f2fe61ac63f316b9653590b36e66ec3
|
8d179300d8583dd9738b6aea821a82803c7f17ea
|
/iom/iom/admin.py
|
0c7cabd4bf5cfe94ca6540a93d582235f908d5e2
|
[] |
no_license
|
tkleinen/acaciadata
|
4ad10269e79b04febc52f4b5d49b4f4c172df4a5
|
f142e9ec0048a2fc6af6bd8d5b357178c0ee93c7
|
refs/heads/master
| 2020-04-16T00:50:25.354427
| 2016-08-30T11:54:52
| 2016-08-30T11:54:52
| 33,363,185
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,861
|
py
|
'''
Created on Jun 16, 2015
@author: theo
'''
from django.contrib import admin
from django import forms
from django.forms import Textarea
from django.contrib.gis.db import models
from .models import UserProfile, Adres, Waarnemer, Meetpunt, Watergang, Organisatie
from acacia.data.models import Series, DataPoint
from django.core.exceptions import ValidationError
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
import re
class UserProfileInline(admin.StackedInline):
    # Inline editor so the related UserProfile is edited on the User page.
    model = UserProfile
    can_delete = False  # the profile lives and dies with its user
    verbose_name_plural = 'profile'
class UserAdmin(UserAdmin):
    # Extends the stock django.contrib.auth UserAdmin with the profile inline.
    # NOTE(review): intentionally shadows the imported UserAdmin name.
    inlines = (UserProfileInline, )
# Re-register UserAdmin
# Swap Django's default User admin for the profile-aware subclass above.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
@admin.register(Watergang)
class WatergangAdmin(admin.ModelAdmin):
    # Admin list view for watercourses: identification, Dutch name, water
    # type, width class and main-drainage flag.
    list_display = ('identifica', 'naamnl', 'typewater', 'breedtekla', 'hoofdafwat')
    search_fields = ('identifica', 'naamnl', )
    list_filter = ('hoofdafwat', 'breedtekla', 'typewater')
class DataPointInline(admin.TabularInline):
    # Tabular rows for individual data points.
    model = DataPoint

class SeriesInline(admin.TabularInline):
    # Series shown inline.
    # NOTE(review): stock Django admin does not support nested inlines; the
    # ``inlines`` attribute below has no effect without a third-party add-on.
    model = Series
    inlines = (DataPointInline,)
@admin.register(Meetpunt)
class MeetpuntAdmin(admin.ModelAdmin):
    # Admin for measuring points; ``name`` is derived on save, not hand-edited.
    list_display = ('name', 'nummer', 'waarnemer')
    list_filter = ('waarnemer', )
    search_fields = ('name', 'nummer', 'waarnemer', )
    fields = ('waarnemer','nummer', 'location', 'watergang','description', )
    # Show the raw geometry text of the point instead of the default widget.
    formfield_overrides = {models.PointField:{'widget': Textarea}}
    # Grappelli-style autocomplete on the watergang foreign key.
    raw_id_fields = ('watergang',)
    autocomplete_lookup_fields = {
        'fk': ['watergang',],
    }

    def save_model(self,request,obj,form,change):
        # Derive the display name "MP<waarnemer id>.<nummer>" on every save.
        obj.name = 'MP%d.%d' % (obj.waarnemer.id, obj.nummer)
        obj.save()
obj.save()
class AdresForm(forms.ModelForm):
    """Model form for Adres with Dutch postcode validation."""

    class Meta:
        # BUG FIX: ``model`` was previously a plain class attribute, which
        # django.forms ignores entirely; ModelForm configuration belongs on
        # the inner Meta class.
        model = Adres
        fields = '__all__'

    def clean_postcode(self):
        """Validate that the postcode looks like '1234AB' / '1234 AB'."""
        pattern = r'\d{4}\s*[A-Za-z]{2}'
        data = self.cleaned_data['postcode']
        # BUG FIX: re.search accepted any string merely *containing* a valid
        # postcode; fullmatch requires the whole value to match.
        if re.fullmatch(pattern, data) is None:
            raise ValidationError('Onjuiste postcode')
        return data
@admin.register(Adres)
class AdresAdmin(admin.ModelAdmin):
    # Uses AdresForm so postcodes are validated on save.
    form = AdresForm
    # One collapsible group: street/number/suffix on a row, then postcode/city.
    fieldsets = (
        ('', {'fields': (('straat', 'huisnummer', 'toevoeging'),('postcode', 'plaats')),
              'classes': ('grp-collapse grp-open',),
              }
         ),
    )
@admin.register(Waarnemer)
class WaarnemerAdmin(admin.ModelAdmin):
    # Observers, listed and ordered by surname.
    list_display = ('achternaam', 'tussenvoegsel', 'voornaam', 'organisatie')
    list_filter = ('achternaam', 'organisatie')
    search_fields = ('achternaam', 'voornaam', )
    ordering = ('achternaam', )

@admin.register(Organisatie)
class OrganisatieAdmin(admin.ModelAdmin):
    # Organisations with grappelli autocomplete on the address foreign key.
    raw_id_fields = ('adres',)
    autocomplete_lookup_fields = {
        'fk': ['adres',],
    }
|
[
"tkleinen@gmail.com"
] |
tkleinen@gmail.com
|
0efdd1a0ebe604430982bbf8426b508f8fb2c0be
|
dd32803789eb65a518457491cdbda7a32811713d
|
/app_market/migrations/0064_shiftappealinsurance.py
|
ae7988c2161c3a61cc19580b4db159d561bdafa3
|
[] |
no_license
|
shmeser/giberno-postman
|
4d974f0c9e69a4f6fb64f124b754acf9d732d79b
|
19ffefa3f818c04831aaed28e6540274ba4fbc90
|
refs/heads/master
| 2023-08-14T22:35:09.034847
| 2021-09-01T11:36:43
| 2021-09-01T11:36:43
| 407,432,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,814
|
py
|
# Generated by Django 3.1.4 on 2021-08-03 07:47
import app_market.enums
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the ShiftAppealInsurance table, an
    insurance policy attached to a single shift appeal (one-to-many via the
    ``appeal`` FK, related_name='insurance')."""

    dependencies = [
        ('app_market', '0063_auto_20210713_1532'),
    ]

    operations = [
        migrations.CreateModel(
            name='ShiftAppealInsurance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('deleted', models.BooleanField(default=False)),
                ('number', models.CharField(blank=True, max_length=255, null=True, verbose_name='Номер полиса')),
                ('insurer', models.TextField(blank=True, max_length=4096, null=True, verbose_name='Страховщик')),
                ('insured_birth_date', models.DateTimeField(blank=True, null=True)),
                ('insured_passport', models.CharField(blank=True, max_length=255, null=True)),
                ('insured_phone', models.CharField(blank=True, max_length=255, null=True)),
                ('insured_email', models.CharField(blank=True, max_length=255, null=True)),
                ('insured_reg_address', models.CharField(blank=True, max_length=255, null=True)),
                ('insured_address', models.CharField(blank=True, max_length=255, null=True)),
                ('beneficiary', models.CharField(blank=True, max_length=255, null=True)),
                ('time_start', models.DateTimeField(blank=True, null=True, verbose_name='Начало страхового периода')),
                ('time_end', models.DateTimeField(blank=True, null=True, verbose_name='Окончание страхового периода')),
                ('address', models.CharField(blank=True, max_length=255, null=True)),
                ('currency', models.PositiveIntegerField(choices=[(0, 'BONUS'), (1, 'USD'), (2, 'EUR'), (3, 'RUB')], default=app_market.enums.Currency['RUB'], verbose_name='Валюта')),
                ('insurance_premium', models.PositiveIntegerField(blank=True, null=True)),
                ('insurance_payment_expiration', models.DateTimeField(blank=True, null=True, verbose_name='Срок оплаты страховой премии')),
                ('insured_description', models.TextField(blank=True, max_length=4096, null=True)),
                ('risks', django.contrib.postgres.fields.ArrayField(base_field=models.JSONField(blank=True, null=True), blank=True, null=True, size=10)),
                ('risks_description', models.TextField(blank=True, max_length=4096, null=True)),
                ('special_conditions', models.TextField(blank=True, max_length=4096, null=True)),
                ('insurer_proxy_number', models.CharField(blank=True, max_length=255, null=True, verbose_name='Номер доверенности представителя страховщика')),
                ('insurer_sign', models.CharField(blank=True, max_length=255, null=True, verbose_name='Подпись страхователя')),
                ('confirmed_at', models.DateTimeField(blank=True, null=True)),
                ('appeal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='insurance', to='app_market.shiftappeal')),
            ],
            options={
                'verbose_name': 'Страховка на период рабочей смены',
                'verbose_name_plural': 'Страховки на период рабочих смен',
                'db_table': 'app_market__shift_appeal_insurance',
            },
        ),
    ]
|
[
"sergey.shmelev@appcraft.team"
] |
sergey.shmelev@appcraft.team
|
87c2060d5dfd08a359e93d8d648496437635aff6
|
66f09ba0bc473b2e2eaa2c8bca6e4a97a550fbc5
|
/orders/migrations/0001_initial.py
|
f6cff0f9d32bf075d0f4d6c6acb5be846546325a
|
[] |
no_license
|
sanix-sandel/ZanduB
|
729aefb83b14f4dd8e669b1b21e07f5b96271f69
|
df1159c8ccf3fddda90a5eff21b43024ca02de03
|
refs/heads/master
| 2022-11-30T20:29:12.908518
| 2020-08-11T03:58:44
| 2020-08-11T03:58:44
| 269,744,204
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,240
|
py
|
# Generated by Django 3.0.7 on 2020-07-08 08:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Auto-generated initial migration for the orders app: creates Order
    (UUID primary key, customer + store FKs) and OrderItem (line items with
    price snapshot and quantity)."""

    initial = True

    dependencies = [
        ('products', '0001_initial'),
        ('stores', '0002_auto_20200706_0906'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('email', models.EmailField(max_length=254)),
                ('address', models.CharField(max_length=250)),
                ('postal_code', models.CharField(max_length=20)),
                ('city', models.CharField(max_length=100)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.BooleanField(default=False)),
                ('paid', models.BooleanField(default=False)),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL)),
                ('store', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to='stores.Store')),
            ],
            options={
                # Newest orders first.
                'ordering': ('-created',),
            },
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('quantity', models.PositiveIntegerField(default=1)),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='orders.Order')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_items', to='products.Product')),
            ],
        ),
    ]
|
[
"sanicksikani@gmail.com"
] |
sanicksikani@gmail.com
|
b7f601fdccad85414b32a5e52a27d993d88b141f
|
f82dff6fcefbbfdfc78a6eab3739034729e164cc
|
/h.py
|
c16820148e78c1116617972be8cf038b6e18e1af
|
[] |
no_license
|
junelynpalma/j
|
89be92bb5375e853308f534a44bdcd1837c2c0e2
|
90509d88839af3825ffcab440489922bdc9e5d18
|
refs/heads/main
| 2023-08-07T04:18:49.834712
| 2021-09-27T13:12:35
| 2021-09-27T13:12:35
| 410,886,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
import schedule
import time
import os
import sys

# Fire the node helper once immediately at startup.
# NOTE(review): this command is duplicated in job() below -- consider calling
# job() here instead of repeating the string.
os.system('node g.js http://www.dogcopc.com/LOGIN http.txt 600 GET PHPSESSID:gq15q5ho3eqq6gatdqm6nqdva5')

def job():
    # Launches the node helper; os.system blocks until it exits.
    os.system('node g.js http://www.dogcopc.com/LOGIN http.txt 600 GET PHPSESSID:gq15q5ho3eqq6gatdqm6nqdva5')

# Run the job every second (effectively back-to-back, since job() blocks).
schedule.every(1).seconds.do(job)

while True:
    # Polling loop: trigger any due jobs, then sleep briefly.
    schedule.run_pending()
    time.sleep(1)
|
[
"noreply@github.com"
] |
junelynpalma.noreply@github.com
|
d9c0423c56eef2d1798fb3f943cef5063284917d
|
2f308fdfd3e0b04a620db242694c27f9dcf80f68
|
/src/projects/migrations/0070_auto_20161015_1458.py
|
feb4546da5cdc53d28c2214689d8f3eaa8df4ab8
|
[] |
no_license
|
ouhouhsami/ecoquartier
|
53943b1f54f9e958f17424a7de37bf037c6899d1
|
1faf47a70e1f8821245588fca3d2b70c80129ae7
|
refs/heads/master
| 2020-06-30T00:58:10.013928
| 2016-12-13T16:58:57
| 2016-12-13T16:58:57
| 67,034,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 573
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-15 14:58
from __future__ import unicode_literals
from django.db import migrations
def forward(apps, schema_editor):
    """Ensure a LabelEcoQuartier row with label 'charte' exists."""
    LabelEcoQuartier = apps.get_model("projects", "LabelEcoQuartier")
    l, created = LabelEcoQuartier.objects.get_or_create(label="charte")

def backward(apps, schema_editor):
    # Intentionally a no-op: the seeded label is left in place on rollback.
    pass

class Migration(migrations.Migration):
    """Data migration seeding the 'charte' EcoQuartier label."""

    dependencies = [
        ('projects', '0069_remove_project_type_operation'),
    ]

    operations = [
        migrations.RunPython(forward, backward),
    ]
|
[
"samuel.goldszmidt@gmail.com"
] |
samuel.goldszmidt@gmail.com
|
fc889c6c341df9b46d24a318d471370fb6bb9497
|
eba02c3c98f00288e81b5898a201cc29518364f7
|
/chapter_007/pets.py
|
83d401ba31dbbed09db27ad6c7c54fce7c610e20
|
[] |
no_license
|
kengru/pcrash-course
|
29f3cf49acfd4a177387634410d28de71d279e06
|
5aa5b174e85a0964eaeee1874b2be1c144b7c192
|
refs/heads/master
| 2021-05-16T09:36:16.349626
| 2017-10-11T17:56:56
| 2017-10-11T17:56:56
| 104,481,645
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
# Strip every occurrence of one value from a list, printing before and after.
pets = ['dog', 'cat', 'dog', 'goldfish', 'cat', 'rabbit', 'cat']
print(pets)

unwanted = 'cat'
while unwanted in pets:
    pets.remove(unwanted)
print(pets)
|
[
"kengrullon@gmail.com"
] |
kengrullon@gmail.com
|
aecd9b53dfc0f6dc6b969002346dc62541f907ee
|
c552cf5ed4714a3b5bdeab7af46092ff465b8c6a
|
/Python/SW Expert Academy/D4/6959. 이상한 나라의 덧셈게임.py
|
480bd6b6edb1aadc98eabbbeeea28ef8a2dfe774
|
[] |
no_license
|
ksb8320/Algorithm
|
a786c5ab04e28ae9b3d180a77850899328075443
|
74b33f81eefa4cebf0dd8f1c3d65394d2aede372
|
refs/heads/master
| 2022-12-17T22:49:17.144572
| 2020-09-23T14:45:21
| 2020-09-23T14:45:21
| 253,751,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
import sys
sys.stdin = open("input.txt")  # redirect stdin so input() reads the test file

def plus():
    """Play the 'strange addition' game on the global digit list ``lst``.

    Each move replaces the two leading digits with their sum; on a carry
    the pair becomes 1 and (sum - 10) instead. Players alternate moves, so
    the winner is decided by the parity of the global move counter ``cnt``.
    Returns "A" if the move count is odd, otherwise "B".
    """
    global cnt
    while len(lst) > 1:
        new = lst[0] + lst[1]
        if new < 10:
            # No carry: the leading pair collapses into a single digit.
            lst.pop(0)
            lst.pop(0)
            lst.insert(0, new)
            cnt += 1
        else:
            # Carry: keep a leading 1 followed by the remainder.
            lst[0] = 1
            lst[1] = new - 10
            cnt += 1
    # Odd move count -> player A made the final move.
    if cnt % 2 == 1:
        return "A"
    else:
        return "B"

for t in range(int(input())):
    # One number per test case; split it into individual digits.
    num = input()
    lst = []
    for i in range(len(num)):
        lst.append(int(num[i]))
    cnt = 0
    print("#{} {}".format(t + 1, plus()))
|
[
"ksb8320@gmail.com"
] |
ksb8320@gmail.com
|
6355476cfb93b8ed838af833f12252e27623f0f5
|
316b8375a7ef8095f09973d13f5a49bc7fbe7580
|
/leetcode/332.py
|
06d41eae074e21c5ecfafeb7129b8286192a3c5d
|
[] |
no_license
|
zhaolijian/suanfa
|
9a8d23fbca01d994f7eef24631783c4b7ed25683
|
4f3b25f360f30c0e604ba4dc4d5774ccb5f25b32
|
refs/heads/master
| 2023-06-08T17:12:41.522937
| 2021-06-27T08:13:16
| 2021-06-27T08:13:16
| 313,269,459
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,123
|
py
|
# 给定一个机票的字符串二维数组 [from, to],子数组中的两个成员分别表示飞机出发和降落的机场地点,
# 对该行程进行重新规划排序。所有这些机票都属于一个从 JFK(肯尼迪国际机场)出发的先生,所以该行程必须从 JFK 开始。
# 如果存在多种有效的行程,你可以按字符自然排序返回最小的行程组合。
# 例如,行程 ["JFK", "LGA"] 与 ["JFK", "LGB"] 相比就更小,排序更靠前
# 所有的机场都用三个大写字母表示(机场代码)。
# 假定所有机票至少存在一种合理的行程。
# 方法:Hierholzer 算法
# Hierholzer 算法用于在连通图中寻找欧拉路径,其流程如下:
# 1.从起点出发,进行深度优先搜索。
# 2.每次沿着某条边从某个顶点移动到另外一个顶点的时候,都需要删除这条边。
# 3.如果没有可移动的路径,则将所在节点加入到栈中,并返回。
import collections
class Solution:
    """Reconstruct the lexicographically smallest itinerary from JFK using
    Hierholzer's algorithm: consume the smallest outgoing edge first and
    prepend each dead-end airport to the route while backtracking."""

    def findItinerary(self, tickets):
        # Adjacency list, each destination list kept in lexicographic order.
        graph = collections.defaultdict(list)
        for origin, dest in tickets:
            graph[origin].append(dest)
        for origin in graph:
            graph[origin].sort()

        route = []

        def visit(airport):
            # Use up every outgoing ticket (smallest first) before recording
            # this airport at the front of the route.
            while graph[airport]:
                visit(graph[airport].pop(0))
            route.insert(0, airport)

        visit('JFK')
        return route
# 或
from collections import defaultdict
class Solution:
    """Variant of Hierholzer's algorithm that rebuilds the route by
    prepending each exhausted airport via list concatenation."""

    def findItinerary(self, tickets):
        adjacency = defaultdict(list)
        for src, dst in tickets:
            adjacency[src].append(dst)
        for src in adjacency:
            adjacency[src].sort()

        itinerary = []

        def walk(airport):
            nonlocal itinerary
            # Consume outgoing edges in lexicographic order.
            while adjacency[airport]:
                walk(adjacency[airport].pop(0))
            itinerary = [airport] + itinerary

        walk('JFK')
        return itinerary
if __name__ == '__main__':
    # Quick manual check; expected output: ['JFK', 'NRT', 'JFK', 'KUL'].
    s = Solution()
    tickets = [["JFK","KUL"],["JFK","NRT"],["NRT","JFK"]]
    print(s.findItinerary(tickets))
|
[
"820913569@qq.com"
] |
820913569@qq.com
|
5095f0660d9382f5d1d97384535279c1d362de76
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/KCB_YCHF/KCB_YCHF_MM/SHOffer/YCHF_KCBYCHF_SHBP_356.py
|
5d466bcc423c69283f3b9c0e2fe9973b9b6699aa
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,487
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test//xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test//service")
from ServiceConfig import *
from ARmainservice import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test//mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test//utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
from env_restart import *
class YCHF_KCBYCHF_SHBP_356(xtp_test_case):
    """Regression case: database service restart while submitting an SH
    A-share forward-best (本方最优) initial sell order; the order is expected
    to stay in the initial state."""

    def setUp(self):
        # Environment preparation is currently disabled; the case runs
        # against whatever state earlier cases left behind.
        #sql_transfer = SqlData_Transfer()
        #sql_transfer.transfer_fund_asset('YCHF_KCBYCHF_SHBP_356')
        #clear_data_and_restart_all()
        #Api.trade.Logout()
        #Api.trade.Login()
        pass

    #
    def test_YCHF_KCBYCHF_SHBP_356(self):
        title = '重启数据库服务(沪A本方最优初始卖出)'
        # Expected values for this test case.
        # Possible order states: initial, unfilled, partially filled, fully
        # filled, partial-cancel reported, partially cancelled, reported and
        # pending cancel, cancelled, rejected, cancel-rejected, internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need no change.
        case_goal = {
            '期望状态': '初始',
            'errorID': 0,
            'errorMSG': queryOrderErrorMsg(0),
            '是否生成报单': '是',
            '是否是撤废': '否',
            # '是否是新股申购': '',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Build the order parameters.
        # Args: ticker, market, security type, security status, trading
        # status, side (B buy / S sell), expected state, Api.
        stkparm = QueryStkPriceQty('688000', '1', '4', '2', '0', 'S', case_goal['期望状态'], Api)
        # If the order parameters could not be fetched, the case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '报单测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            print(stkparm['错误原因'])
            self.assertEqual(rs['报单测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':1,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_FORWARD_BEST'],
                'price': stkparm['涨停价'],
                'quantity': 300,
                'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['报单测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
        ## Restore available funds (currently disabled).
        #sql_transfer = SqlData_Transfer()
        #sql_transfer.transfer_fund_asset('YW_KCB_BAK_000')
        #oms_restart()
        self.assertEqual(rs['报单测试结果'], True) # 211
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
ebf10c635faeba2b5910b7e187fea1e9f26f56e4
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/107/usersdata/195/52193/submittedfiles/questao3.py
|
3ebb503b6b1fa43bc646831b23a3e4d72dde619d
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
# -*- coding: utf-8 -*-
# Reads two integers and prints 'S' if both are odd primes, otherwise 'N'.
p = int(input('digite p:'))
q = int(input('digite q:'))
contador = 0
# Count proper divisors of p in [2, p).
i = 2
while i < p:
    if p % i == 0:
        contador = contador + 1
    i = i + 1
# BUG FIX: the original reused ``i`` without resetting it, so q was only
# tested against candidate divisors >= p and small divisors were missed.
i = 2
while i < q:
    if q % i == 0:
        contador = contador + 1
    i = i + 1
# 'S' only when neither number had any divisor and both are odd.
if contador == 0 and p % 2 != 0 and q % 2 != 0:
    print('S')
else:
    print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
0b88ec4badafd9b2ae3dca8979aed0160c9e81ee
|
f3f7fc5bf1d5657e7a67e46aee4b105198767889
|
/manage.py
|
f0c87e8e4ef01a8361270108fde7443535393ad4
|
[] |
no_license
|
xjr7670/12306
|
a2a16b73ce3cdb8ff1f8646429c2dc40716706fb
|
32f065798732de744ef3a66739598af53a63bb32
|
refs/heads/master
| 2021-01-21T02:55:53.997748
| 2016-09-24T02:07:23
| 2016-09-24T02:07:23
| 68,806,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
"""Flask-Script management entry point: registers a shell command and runs
the manager CLI."""
from app import create_app
from flask_script import Manager, Shell

app = create_app('default')
manager = Manager(app)
# BUG FIX: Manager.add_command requires a Command instance alongside the
# name; the original passed only the string "shell", so no shell command
# was ever registered.
manager.add_command("shell", Shell())

if __name__ == '__main__':
    manager.run()
|
[
"xjr30226@126.com"
] |
xjr30226@126.com
|
de183b0867da57105653f428107297793038dc43
|
63d6a6809773c49edee2894fbe45915763756f90
|
/authlib/admin_oauth/views.py
|
5faeec307539ae210eeb130ba46e959308b12173
|
[
"MIT"
] |
permissive
|
barseghyanartur/django-authlib
|
faaba71d80bec3331f9cd1dcd745dbff0ff96f6b
|
4b4159eba619f6174d1f1e1cf33adf4893fa2315
|
refs/heads/master
| 2021-08-23T08:22:56.390862
| 2017-12-04T08:52:50
| 2017-12-04T08:54:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,707
|
py
|
import re
from django import VERSION
from django.conf import settings
from django.contrib import auth, messages
from django.shortcuts import redirect
from django.utils.http import is_safe_url
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from authlib.google import GoogleOAuth2Client
REDIRECT_SESSION_KEY = 'admin-oauth-next'
ADMIN_OAUTH_PATTERNS = settings.ADMIN_OAUTH_PATTERNS
if VERSION < (1, 11):
    # Django < 1.11 exposed is_safe_url(url, host); wrap it so the rest of
    # this module can use the newer (url, allowed_hosts) signature.
    _orig_is_safe_url = is_safe_url

    def is_safe_url(url, allowed_hosts):
        host, = allowed_hosts  # the shim supports exactly one allowed host
        return _orig_is_safe_url(url=url, host=host)
def retrieve_next(request):
    """Pop and return the stored post-login redirect target, if safe.

    Returns None when no target was stored or when the URL fails the
    same-host safety check (open-redirect protection).
    """
    # Renamed from ``next`` to avoid shadowing the builtin.
    next_url = request.session.pop(REDIRECT_SESSION_KEY, None)
    if is_safe_url(url=next_url, allowed_hosts=[request.get_host()]):
        return next_url
    return None
@never_cache
def admin_oauth(request):
    """Google-OAuth login view for the Django admin.

    Flow: stash any ``?next=`` target in the session, redirect to the
    provider if no OAuth response has arrived yet, then match the returned
    email against ADMIN_OAUTH_PATTERNS and log the mapped staff user in.
    """
    client = GoogleOAuth2Client(request)

    if request.GET.get('next'):
        # Remember the post-login target across the OAuth round-trip.
        request.session[REDIRECT_SESSION_KEY] = request.GET['next']

    if all(key not in request.GET for key in ('code', 'oauth_token')):
        # No authorization response yet: send the user to the provider.
        return redirect(client.get_authentication_url())

    user_data = client.get_user_data()
    email = user_data.get('email', '')

    if email:
        for pattern, user_mail in ADMIN_OAUTH_PATTERNS:
            if re.search(pattern, email):
                # First matching pattern wins; authenticate as its mapped user.
                user = auth.authenticate(email=user_mail)
                if user and user.is_staff:
                    auth.login(request, user)
                    return redirect(retrieve_next(request) or 'admin:index')

    messages.error(
        request,
        _('No email address received or email domain unknown.'),
    )
    return redirect('admin:login')
|
[
"mk@feinheit.ch"
] |
mk@feinheit.ch
|
690313cbf83db05c3a09cb68f375a86b770771d5
|
d548f1bde0d20dab787b59695e5467a44db1cef3
|
/CarParkArcGisApi/CarParkArcGisApi/GetCurrentLocationApi.py
|
8c5e60465173ffcf519b00b199168438cd385aaa
|
[
"MIT"
] |
permissive
|
moazzamwaheed2017/carparkapi
|
2f53ab5b823d9afa11adc14073d7e147ca1d1de6
|
e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a
|
refs/heads/master
| 2023-01-12T03:51:42.497815
| 2020-02-25T14:00:37
| 2020-02-25T14:00:37
| 236,687,771
| 0
| 0
|
MIT
| 2023-01-07T14:21:30
| 2020-01-28T08:20:00
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 345
|
py
|
import requests

# Look up this machine's public IP, then geolocate it via the geojs.io API.
ip_request = requests.get('https://get.geojs.io/v1/ip.json')
my_ip = ip_request.json()['ip']

geo_request_url = 'https://get.geojs.io/v1/ip/geo/' + my_ip + '.json'
geo_request = requests.get(geo_request_url)
geo_data = geo_request.json()

# geojs returns latitude/longitude as strings.
print('Latitude: ' + geo_data['latitude'])
print('Longitude: ' + geo_data['longitude'])
|
[
"moazzamwaheed@gmail.com"
] |
moazzamwaheed@gmail.com
|
a9221ed4b7a9c89294debbcd8f295e48195a8098
|
9a9cffc79943e1846cfb2b7463b889aac102fcfe
|
/quickunit/vcs/git.py
|
e2d0143ff6c0ef3f14ee2b3d8262511f10e9f17b
|
[
"Apache-2.0"
] |
permissive
|
dcramer/quickunit
|
5c7483f7b33758df3bc3181409ec95fb2c3f87e1
|
f72b038aaead2c6f2c6013a94a1823724f59a205
|
refs/heads/master
| 2020-05-17T09:20:43.604622
| 2013-07-29T21:32:50
| 2013-07-29T21:32:50
| 3,350,340
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 926
|
py
|
from subprocess import Popen, PIPE, STDOUT
from quickunit.diff import DiffParser
from quickunit.vcs.base import ChangedFile
def parse_commit(parent=None):
    """Return ChangedFile entries for the diff between HEAD and its merge
    base with *parent* (defaults to 'master').

    Removed files are skipped; each entry records the path (with git's
    ``b/`` prefix stripped) and whether the file is newly added.
    """
    if parent is None:
        parent = 'master'

    # git merge-base HEAD <parent> -> common ancestor revision.
    # BUG FIX: use communicate() instead of reading proc.stdout directly --
    # the original never reaped the child processes (zombies) and could
    # deadlock on large output.
    proc = Popen(['git', 'merge-base', 'HEAD', parent], stdout=PIPE, stderr=STDOUT)
    parent_revision = proc.communicate()[0].strip()

    # pull in our diff
    # git diff `git merge-base HEAD master`
    proc = Popen(['git', 'diff', parent_revision], stdout=PIPE, stderr=STDOUT)
    diff = proc.communicate()[0].strip()

    parser = DiffParser(diff)
    files = []
    # Renamed loop variable from ``file`` to avoid shadowing the builtin.
    for entry in parser.parse():
        if entry['is_header']:
            continue

        # file was removed
        if entry['new_filename'] == '/dev/null':
            continue

        filename = entry['new_filename'][2:]
        is_new = (entry['old_filename'] == '/dev/null')

        files.append(ChangedFile(filename, is_new))

    return files
|
[
"dcramer@gmail.com"
] |
dcramer@gmail.com
|
f86e2edac18dd5a144ac9e4e8e186ac315bc9758
|
eb85b501de159dd2c549e4d2433a03592aae5e15
|
/evernote_to_sqlite/cli.py
|
b2c550e3c6c38a26941cd9359960f032d9af4bb7
|
[
"Apache-2.0"
] |
permissive
|
ktaranov/evernote-to-sqlite
|
f6b3912da78ee74afcf9a43b4b2b2db05eba05c7
|
92254b71075c8806bca258c939e24af8397cdf98
|
refs/heads/main
| 2023-01-20T04:32:42.877585
| 2020-10-16T20:15:51
| 2020-10-16T20:15:51
| 319,658,620
| 1
| 0
|
Apache-2.0
| 2020-12-08T14:11:02
| 2020-12-08T14:11:02
| null |
UTF-8
|
Python
| false
| false
| 914
|
py
|
import sqlite_utils
import click
import os
from .utils import find_all_tags, save_note, ensure_indexes
# Root click command group; subcommands (e.g. ``enex``) attach to it below.
@click.group()
@click.version_option()
def cli():
    "Tools for converting Evernote content to SQLite"
@cli.command()
@click.argument(
    "db_path",
    type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.argument(
    "enex_file",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
def enex(db_path, enex_file):
    "Convert Evernote .enex exports to SQLite"
    # File size drives the progress bar (find_all_tags reports byte offsets).
    file_length = os.path.getsize(enex_file)
    db = sqlite_utils.Database(db_path)
    # BUG FIX: open the export via a context manager so the handle is closed
    # even if parsing or saving raises part-way through; the original only
    # closed it on the success path.
    with open(enex_file) as fp:
        with click.progressbar(length=file_length, label="Importing from ENEX") as bar:
            for tag, note in find_all_tags(fp, ["note"], progress_callback=bar.update):
                save_note(db, note)
    ensure_indexes(db)
|
[
"swillison@gmail.com"
] |
swillison@gmail.com
|
1fb23b2832fa8ad3d8b5f3b2757274ad1463a27e
|
02862f0b86638cd4e252bfd6bb92be931c10d569
|
/algorithms/arrays/next_permutation/next_permutation.py
|
1ad5e3cd64ea4b8252e014e068282398962daa08
|
[] |
no_license
|
Himstar8/Algorithm-Enthusiasts
|
ceb65df893d668a59018cbda278c3a03622a6311
|
d3634daa7676e5a06646e0dbfc4ed30dac18ca9d
|
refs/heads/master
| 2020-07-09T16:44:50.520423
| 2019-03-21T18:20:10
| 2019-03-21T18:20:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 837
|
py
|
def next_permutation(nums):
def find_min_larger_index(idx, n):
while idx < len(nums) and nums[idx] > n:
idx += 1
return idx - 1
i = len(nums) - 1
while i > 0 and nums[i] <= nums[i-1]:
i -= 1
if i == 0:
nums.reverse()
else:
idx = find_min_larger_index(i, nums[i-1])
nums[idx], nums[i-1] = nums[i-1], nums[idx]
start = i
end = len(nums) - 1
while start < end:
nums[start], nums[end] = nums[end], nums[start]
start += 1
end -= 1
if __name__ == '__main__':
    # Smoke tests: wrap-around case, ordinary case, and swap-at-front case.
    nums = [5, 1, 1]
    next_permutation(nums)
    assert(nums == [1, 1, 5])
    nums = [2, 1, 4, 3]
    next_permutation(nums)
    assert(nums == [2, 3, 1, 4])
    nums = [1, 5, 1]
    next_permutation(nums)
    assert(nums == [5, 1, 1])
|
[
"zachliugis@gmail.com"
] |
zachliugis@gmail.com
|
27436bf65203665f1e775cd08464696bf984e191
|
67612c27c6d79ae180a5bc266833899abfefe9f5
|
/面试题64. 求1+2+…+n LCOF.py
|
d42954a5bf567328df88e46091a401b867c5b820
|
[] |
no_license
|
Katherinaxxx/leetcode
|
7e9d0bd7dc613a824116f1247f42bfc33e485ff3
|
dcebf49d1e024b9e69c4d9606c8afb32b9d07029
|
refs/heads/master
| 2023-01-27T20:14:09.459296
| 2023-01-08T07:01:53
| 2023-01-08T07:01:53
| 215,688,672
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2020/6/2 上午11:20
@Author : Catherinexxx
@Site :
@File : 面试题64. 求1+2+…+n LCOF.py
@Software: PyCharm
"""
"""
求 1+2+...+n ,要求不能使用乘除法、for、while、if、else、switch、case等关键字及条件判断语句(A?B:C)。
"""
# math (1+n)n/2 O(1)time
class Solution:
    def sumNums(self, n: int) -> int:
        """Return 1 + 2 + ... + n via the closed-form Gauss sum, O(1)."""
        return n * (n + 1) // 2
|
[
"359391236@qq.com"
] |
359391236@qq.com
|
96940db9386ddb1089016400e3a545dda5a13801
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/ptp1b_input/Lbr/br-66_MD_NVT_rerun/set_1ns_equi_1.py
|
4a79ed1108775404ea3880b83c80a1b28ee355a5
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 926
|
py
|
import os
# One-off driver: for every TI lambda window of the br-66 system, build a
# clean working directory, instantiate the input/PBS templates for that
# lambda, stage the topology and restart files, and submit the job.
# Everything shells out via os.system, so run it from inside `dir`.
dir = '/mnt/scratch/songlin3/run/ptp1b/Lbr/MD_NVT_rerun/ti_one-step/br_66/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi_1.in'    # equilibration input template
temp_pbs = filesdir + 'temp_1ns_equi_1.pbs'  # PBS submission-script template
# The twelve lambda values; each gets a sub-directory named "%6.5f" % lambda.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # Recreate a clean per-lambda working directory and enter it.
    os.system("rm -r %6.5f" %(j))
    os.system("mkdir %6.5f" %(j))
    os.chdir("%6.5f" %(j))
    os.system("rm *")
    workdir = dir + "%6.5f" %(j) + '/'
    #equiin: copy the input template, substituting XXX with this lambda
    eqin = workdir + "%6.5f_equi_1.in" %(j)
    os.system("cp %s %s" %(temp_equiin, eqin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
    #PBS: copy the submission script template, same XXX substitution
    pbs = workdir + "%6.5f_1ns_equi_1.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    #top: stage the shared topology (.prmtop) and restart (.rst) files
    os.system("cp ../br-66_merged.prmtop .")
    os.system("cp ../0.5_equi_0.rst .")
    #submit pbs job, then return to the parent dir for the next window
    os.system("qsub %s" %(pbs))
    os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
0c6b797449a22309a265e557ebd1dadf4115400b
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/MY_REPOS/Lambda-Resource-Static-Assets/2-resources/BLOG/Data-Structures/1-Python/Python-master/linear_algebra/src/rayleigh_quotient.py
|
69bbbac119e80d48a3cd1670171c31e6020d8d95
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 1,451
|
py
|
"""
https://en.wikipedia.org/wiki/Rayleigh_quotient
"""
import numpy as np
def is_hermitian(matrix: np.array) -> bool:
    """Return True iff *matrix* equals its own conjugate transpose.

    >>> import numpy as np
    >>> A = np.array([
    ...     [2, 2+1j, 4],
    ...     [2-1j, 3, 1j],
    ...     [4, -1j, 1]])
    >>> is_hermitian(A)
    True
    >>> B = np.array([
    ...     [2, 2+1j, 4+1j],
    ...     [2-1j, 3, 1j],
    ...     [4, -1j, 1]])
    >>> is_hermitian(B)
    False
    """
    conjugate_transpose = matrix.conjugate().T
    # array_equal also handles the non-square case (shape mismatch -> False).
    return np.array_equal(matrix, conjugate_transpose)
def rayleigh_quotient(A: np.array, v: np.array) -> float:
    """Compute the Rayleigh quotient (v* A v) / (v* v) for Hermitian A.

    NOTE(review): despite the ``float`` annotation, the matrix products
    yield a 1x1 ndarray and that is what gets returned — confirm no
    caller indexes into it before tightening the return type.

    >>> import numpy as np
    >>> A = np.array([
    ...     [1, 2, 4],
    ...     [2, 3, -1],
    ...     [4, -1, 1]
    ... ])
    >>> v = np.array([
    ...     [1],
    ...     [2],
    ...     [3]
    ... ])
    >>> rayleigh_quotient(A, v)
    array([[3.]])
    """
    v_conj_t = v.conjugate().T
    numerator = v_conj_t @ A @ v
    denominator = v_conj_t @ v
    return numerator / denominator
def tests() -> None:
    """Smoke-test is_hermitian and rayleigh_quotient on fixed matrices."""
    hermitian = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    vec = np.array([[1], [2], [3]])
    assert is_hermitian(hermitian), f"{hermitian} is not hermitian."
    print(rayleigh_quotient(hermitian, vec))
    real_symmetric = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(real_symmetric), f"{real_symmetric} is not hermitian."
    # A 1x1 array compared to a float is truthy iff the values match.
    assert rayleigh_quotient(real_symmetric, vec) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
dd9814f4339308678dd3b49ec818c02f22cb4071
|
32174f2b74b286a52a2f3b0bfd120a0711bfc6dc
|
/sample/web/app/blogs/views.py
|
1eed5546a6795b31d89b50bb7d975bad02c62a42
|
[
"MIT"
] |
permissive
|
hdknr/django-mediafiles
|
d13172162506cba2abdab0d85bc2815e2e24b6e6
|
7526e35eb7f532e36c95e7aa76290bb95a9ac41a
|
refs/heads/master
| 2020-06-04T03:16:28.824865
| 2014-10-30T04:10:40
| 2014-10-30T04:10:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,787
|
py
|
# -*- coding: utf-8 -*-
from django import template
from django.http import HttpResponse,HttpResponseRedirect,Http404
from django.shortcuts import render_to_response
#
import uuid
#
from models import Blog
from forms import BlogForm
from mediafiles.models import MediaFile
from mediafiles.forms import MediaFileForm,media_formset
#
def media(request,id):
    """Return the MediaFile with primary key *id* rendered through its
    response() helper (presumably serving the stored bytes — confirm)."""
    return MediaFile.objects.get(id=id).response(HttpResponse)
def media_preview(request,id):
    """Render the preview template for the MediaFile with primary key *id*."""
    request_context = template.RequestContext(request)
    media_object = MediaFile.objects.get(id=id)
    return render_to_response(
        'blogs/media/preview.html',
        {'media': media_object},
        context_instance=request_context)
def blog_edit_simple(request,id):
    """Edit a Blog with one extra MediaFile upload form.

    GET renders the blog form plus an empty media form.  POST saves the
    blog, and when a file was uploaded, stamps it with the current user
    and a generated slug, attaches it to the blog, and resets the media
    form so the page offers a fresh upload field.
    """
    blog = Blog.objects.get(id=id)
    if request.method == "GET":
        form = BlogForm(instance=blog,prefix='blog')
        media_form = MediaFileForm(prefix='media')
    else:
        form = BlogForm(request.POST,instance=blog,prefix='blog')
        media_form = MediaFileForm(
            request.POST,request.FILES,prefix='media')
        if form.is_valid() :
            form.save()
        if media_form.is_valid():
            media_form.instance.user = request.user
            # uuid1 supplies a unique slug without asking the uploader
            media_form.instance.slug = uuid.uuid1().hex
            media_form.save()
            blog.medias.add(media_form.instance)
            # blank form so the template is ready for another upload
            media_form = MediaFileForm(prefix='media')
    return render_to_response('blogs/blog/edit_simple.html',
        {'form': form,'media_form':media_form, },
        context_instance=template.RequestContext(request))
def blog_edit_formset(request,id):
    """Edit a Blog and its attached MediaFiles through a formset.

    GET renders the blog form plus one media form per attached file (and
    the formset's extra blank forms).  POST saves the blog, then walks
    the media forms: forms flagged 'removing' are detached from the
    blog, other valid forms with data are stamped with the current user
    (or None for anonymous users), saved, and attached.  The formset is
    rebuilt afterwards so a fresh extra form appears for the next upload.
    """
    blog = Blog.objects.get(id=id)
    form = BlogForm(request.POST if request.method =="POST" else None ,
            instance=blog,prefix='blog')
    medias = media_formset(request,blog.medias.all())
    if request.method == "POST":
        if form.is_valid() :
            form.save()
        if medias.is_valid():
            for media in medias.forms:
                if media.is_valid() and media.instance.data:
                    if media.cleaned_data.get('removing',False):
                        blog.medias.remove(media.instance)
                    else:
                        media.instance.user = request.user if request.user.is_authenticated() else None
                        media.save()
                        blog.medias.add(media.instance)
        else:
            #: error handling — was a Python 2 `print` statement; the
            #: function form prints identically on Python 2 and keeps the
            #: module parseable under Python 3 tooling.
            print(medias.errors)
        #: rebuild so medias.extra_forms grows after adding a new mediafile
        medias = media_formset(None,blog.medias.all())
    return render_to_response('blogs/blog/edit_formset.html',
        {'form': form,'medias':medias, },
        context_instance=template.RequestContext(request))
def blog_edit(request,id):
    # URL entry point: of the two editors defined in this module, the
    # formset-based one is the one in use.
    return blog_edit_formset(request,id)
|
[
"gmail@hdknr.com"
] |
gmail@hdknr.com
|
d8d457d18bf39b70f109b94084363f9d7ad6a62d
|
c7d124bbd7ab66ad7acd50765a0d5c11e7925d16
|
/generate_test.py
|
402d03607f30aeb012c03c5f3127892e4e05fff1
|
[] |
no_license
|
lambdaloop/scheduling
|
aa240c568eb974d57d7fc93af3cd866293c1f417
|
fd111594d2c5a652b42796027195a352db3a9fce
|
refs/heads/master
| 2021-06-03T19:26:16.300509
| 2019-04-14T21:26:28
| 2019-04-14T21:26:28
| 29,323,677
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 633
|
py
|
#!/usr/bin/env python2
# Generate a random scheduling test case and write it to test.csv.
# 20 people, 9 time slots; each person needs 1-3 slots and lists at least
# that many preferred slots (up to 3 more, capped at the slot count).
import csv
import random

fieldnames = ['name', 'preferences', 'num_slots']
n_people = 20
n_slots = 9

# `with` guarantees the file is closed (and flushed) even if a row write
# fails — the original open()/close() pair leaked the handle on error.
with open('test.csv', 'w') as f:
    writer = csv.DictWriter(f, fieldnames)
    writer.writeheader()
    for i in range(n_people):
        name = str(i)
        # Number of slots this person must be scheduled into.
        slots = random.randint(1, 3)
        # They always prefer at least as many slots as they need.
        pref_num = min(slots + random.randint(0, 3), n_slots)
        pref = sorted(random.sample(range(n_slots), pref_num))
        writer.writerow({
            'name': name,
            'num_slots': slots,
            'preferences': ' '.join(map(str, pref))
        })
|
[
"krchtchk@gmail.com"
] |
krchtchk@gmail.com
|
39de8218dbf4b99aaa6290c59d2f7556322db935
|
371fe9a1fdeb62ad1142b34d732bde06f3ce21a0
|
/scripts/extract_loops_seq_approx.py
|
36d5e97c42658a9a6bd31287b6fd2f7138e26ba2
|
[] |
no_license
|
maickrau/rdna_resolution
|
971f3b7e803565c9432be69b8e2a2852f55b8b79
|
aab42310c31e655cbbc318331082fa3436d69075
|
refs/heads/master
| 2023-03-03T05:14:33.966930
| 2021-02-17T20:45:20
| 2021-02-17T20:45:20
| 339,851,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,706
|
py
|
#!/usr/bin/python
import sys
import re
# NOTE(review): `re` is imported but never used in this script.
# Usage: extract_loops_seq_approx.py <loop_middle> <end1> <end2> <max_edits>
loop_middle = sys.argv[1] # format ACGTAGA...
# loop ends from sys.argv[2]
# loop ends from sys.argv[3]
max_edits = int(sys.argv[4])  # max edit distance allowed when matching markers
# fasta from stdin
# loops to stdout
def revcomp(s):
	"""Return the reverse complement of upper-case DNA sequence *s*."""
	pair = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
	return "".join(pair[base] for base in reversed(s))
# The end markers are matched on either strand, so keep each marker
# together with its reverse complement.
loop_ends = set()
for v in sys.argv[2:4]:
	loop_ends.add(v) # format ACGACT...
	loop_ends.add(revcomp(v))
	assert len(v) == len(loop_middle)  # all markers must share one length
rev_loop_middle = revcomp(loop_middle)  # NOTE(review): never read afterwards
def find_seq_positions(seq, query, mismatches):
	"""Approximately match *query* inside *seq*.

	Semi-global edit-distance DP: an alignment may begin at any position
	of *seq* for free (cell 0 of every column is 0).  Returns a list of
	(start, end) index pairs into *seq* where *query* aligns with edit
	distance <= *mismatches*; consecutive hits sharing a start position
	are deduplicated, keeping the better-scoring end.
	"""
	# dp_column[j]: best score of query[:j] ending at the previous seq
	# position; start_index[j]: where that alignment began.  The new_*
	# pair is the column currently being filled, swapped in at the end.
	dp_column = [0]
	new_dp_column = [0]
	start_index = [0]
	new_start_index = [0]
	for i in range(0, len(query)):
		dp_column.append(i+1)
		new_dp_column.append(i+1)
		start_index.append(0)
		new_start_index.append(0)
	result = []
	last_score = 0
	last_valid = 0
	for i in range(0, len(seq)):
		# Free start: aligning zero query characters here costs nothing.
		new_dp_column[0] = 0
		new_start_index[0] = i
		new_last_valid = 0
		for j in range(0, len(query)):
			# Gap: skip query[j].
			new_dp_column[j+1] = new_dp_column[j] + 1
			new_start_index[j+1] = new_start_index[j]
			match_score = 0 if query[j] == seq[i] else 1
			# Diagonal: match/mismatch of query[j] against seq[i].
			if dp_column[j] + match_score < new_dp_column[j+1]:
				new_dp_column[j+1] = dp_column[j] + match_score
				new_start_index[j+1] = start_index[j]
			# Gap: skip seq[i].
			# NOTE(review): the guard tests `+ 1` but the update adds
			# `+ match_score` — the two disagree; confirm which gap cost
			# was intended before modifying this DP.
			if dp_column[j+1] + 1 < new_dp_column[j+1]:
				new_dp_column[j+1] = dp_column[j+1] + match_score
				new_start_index[j+1] = start_index[j+1]
			if new_dp_column[j+1] <= mismatches: new_last_valid = j+1
			# Band pruning: once no cell near the valid band can stay
			# within budget, mark the column failed and stop early.
			if new_dp_column[j+1] > mismatches and j+1 > last_valid+1 and j+1 > new_last_valid+1:
				new_dp_column[-1] = mismatches+1
				break
		last_valid = new_last_valid
		if new_dp_column[-1] <= mismatches:
			# Keep at most one hit per start position: replace the stored
			# hit only if this one scores strictly better.
			skip = False
			if len(result) > 0:
				if result[-1][0] == new_start_index[-1]:
					if last_score <= new_dp_column[-1]:
						skip = True
					else:
						result = result[:-1]
			if not skip:
				result.append((new_start_index[-1], i))
				last_score = new_dp_column[-1]
		# Reuse buffers: the current column becomes the previous one.
		(dp_column, new_dp_column) = (new_dp_column, dp_column)
		(start_index, new_start_index) = (new_start_index, start_index)
	return result
def output_loops(name, seq, mismatches):
	"""Split read *seq* into loop units and print them to stdout.

	Locates all occurrences of the module-level marker `loop_middle`
	(retrying on the reverse complement if the forward strand has none)
	plus up to two `loop_ends` markers, then prints one tab-separated
	"<name>_loop_<i>" / sequence line per marker-to-marker segment.
	Reads with an inconsistent marker layout dump debug output via
	print and then trip an assert.
	"""
	seq = seq.replace('a', 'A').replace('c', 'C').replace('g', 'G').replace('t', 'T')
	fw = True  # NOTE(review): never read afterwards — leftover strand flag?
	seq_poses = find_seq_positions(seq, loop_middle, mismatches)
	if len(seq_poses) == 0:
		# No hit on the forward strand: retry on the reverse complement.
		seq = revcomp(seq)
		seq_poses = find_seq_positions(seq, loop_middle, mismatches)
		if len(seq_poses) == 0: return
	assert len(seq_poses) > 0
	loop_start_end_poses = []
	for end in loop_ends:
		loop_start_end_poses += find_seq_positions(seq, end, mismatches)
	# A read can touch at most two end markers (one on each side).
	if len(loop_start_end_poses) > 2:
		print(seq_poses)
		print(loop_start_end_poses)
		print(name)
	assert len(loop_start_end_poses) <= 2
	loop_start_end_poses.sort()
	# From here on only the start coordinate of each hit is used.
	loop_middle_poses = [p[0] for p in seq_poses]
	loop_start_end_poses = [p[0] for p in loop_start_end_poses]
	# A single marker of any kind yields no complete loop.
	if len(loop_middle_poses) + len(loop_start_end_poses) == 1: return
	# Sanity: a lone end marker must lie outside all middle markers...
	if len(loop_start_end_poses) == 1:
		if not (loop_start_end_poses[0] < loop_middle_poses[0] or loop_start_end_poses[0] > loop_middle_poses[-1]):
			print(name)
			print(seq)
			print(loop_start_end_poses)
			print(loop_middle_poses)
		assert loop_start_end_poses[0] < loop_middle_poses[0] or loop_start_end_poses[0] > loop_middle_poses[-1]
	# ...and two end markers must bracket them.
	if len(loop_start_end_poses) == 2:
		if not (loop_start_end_poses[0] < loop_middle_poses[0] and loop_start_end_poses[1] > loop_middle_poses[-1]):
			print(name)
			print(seq)
			print(loop_start_end_poses)
			print(loop_middle_poses)
		assert loop_start_end_poses[0] < loop_middle_poses[0] and loop_start_end_poses[1] > loop_middle_poses[-1]
	loop_seqs = []
	# Leading segment: left end marker up to the first middle marker.
	if (len(loop_start_end_poses) == 1 and loop_start_end_poses[0] < loop_middle_poses[0]) or len(loop_start_end_poses) == 2:
		assert loop_start_end_poses[0] < loop_middle_poses[0]
		loop_seqs.append(seq[loop_start_end_poses[0]:loop_middle_poses[0]+len(loop_middle)])
	# Interior segments: each middle marker to the next one.
	for i in range(1, len(loop_middle_poses)):
		loop_seqs.append(seq[loop_middle_poses[i-1]:loop_middle_poses[i]+len(loop_middle)])
	# Trailing segment: last middle marker up to the right end marker.
	if (len(loop_start_end_poses) == 1 and loop_start_end_poses[0] > loop_middle_poses[-1]) or len(loop_start_end_poses) == 2:
		assert loop_start_end_poses[-1] > loop_middle_poses[-1]
		loop_seqs.append(seq[loop_middle_poses[-1]:loop_start_end_poses[-1]+len(loop_middle)])
	assert len(loop_seqs) > 0
	for i in range(0, len(loop_seqs)):
		print(name + "_loop_" + str(i) + "\t" + loop_seqs[i])
current_seq = ""
current_name = ""
for l in sys.stdin:
if l[0] == '>':
if len(current_seq) > 0:
output_loops(current_name, current_seq, max_edits)
current_name = l[1:].strip().split(' ')[0].strip()
current_seq = ""
else:
current_seq += l.strip()
if len(current_seq) > 0:
output_loops(current_name, current_seq, max_edits)
|
[
"m_rautiainen@hotmail.com"
] |
m_rautiainen@hotmail.com
|
966016a6c669ca24a56fb831863b51dfbec863e3
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-1/434e9e1d8f62ba49daa2f720956e048e336b3c9c-<clone>-bug.py
|
3c358f1301ecb059826eef335a56b1bc630c3ce2
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,482
|
py
|
def clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec, verify_commit):
    """Make a new git repo at *dest* if it does not already exist.

    git_path      -- path to the git executable
    module        -- object providing run_command()/warn()
                     (presumably an AnsibleModule — confirm)
    repo          -- URL or path of the repository to clone
    dest          -- destination directory for the clone
    remote        -- name for the origin remote (non-bare clones)
    depth         -- shallow-clone depth, or falsy for a full clone
    version       -- branch/tag/SHA to clone ('HEAD' for the default)
    bare          -- create a bare repository when truthy
    reference     -- repository to borrow objects from (--reference)
    refspec       -- extra refspec to fetch after cloning
    verify_commit -- verify the signature of *version* after cloning
    """
    dest_dirname = os.path.dirname(dest)
    try:
        os.makedirs(dest_dirname)
    except:
        # NOTE(review): bare except — presumably tolerating "directory
        # already exists", but it also hides every other failure.
        pass
    cmd = [git_path, 'clone']
    if bare:
        cmd.append('--bare')
    else:
        cmd.extend(['--origin', remote])
    if depth:
        if ((version == 'HEAD') or refspec):
            cmd.extend(['--depth', str(depth)])
        elif (is_remote_branch(git_path, module, dest, repo, version) or is_remote_tag(git_path, module, dest, repo, version)):
            # Shallow-cloning a specific ref only works for branches and
            # tags, so pass --branch alongside --depth.
            cmd.extend(['--depth', str(depth)])
            cmd.extend(['--branch', version])
        else:
            # version looks like a SHA (or is unknown): git cannot
            # shallow-clone it directly, so fall back to a full clone.
            module.warn('Ignoring depth argument. Shallow clones are only available for HEAD, branches, tags or in combination with refspec.')
    if reference:
        cmd.extend(['--reference', str(reference)])
    cmd.extend([repo, dest])
    module.run_command(cmd, check_rc=True, cwd=dest_dirname)
    if bare:
        # Bare clones ignore --origin, so register the remote by hand
        # unless the default name is wanted anyway.
        if (remote != 'origin'):
            module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest)
    if refspec:
        # Fetch the extra refspec the clone itself did not bring in.
        cmd = [git_path, 'fetch']
        if depth:
            cmd.extend(['--depth', str(depth)])
        cmd.extend([remote, refspec])
        module.run_command(cmd, check_rc=True, cwd=dest)
    if verify_commit:
        verify_commit_sign(git_path, module, dest, version)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.